From a453ac31f3428614cceb99027f8efbdb9258a40b Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Tue, 14 May 2024 22:03:01 +0200
Subject: Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.

Signed-off-by: Daniel Baumann
---
 test/ansible_test/Makefile | 13 +
 test/ansible_test/unit/test_diff.py | 105 +
 .../test_validate_modules_regex.py | 43 +
 test/integration/network-integration.cfg | 14 +
 .../network-integration.requirements.txt | 1 +
 test/integration/targets/add_host/aliases | 1 +
 test/integration/targets/add_host/tasks/main.yml | 159 +
 test/integration/targets/ansiballz_python/aliases | 2 +
 .../library/check_rlimit_and_maxfd.py | 31 +
 .../ansiballz_python/library/custom_module.py | 19 +
 .../targets/ansiballz_python/library/sys_check.py | 23 +
 .../ansiballz_python/module_utils/custom_util.py | 6 +
 .../targets/ansiballz_python/tasks/main.yml | 68 +
 test/integration/targets/ansible-doc/aliases | 1 +
 .../testns/testcol/MANIFEST.json | 30 +
 .../testns/testcol/plugins/cache/notjsonfile.py | 49 +
 .../testns/testcol/plugins/inventory/statichost.py | 35 +
 .../testns/testcol/plugins/lookup/noop.py | 37 +
 .../testns/testcol/plugins/modules/fakemodule.py | 26 +
 .../testcol/plugins/modules/notrealmodule.py | 13 +
 .../testcol/plugins/vars/noop_vars_plugin.py | 27 +
 .../targets/ansible-doc/fakemodule.output | 15 +
 test/integration/targets/ansible-doc/inventory | 1 +
 .../targets/ansible-doc/library/test_docs.py | 39 +
 .../library/test_docs_missing_description.py | 40 +
 .../ansible-doc/library/test_docs_no_metadata.py | 35 +
 .../ansible-doc/library/test_docs_no_status.py | 38 +
 .../library/test_docs_non_iterable_status.py | 39 +
 .../library/test_docs_removed_precedence.py | 40 +
 .../library/test_docs_removed_status.py | 39 +
 .../ansible-doc/library/test_docs_returns.py | 56 +
 .../library/test_docs_returns_broken.py | 40 +
 .../ansible-doc/library/test_docs_suboptions.py | 70 +
 .../targets/ansible-doc/library/test_empty.py | 0
 .../targets/ansible-doc/library/test_no_docs.py | 23 +
 .../library/test_no_docs_no_metadata.py | 18 +
 .../ansible-doc/library/test_no_docs_no_status.py | 22 +
 .../library/test_no_docs_non_iterable_status.py | 23 +
 test/integration/targets/ansible-doc/runme.sh | 42 +
 test/integration/targets/ansible-doc/test.yml | 138 +
 .../targets/ansible-doc/test_docs_returns.output | 37 +
 .../ansible-doc/test_docs_suboptions.output | 43 +
 .../targets/ansible-galaxy-collection-scm/aliases | 3 +
 .../ansible-galaxy-collection-scm/meta/main.yml | 3 +
 .../tasks/download.yml | 47 +
 .../tasks/empty_installed_collections.yml | 7 +
 .../tasks/individual_collection_repo.yml | 20 +
 .../ansible-galaxy-collection-scm/tasks/main.yml | 41 +
 .../tasks/multi_collection_repo_all.yml | 14 +
 .../tasks/multi_collection_repo_individual.yml | 15 +
 .../tasks/reinstalling.yml | 31 +
 .../tasks/scm_dependency.yml | 14 +
 .../tasks/scm_dependency_deduplication.yml | 54 +
 .../ansible-galaxy-collection-scm/tasks/setup.yml | 19 +
 .../tasks/setup_multi_collection_repo.yml | 27 +
 .../tasks/setup_recursive_scm_dependency.yml | 33 +
 .../targets/ansible-galaxy-collection/aliases | 3 +
 .../files/build_bad_tar.py | 84 +
 .../library/setup_collections.py | 169 +
 .../ansible-galaxy-collection/meta/main.yml | 3 +
 .../ansible-galaxy-collection/tasks/build.yml | 53 +
 .../ansible-galaxy-collection/tasks/download.yml | 142 +
 .../ansible-galaxy-collection/tasks/init.yml | 44 +
 .../ansible-galaxy-collection/tasks/install.yml | 330 +
 .../ansible-galaxy-collection/tasks/main.yml | 175 +
 .../ansible-galaxy-collection/tasks/publish.yml | 46 +
 .../templates/ansible.cfg.j2 | 10 +
 .../ansible-galaxy-collection/vars/main.yml | 1 +
 test/integration/targets/ansible-galaxy/aliases | 4 +
 .../targets/ansible-galaxy/cleanup-default.yml | 5 +
 .../targets/ansible-galaxy/cleanup-freebsd.yml | 6 +
 .../integration/targets/ansible-galaxy/cleanup.yml | 19 +
 test/integration/targets/ansible-galaxy/runme.sh | 449 ++
 test/integration/targets/ansible-galaxy/setup.yml | 11 +
 test/integration/targets/ansible-runner/aliases | 6 +
 .../targets/ansible-runner/files/adhoc_example1.py | 26 +
 .../targets/ansible-runner/files/constraints.txt | 5 +
 .../ansible-runner/files/playbook_example1.py | 38 +
 .../targets/ansible-runner/filter_plugins/parse.py | 17 +
 test/integration/targets/ansible-runner/inventory | 1 +
 test/integration/targets/ansible-runner/runme.sh | 5 +
 .../ansible-runner/tasks/adhoc_example1.yml | 16 +
 .../targets/ansible-runner/tasks/main.yml | 5 +
 .../ansible-runner/tasks/playbook_example1.yml | 16 +
 .../targets/ansible-runner/tasks/setup.yml | 19 +
 test/integration/targets/ansible-runner/test.yml | 3 +
 .../targets/ansible-test-docker/aliases | 1 +
 .../ansible_collections/ns/col/galaxy.yml | 6 +
 .../ns/col/plugins/doc_fragments/ps_util.py | 21 +
 .../ns/col/plugins/module_utils/PSUtil.psm1 | 16 +
 .../ns/col/plugins/module_utils/my_util.py | 6 +
 .../ns/col/plugins/modules/hello.py | 46 +
 .../ns/col/plugins/modules/win_util_args.ps1 | 16 +
 .../ns/col/plugins/modules/win_util_args.py | 39 +
 .../integration/targets/minimal/tasks/main.yml | 7 +
 .../unit/plugins/module_utils/test_my_util.py | 8 +
 .../col/tests/unit/plugins/modules/test_hello.py | 8 +
 .../ansible-test-docker/collection-tests/docker.sh | 18 +
 .../targets/ansible-test-docker/runme.sh | 24 +
 test/integration/targets/ansible-test/aliases | 2 +
 .../ansible_collections/ns/col/README.rst | 3 +
 .../ansible_collections/ns/col/galaxy.yml | 6 +
 .../ansible_collections/ns/col/meta/runtime.yml | 4 +
 .../ns/col/plugins/filter/check_pylint.py | 21 +
 .../ns/col/plugins/module_utils/__init__.py | 0
 .../ns/col/plugins/module_utils/my_util.py | 6 +
 .../ns/col/plugins/modules/bad.py | 34 +
 .../ns/col/plugins/modules/hello.py | 46 +
 .../tests/integration/targets/hello/files/bad.py | 16 +
 .../tests/integration/targets/hello/tasks/main.yml | 7 +
 .../ns/col/tests/sanity/ignore.txt | 6 +
 .../unit/plugins/module_utils/test_my_util.py | 8 +
 .../col/tests/unit/plugins/modules/test_hello.py | 8 +
 .../tests/integration/constraints.txt | 1 +
 .../tests/integration/requirements.txt | 1 +
 .../integration/targets/constraints/tasks/main.yml | 7 +
 .../ns/col_constraints/tests/unit/constraints.txt | 1 +
 .../tests/unit/plugins/modules/test_constraints.py | 8 +
 .../ns/col_constraints/tests/unit/requirements.txt | 1 +
 .../ansible-test/collection-tests/constraints.sh | 20 +
 .../ansible-test/collection-tests/coverage.sh | 22 +
 .../collection-tests/git-at-collection-base.sh | 10 +
 .../collection-tests/git-at-collection-root.sh | 10 +
 .../ansible-test/collection-tests/git-common.bash | 43 +
 .../ansible-test/collection-tests/install-git.yml | 5 +
 .../targets/ansible-test/collection-tests/venv.sh | 43 +
 test/integration/targets/ansible-test/runme.sh | 24 +
 .../targets/ansible/adhoc-callback.stdout | 12 +
 test/integration/targets/ansible/aliases | 2 +
 .../targets/ansible/ansible-test\303\251.cfg" | 2 +
 .../ansible/callback_plugins/callback_debug.py | 24 +
 test/integration/targets/ansible/no-extension | 2 +
 test/integration/targets/ansible/playbook.yml | 5 +
 .../targets/ansible/playbookdir_cfg.ini | 2 +
 test/integration/targets/ansible/runme.sh | 64 +
 test/integration/targets/ansible/vars.yml | 1 +
 .../integration/targets/any_errors_fatal/18602.yml | 21 +
 test/integration/targets/any_errors_fatal/aliases | 1 +
 .../targets/any_errors_fatal/always_block.yml | 27 +
 .../integration/targets/any_errors_fatal/inventory | 6 +
 .../targets/any_errors_fatal/on_includes.yml | 7 +
 .../targets/any_errors_fatal/play_level.yml | 15 +
 test/integration/targets/any_errors_fatal/runme.sh | 23 +
 .../targets/any_errors_fatal/test_fatal.yml | 12 +
 test/integration/targets/apt/aliases | 7 +
 test/integration/targets/apt/defaults/main.yml | 1 +
 test/integration/targets/apt/meta/main.yml | 3 +
 .../integration/targets/apt/tasks/apt-builddep.yml | 55 +
 .../targets/apt/tasks/apt-multiarch.yml | 47 +
 test/integration/targets/apt/tasks/apt.yml | 416 ++
 test/integration/targets/apt/tasks/main.yml | 40 +
 test/integration/targets/apt/tasks/repo.yml | 253 +
 test/integration/targets/apt/tasks/upgrade.yml | 64 +
 .../targets/apt/tasks/url-with-deps.yml | 56 +
 test/integration/targets/apt/vars/Ubuntu-20.yml | 1 +
 test/integration/targets/apt/vars/default.yml | 1 +
 test/integration/targets/apt_key/aliases | 6 +
 test/integration/targets/apt_key/meta/main.yml | 2 +
 test/integration/targets/apt_key/tasks/apt_key.yml | 19 +
 test/integration/targets/apt_key/tasks/main.yml | 28 +
 test/integration/targets/apt_repository/aliases | 7 +
 .../targets/apt_repository/meta/main.yml | 2 +
 .../targets/apt_repository/tasks/apt.yml | 243 +
 .../targets/apt_repository/tasks/cleanup.yml | 17 +
 .../targets/apt_repository/tasks/main.yml | 25 +
 .../targets/apt_repository/tasks/mode.yaml | 130 +
 .../targets/apt_repository/tasks/mode_cleanup.yaml | 7 +
 test/integration/targets/args/aliases | 1 +
 test/integration/targets/args/runme.sh | 12 +
 test/integration/targets/argspec/aliases | 1 +
 .../integration/targets/argspec/library/argspec.py | 153 +
 test/integration/targets/argspec/tasks/main.yml | 419 ++
 .../targets/argspec/tasks/password_no_log.yml | 14 +
 test/integration/targets/assemble/aliases | 1 +
 test/integration/targets/assemble/files/fragment1 | 1 +
 test/integration/targets/assemble/files/fragment2 | 1 +
 test/integration/targets/assemble/files/fragment3 | 1 +
 test/integration/targets/assemble/files/fragment4 | 1 +
 test/integration/targets/assemble/files/fragment5 | 1 +
 test/integration/targets/assemble/meta/main.yml | 20 +
 test/integration/targets/assemble/tasks/main.yml | 163 +
 test/integration/targets/assert/aliases | 2 +
 .../targets/assert/assert_quiet.out.quiet.stderr | 2 +
 .../targets/assert/assert_quiet.out.quiet.stdout | 17 +
 test/integration/targets/assert/inventory | 3 +
 test/integration/targets/assert/quiet.yml | 16 +
 test/integration/targets/assert/runme.sh | 71 +
 test/integration/targets/async/aliases | 4 +
 .../targets/async/library/async_test.py | 49 +
 test/integration/targets/async/meta/main.yml | 2 +
 test/integration/targets/async/tasks/main.yml | 300 +
 test/integration/targets/async_extra_data/aliases | 1 +
 .../targets/async_extra_data/library/junkping.py | 15 +
 test/integration/targets/async_extra_data/runme.sh | 7 +
 .../targets/async_extra_data/test_async.yml | 10 +
 .../targets/async_fail/action_plugins/normal.py | 62 +
 test/integration/targets/async_fail/aliases | 3 +
 .../targets/async_fail/library/async_test.py | 50 +
 test/integration/targets/async_fail/meta/main.yml | 2 +
 test/integration/targets/async_fail/tasks/main.yml | 36 +
 test/integration/targets/become/aliases | 3 +
 test/integration/targets/become/files/baz.txt | 1 +
 test/integration/targets/become/tasks/default.yml | 82 +
 test/integration/targets/become/tasks/main.yml | 5 +
 test/integration/targets/become/tasks/su.yml | 91 +
 test/integration/targets/become/tasks/sudo.yml | 91 +
 test/integration/targets/become/templates/bar.j2 | 1 +
 test/integration/targets/become/vars/default.yml | 1 +
 test/integration/targets/become_su/aliases | 3 +
 test/integration/targets/become_su/runme.sh | 6 +
 test/integration/targets/binary/aliases | 1 +
 test/integration/targets/binary/files/b64_latin1 | 1 +
 test/integration/targets/binary/files/b64_utf8 | 1 +
 .../integration/targets/binary/files/from_playbook | 1 +
 test/integration/targets/binary/meta/main.yml | 2 +
 test/integration/targets/binary/tasks/main.yml | 131 +
 .../binary/templates/b64_latin1_template.j2 | 1 +
 .../targets/binary/templates/b64_utf8_template.j2 | 1 +
 .../binary/templates/from_playbook_template.j2 | 1 +
 test/integration/targets/binary/vars/main.yml | 3 +
 test/integration/targets/binary_modules/Makefile | 16 +
 test/integration/targets/binary_modules/aliases | 1 +
 .../binary_modules/download_binary_modules.yml | 9 +
 .../targets/binary_modules/group_vars/all | 3 +
 .../targets/binary_modules/library/.gitignore | 1 +
 .../targets/binary_modules/library/helloworld.go | 89 +
 .../roles/test_binary_modules/tasks/main.yml | 53 +
 test/integration/targets/binary_modules/test.sh | 8 +
 .../targets/binary_modules/test_binary_modules.yml | 5 +
 .../targets/binary_modules_posix/aliases | 2 +
 .../targets/binary_modules_posix/runme.sh | 6 +
 .../targets/binary_modules_winrm/aliases | 4 +
 .../targets/binary_modules_winrm/runme.sh | 6 +
 test/integration/targets/blockinfile/aliases | 1 +
 .../targets/blockinfile/files/sshd_config | 135 +
 test/integration/targets/blockinfile/meta/main.yml | 2 +
 .../tasks/add_block_to_existing_file.yml | 47 +
 .../tasks/block_without_trailing_newline.yml | 30 +
 .../targets/blockinfile/tasks/create_file.yml | 32 +
 .../integration/targets/blockinfile/tasks/diff.yml | 18 +
 .../tasks/file_without_trailing_newline.yml | 36 +
 .../targets/blockinfile/tasks/insertafter.yml | 37 +
 .../targets/blockinfile/tasks/insertbefore.yml | 39 +
 .../integration/targets/blockinfile/tasks/main.yml | 40 +
 .../blockinfile/tasks/preserve_line_endings.yml | 24 +
 .../targets/blockinfile/tasks/validate.yml | 28 +
 test/integration/targets/blocks/aliases | 1 +
 .../targets/blocks/always_failure_no_rescue_rc.yml | 13 +
 .../blocks/always_failure_with_rescue_rc.yml | 16 +
 .../targets/blocks/always_no_rescue_rc.yml | 12 +
 test/integration/targets/blocks/block_fail.yml | 5 +
 .../targets/blocks/block_fail_tasks.yml | 9 +
 .../integration/targets/blocks/block_in_rescue.yml | 33 +
 .../targets/blocks/block_rescue_vars.yml | 16 +
 test/integration/targets/blocks/fail.yml | 2 +
 test/integration/targets/blocks/finalized_task.yml | 17 +
 test/integration/targets/blocks/issue29047.yml | 4 +
 .../targets/blocks/issue29047_tasks.yml | 13 +
 test/integration/targets/blocks/issue71306.yml | 16 +
 test/integration/targets/blocks/main.yml | 128 +
 test/integration/targets/blocks/nested_fail.yml | 3 +
 .../targets/blocks/nested_nested_fail.yml | 3 +
 .../targets/blocks/roles/fail/tasks/main.yml | 3 +
 test/integration/targets/blocks/runme.sh | 95 +
 .../targets/builtin_vars_prompt/aliases | 3 +
 .../targets/builtin_vars_prompt/runme.sh | 6 +
 .../builtin_vars_prompt/test-vars_prompt.py | 127 +
 .../targets/builtin_vars_prompt/unsafe.yml | 20 +
 .../targets/builtin_vars_prompt/unsupported.yml | 18 +
 .../targets/builtin_vars_prompt/vars_prompt-1.yml | 15 +
 .../targets/builtin_vars_prompt/vars_prompt-2.yml | 16 +
 .../targets/builtin_vars_prompt/vars_prompt-3.yml | 17 +
 .../targets/builtin_vars_prompt/vars_prompt-4.yml | 16 +
 .../targets/builtin_vars_prompt/vars_prompt-5.yml | 14 +
 .../targets/builtin_vars_prompt/vars_prompt-6.yml | 20 +
 .../targets/builtin_vars_prompt/vars_prompt-7.yml | 12 +
 test/integration/targets/callback_default/aliases | 2 +
 .../callback_default.out.check_markers_dry.stderr | 2 +
 .../callback_default.out.check_markers_dry.stdout | 78 +
 .../callback_default.out.check_markers_wet.stderr | 2 +
 .../callback_default.out.check_markers_wet.stdout | 74 +
 ...callback_default.out.check_nomarkers_dry.stderr | 2 +
 ...callback_default.out.check_nomarkers_dry.stdout | 74 +
 ...callback_default.out.check_nomarkers_wet.stderr | 2 +
 ...callback_default.out.check_nomarkers_wet.stdout | 74 +
 .../callback_default.out.default.stderr | 2 +
 .../callback_default.out.default.stdout | 72 +
 .../callback_default.out.failed_to_stderr.stderr | 5 +
 .../callback_default.out.failed_to_stderr.stdout | 69 +
 .../callback_default.out.free.stdout | 35 +
 .../callback_default.out.hide_ok.stderr | 2 +
 .../callback_default.out.hide_ok.stdout | 56 +
 .../callback_default.out.hide_skipped.stderr | 2 +
 .../callback_default.out.hide_skipped.stdout | 66 +
 .../callback_default.out.hide_skipped_ok.stderr | 2 +
 .../callback_default.out.hide_skipped_ok.stdout | 52 +
 .../callback_default.out.host_pinned.stdout | 35 +
 .../targets/callback_default/include_me.yml | 2 +
 .../integration/targets/callback_default/inventory | 10 +
 test/integration/targets/callback_default/runme.sh | 190 +
 test/integration/targets/callback_default/test.yml | 88 +
 .../targets/callback_default/test_2.yml | 6 +
 .../targets/callback_default/test_dryrun.yml | 93 +
 .../targets/callback_default/test_non_lockstep.yml | 7 +
 test/integration/targets/changed_when/aliases | 1 +
 .../integration/targets/changed_when/meta/main.yml | 2 +
 .../targets/changed_when/tasks/main.yml | 61 +
 test/integration/targets/check_mode/aliases | 1 +
 .../targets/check_mode/check_mode-not-on-cli.yml | 37 +
 .../targets/check_mode/check_mode-on-cli.yml | 36 +
 test/integration/targets/check_mode/check_mode.yml | 7 +
 .../check_mode/roles/test_always_run/meta/main.yml | 17 +
 .../roles/test_always_run/tasks/main.yml | 29 +
 .../check_mode/roles/test_check_mode/files/foo.txt | 1 +
 .../roles/test_check_mode/tasks/main.yml | 50 +
 .../roles/test_check_mode/templates/foo.j2 | 1 +
 .../check_mode/roles/test_check_mode/vars/main.yml | 1 +
 test/integration/targets/check_mode/runme.sh | 7 +
 test/integration/targets/cli/aliases | 5 +
 test/integration/targets/cli/runme.sh | 7 +
 test/integration/targets/cli/setup.yml | 42 +
 test/integration/targets/cli/test-cli.py | 21 +
 test/integration/targets/cli/test_k_and_K.py | 27 +
 .../targets/collections/a.statichost.yml | 3 +
 test/integration/targets/collections/aliases | 4 +
 .../targets/collections/cache.statichost.yml | 7 +
 .../collections/check_populated_inventory.yml | 11 +
 .../coll_in_sys/plugins/modules/systestmodule.py | 13 +
 .../testcoll/plugins/modules/maskedmodule.py | 13 +
 .../testns/testcoll/plugins/modules/testmodule.py | 13 +
 .../testcoll/roles/maskedrole/tasks/main.yml | 2 +
 .../ansible/builtin/plugins/modules/ping.py | 13 +
 .../ansible/bullcoll/plugins/modules/bullmodule.py | 13 +
 .../module_utils/formerly_testcoll_pkg/__init__.py | 1 +
 .../module_utils/formerly_testcoll_pkg/submod.py | 1 +
 .../testbroken/plugins/filter/broken_filter.py | 13 +
 .../testns/testcoll/meta/runtime.yml | 52 +
 .../playbooks/default_collection_playbook.yml | 49 +
 .../roles/non_coll_role/library/embedded_module.py | 13 +
 .../playbooks/roles/non_coll_role/tasks/main.yml | 29 +
 .../roles/non_coll_role_to_call/tasks/main.yml | 7 +
 .../action/action_subdir/subdir_ping_action.py | 19 +
 .../testcoll/plugins/action/bypass_host_loop.py | 17 +
 .../testcoll/plugins/action/plugin_lookup.py | 40 +
 .../testcoll/plugins/action/subclassed_normal.py | 11 +
 .../plugins/action/uses_redirected_import.py | 20 +
 .../testcoll/plugins/callback/usercallback.py | 27 +
 .../testcoll/plugins/connection/localconn.py | 41 +
 .../testns/testcoll/plugins/doc_fragments/frag.py | 18 +
 .../filter/filter_subdir/my_subdir_filters.py | 14 +
 .../testns/testcoll/plugins/filter/myfilters.py | 14 +
 .../testns/testcoll/plugins/filter/myfilters2.py | 14 +
 .../lookup/lookup_subdir/my_subdir_lookup.py | 11 +
 .../testns/testcoll/plugins/lookup/mylookup.py | 11 +
 .../testns/testcoll/plugins/lookup/mylookup2.py | 12 +
 .../testcoll/plugins/module_utils/AnotherCSMU.cs | 12 +
 .../testns/testcoll/plugins/module_utils/MyCSMU.cs | 19 +
 .../testcoll/plugins/module_utils/MyPSMU.psm1 | 9 +
 .../testns/testcoll/plugins/module_utils/base.py | 12 +
 .../testns/testcoll/plugins/module_utils/leaf.py | 6 +
 .../plugins/module_utils/nested_same/__init__.py | 0
 .../nested_same/nested_same/__init__.py | 0
 .../nested_same/nested_same/nested_same.py | 6 +
 .../testcoll/plugins/module_utils/secondary.py | 6 +
 .../plugins/module_utils/subpkg/__init__.py | 0
 .../testcoll/plugins/module_utils/subpkg/subcs.cs | 13 +
 .../testcoll/plugins/module_utils/subpkg/submod.py | 6 +
 .../plugins/module_utils/subpkg/subps.psm1 | 9 +
 .../plugins/module_utils/subpkg_with_init.py | 11 +
 .../module_utils/subpkg_with_init/__init__.py | 10 +
 .../subpkg_with_init/mod_in_subpkg_with_init.py | 6 +
 .../testcoll/plugins/modules/deprecated_ping.py | 13 +
 .../modules/module_subdir/subdir_ping_module.py | 14 +
 .../testns/testcoll/plugins/modules/ping.py | 13 +
 .../testns/testcoll/plugins/modules/testmodule.py | 21 +
 .../plugins/modules/testmodule_bad_docfrags.py | 25 +
 .../modules/uses_base_mu_granular_nested_import.py | 19 +
 .../modules/uses_collection_redirected_mu.py | 21 +
 .../plugins/modules/uses_core_redirected_mu.py | 19 +
 .../plugins/modules/uses_leaf_mu_flat_import.bak | 3 +
 .../plugins/modules/uses_leaf_mu_flat_import.py | 19 +
 .../plugins/modules/uses_leaf_mu_flat_import.yml | 3 +
 .../modules/uses_leaf_mu_granular_import.py | 19 +
 .../modules/uses_leaf_mu_module_import_from.py | 31 +
 .../testcoll/plugins/modules/uses_mu_missing.py | 16 +
 .../modules/uses_mu_missing_redirect_collection.py | 16 +
 .../modules/uses_mu_missing_redirect_module.py | 16 +
 .../plugins/modules/uses_nested_same_as_func.py | 19 +
 .../plugins/modules/uses_nested_same_as_module.py | 19 +
 .../testcoll/plugins/modules/win_csbasic_only.ps1 | 22 +
 .../testcoll/plugins/modules/win_selfcontained.ps1 | 9 +
 .../testcoll/plugins/modules/win_selfcontained.py | 1 +
 .../plugins/modules/win_uses_coll_csmu.ps1 | 26 +
 .../plugins/modules/win_uses_coll_psmu.ps1 | 25 +
 .../testns/testcoll/plugins/test/mytests.py | 13 +
 .../testns/testcoll/plugins/test/mytests2.py | 13 +
 .../plugins/test/test_subdir/my_subdir_tests.py | 13 +
 .../testns/testcoll/plugins/vars/custom_vars.py | 44 +
 .../testcoll/roles/call_standalone/tasks/main.yml | 6 +
 .../meta/main.yml | 2 +
 .../tasks/main.yml | 7 +
 .../roles/common_handlers/handlers/main.yml | 27 +
 .../role_subdir/subdir_testrole/tasks/main.yml | 10 +
 .../roles/test_fqcn_handlers/meta/main.yml | 2 +
 .../roles/test_fqcn_handlers/tasks/main.yml | 16 +
 .../testns/testcoll/roles/testrole/meta/main.yml | 4 +
 .../testns/testcoll/roles/testrole/tasks/main.yml | 39 +
 .../roles/testrole_main_yaml/meta/main.yml | 4 +
 .../roles/testrole_main_yaml/tasks/main.yml | 33 +
 .../testns/testredirect/meta/runtime.yml | 4 +
 .../me/mycoll1/plugins/action/action1.py | 29 +
 .../me/mycoll1/plugins/modules/action1.py | 24 +
 .../me/mycoll2/plugins/modules/module1.py | 43 +
 .../content_adj/plugins/cache/custom_jsonfile.py | 63 +
 .../content_adj/plugins/inventory/statichost.py | 68 +
 .../content_adj/plugins/module_utils/__init__.py | 0
 .../plugins/module_utils/sub1/__init__.py | 0
 .../plugins/module_utils/sub1/foomodule.py | 6 +
 .../plugins/modules/contentadjmodule.py | 13 +
 .../content_adj/plugins/vars/custom_adj_vars.py | 45 +
 .../custom_vars_plugins/v1_vars_plugin.py | 37 +
 .../custom_vars_plugins/v2_vars_plugin.py | 45 +
 .../custom_vars_plugins/vars_req_whitelist.py | 46 +
 .../override_formerly_core_masked_filter.py | 13 +
 test/integration/targets/collections/includeme.yml | 6 +
 .../targets/collections/inventory_test.yml | 26 +
 .../targets/collections/invocation_tests.yml | 5 +
 .../targets/collections/library/ping.py | 13 +
 test/integration/targets/collections/noop.yml | 4 +
 test/integration/targets/collections/posix.yml | 443 ++
 .../targets/collections/redirected.statichost.yml | 3 +
 .../collections/roles/standalone/tasks/main.yml | 2 +
 .../collections/roles/testrole/tasks/main.yml | 28 +
 test/integration/targets/collections/runme.sh | 114 +
 .../targets/collections/test_bypass_host_loop.yml | 22 +
 .../targets/collections/test_collection_meta.yml | 46 +
 .../override_formerly_core_masked_test.py | 16 +
 .../targets/collections/test_redirect_list.yml | 86 +
 .../targets/collections/testcoll2/MANIFEST.json | 0
 .../testcoll2/plugins/modules/testmodule2.py | 33 +
 .../targets/collections/vars_plugin_tests.sh | 91 +
 test/integration/targets/collections/windows.yml | 28 +
 .../targets/collections_plugin_namespace/aliases | 1 +
 .../my_ns/my_col/plugins/filter/test_filter.py | 15 +
 .../my_ns/my_col/plugins/lookup/lookup_name.py | 9 +
 .../plugins/lookup/lookup_no_future_boilerplate.py | 10 +
 .../my_ns/my_col/plugins/test/test_test.py | 13 +
 .../my_ns/my_col/roles/test/tasks/main.yml | 12 +
 .../targets/collections_plugin_namespace/runme.sh | 5 +
 .../targets/collections_plugin_namespace/test.yml | 3 +
 .../targets/collections_relative_imports/aliases | 4 +
 .../my_ns/my_col/plugins/module_utils/PSRel1.psm1 | 11 +
 .../my_ns/my_col/plugins/module_utils/my_util1.py | 6 +
 .../my_ns/my_col/plugins/module_utils/my_util2.py | 8 +
 .../my_ns/my_col/plugins/module_utils/my_util3.py | 8 +
 .../plugins/module_utils/sub_pkg/PSRel2.psm1 | 11 +
 .../my_ns/my_col/plugins/modules/my_module.py | 24 +
 .../my_ns/my_col/plugins/modules/win_relative.ps1 | 10 +
 .../my_ns/my_col/roles/test/tasks/main.yml | 4 +
 .../my_ns/my_col2/plugins/module_utils/PSRel3.psm1 | 11 +
 .../my_col2/plugins/module_utils/sub_pkg/CSRel4.cs | 14 +
 .../targets/collections_relative_imports/runme.sh | 13 +
 .../targets/collections_relative_imports/test.yml | 3 +
 .../collections_relative_imports/windows.yml | 11 +
 .../targets/collections_runtime_pythonpath/aliases | 3 +
 .../python/dist/plugins/modules/boo.py | 28 +
 .../pyproject.toml | 6 +
 .../ansible-collection-python-dist-boo/setup.cfg | 15 +
 .../python/dist/plugins/modules/boo.py | 28 +
 .../collections_runtime_pythonpath/runme.sh | 60 +
 test/integration/targets/command_shell/aliases | 4 +
 .../targets/command_shell/files/create_afile.sh | 3 +
 .../targets/command_shell/files/remove_afile.sh | 3 +
 .../targets/command_shell/files/test.sh | 3 +
 .../targets/command_shell/meta/main.yml | 2 +
 .../targets/command_shell/tasks/main.yml | 446 ++
 test/integration/targets/common_network/aliases | 1 +
 .../targets/common_network/tasks/main.yml | 4 +
 .../targets/common_network/test_plugins/is_mac.py | 14 +
 test/integration/targets/conditionals/aliases | 1 +
 test/integration/targets/conditionals/play.yml | 551 ++
 test/integration/targets/conditionals/runme.sh | 15 +
 .../targets/conditionals/test_no_warnings.yml | 18 +
 .../targets/conditionals/test_warnings.yml | 14 +
 .../integration/targets/conditionals/vars/main.yml | 22 +
 test/integration/targets/config/aliases | 1 +
 .../targets/config/inline_comment_ansible.cfg | 2 +
 test/integration/targets/config/runme.sh | 20 +
 test/integration/targets/connection/aliases | 1 +
 test/integration/targets/connection/test.sh | 25 +
 .../targets/connection/test_connection.yml | 43 +
 .../targets/connection/test_reset_connection.yml | 5 +
 .../action_plugins/delegation_action.py | 12 +
 .../targets/connection_delegation/aliases | 5 +
 .../connection_plugins/delegation_connection.py | 45 +
 .../targets/connection_delegation/inventory.ini | 1 +
 .../targets/connection_delegation/runme.sh | 9 +
 .../targets/connection_delegation/test.yml | 23 +
 test/integration/targets/connection_local/aliases | 1 +
 test/integration/targets/connection_local/runme.sh | 1 +
 .../connection_local/test_connection.inventory | 7 +
 .../targets/connection_paramiko_ssh/aliases | 5 +
 .../targets/connection_paramiko_ssh/runme.sh | 7 +
 .../targets/connection_paramiko_ssh/test.sh | 1 +
 .../test_connection.inventory | 7 +
 test/integration/targets/connection_posix/aliases | 2 +
 test/integration/targets/connection_posix/test.sh | 18 +
 test/integration/targets/connection_psrp/aliases | 4 +
 .../targets/connection_psrp/files/empty.txt | 0
 test/integration/targets/connection_psrp/runme.sh | 24 +
 .../connection_psrp/test_connection.inventory.j2 | 9 +
 test/integration/targets/connection_psrp/tests.yml | 133 +
 test/integration/targets/connection_ssh/aliases | 3 +
 test/integration/targets/connection_ssh/posix.sh | 1 +
 test/integration/targets/connection_ssh/runme.sh | 65 +
 .../connection_ssh/test_connection.inventory | 7 +
 .../targets/connection_windows_ssh/aliases | 6 +
 .../targets/connection_windows_ssh/runme.sh | 54 +
 .../test_connection.inventory.j2 | 12 +
 .../targets/connection_windows_ssh/tests.yml | 32 +
 .../targets/connection_windows_ssh/tests_fetch.yml | 41 +
 .../targets/connection_windows_ssh/windows.sh | 25 +
 test/integration/targets/connection_winrm/aliases | 4 +
 test/integration/targets/connection_winrm/runme.sh | 18 +
 .../connection_winrm/test_connection.inventory.j2 | 10 +
 test/integration/targets/copy/aliases | 4 +
 test/integration/targets/copy/defaults/main.yml | 2 +
 test/integration/targets/copy/files/foo.txt | 1 +
 test/integration/targets/copy/files/subdir/bar.txt | 1 +
 .../targets/copy/files/subdir/subdir1/bar.txt | 1 +
 .../targets/copy/files/subdir/subdir2/baz.txt | 1 +
 .../files/subdir/subdir2/subdir3/subdir4/qux.txt | 1 +
 test/integration/targets/copy/meta/main.yml | 3 +
 test/integration/targets/copy/tasks/acls.yml | 33 +
 test/integration/targets/copy/tasks/check_mode.yml | 126 +
 .../tasks/dest_in_non_existent_directories.yml | 29 +
 ...dest_in_non_existent_directories_remote_src.yml | 43 +
 test/integration/targets/copy/tasks/main.yml | 117 +
 test/integration/targets/copy/tasks/no_log.yml | 82 +
 test/integration/targets/copy/tasks/selinux.yml | 35 +
 .../src_file_dest_file_in_non_existent_dir.yml | 26 +
 ...le_dest_file_in_non_existent_dir_remote_src.yml | 32 +
 test/integration/targets/copy/tasks/tests.yml | 2261 +++++++
 test/integration/targets/cron/aliases | 5 +
 test/integration/targets/cron/defaults/main.yml | 1 +
 test/integration/targets/cron/meta/main.yml | 2 +
 test/integration/targets/cron/tasks/main.yml | 213 +
 test/integration/targets/dataloader/aliases | 1 +
 .../dataloader/attempt_to_load_invalid_json.yml | 4 +
 test/integration/targets/dataloader/runme.sh | 6 +
 .../targets/dataloader/vars/invalid.json | 1 +
 test/integration/targets/debconf/aliases | 2 +
 test/integration/targets/debconf/meta/main.yml | 2 +
 test/integration/targets/debconf/tasks/main.yml | 36 +
 test/integration/targets/debug/aliases | 1 +
 test/integration/targets/debug/main.yml | 6 +
 test/integration/targets/debug/main_fqcn.yml | 6 +
 test/integration/targets/debug/nosetfacts.yml | 21 +
 test/integration/targets/debug/runme.sh | 20 +
 test/integration/targets/delegate_to/aliases | 4 +
 .../delegate_to/connection_plugins/fakelocal.py | 76 +
 .../targets/delegate_to/delegate_and_nolog.yml | 8 +
 .../targets/delegate_to/delegate_facts_block.yml | 25 +
 .../delegate_to/delegate_local_from_root.yml | 10 +
 .../delegate_to/delegate_vars_hanldling.yml | 58 +
 .../targets/delegate_to/discovery_applied.yml | 8 +
 .../integration/targets/delegate_to/files/testfile | 1 +
 .../targets/delegate_to/has_hostvars.yml | 64 +
 test/integration/targets/delegate_to/inventory | 9 +
 .../targets/delegate_to/inventory_interpreters | 5 +
 .../delegate_to/library/detect_interpreter.py | 18 +
 .../roles/test_template/templates/foo.j2 | 3 +
 test/integration/targets/delegate_to/runme.sh | 75 +
 .../targets/delegate_to/test_delegate_to.yml | 58 +
 .../delegate_to/test_delegate_to_loop_caching.yml | 45 +
 .../test_delegate_to_loop_randomness.yml | 73 +
 .../targets/delegate_to/test_loop_control.yml | 16 +
 .../targets/delegate_to/verify_interpreter.yml | 47 +
 .../targets/dict_transformations/aliases | 1 +
 .../library/convert_camelCase.py | 48 +
 .../library/convert_snake_case.py | 55 +
 .../targets/dict_transformations/tasks/main.yml | 3 +
 .../tasks/test_convert_camelCase.yml | 33 +
 .../tasks/test_convert_snake_case.yml | 26 +
 test/integration/targets/dnf/aliases | 7 +
 test/integration/targets/dnf/meta/main.yml | 4 +
 test/integration/targets/dnf/tasks/dnf.yml | 774 +++
 .../targets/dnf/tasks/dnfinstallroot.yml | 47 +
 .../targets/dnf/tasks/dnfreleasever.yml | 47 +
 test/integration/targets/dnf/tasks/filters.yml | 134 +
 .../targets/dnf/tasks/filters_check_mode.yml | 118 +
 test/integration/targets/dnf/tasks/gpg.yml | 72 +
 test/integration/targets/dnf/tasks/logging.yml | 48 +
 test/integration/targets/dnf/tasks/main.yml | 62 +
 test/integration/targets/dnf/tasks/modularity.yml | 99 +
 test/integration/targets/dnf/tasks/repo.yml | 309 +
 test/integration/targets/dnf/vars/CentOS.yml | 2 +
 test/integration/targets/dnf/vars/Fedora.yml | 6 +
 test/integration/targets/dnf/vars/RedHat.yml | 2 +
 test/integration/targets/dnf/vars/main.yml | 4 +
 test/integration/targets/dpkg_selections/aliases | 7 +
 .../targets/dpkg_selections/defaults/main.yaml | 1 +
 .../dpkg_selections/tasks/dpkg_selections.yaml | 89 +
 .../targets/dpkg_selections/tasks/main.yaml | 3 +
 test/integration/targets/egg-info/aliases | 1 +
 .../lookup_plugins/import_pkg_resources.py | 11 +
 test/integration/targets/egg-info/tasks/main.yml | 3 +
 test/integration/targets/embedded_module/aliases | 1 +
 .../library/test_integration_module | 3 +
 .../targets/embedded_module/tasks/main.yml | 9 +
 test/integration/targets/environment/aliases | 1 +
 test/integration/targets/environment/runme.sh | 5 +
 .../targets/environment/test_environment.yml | 173 +
 .../targets/error_from_connection/aliases | 1 +
 .../connection_plugins/dummy.py | 45 +
 .../targets/error_from_connection/inventory | 2 +
 .../targets/error_from_connection/play.yml | 20 +
 .../targets/error_from_connection/runme.sh | 5 +
 test/integration/targets/expect/aliases | 3 +
 test/integration/targets/expect/files/foo.txt | 1 +
 .../targets/expect/files/test_command.py | 12 +
 test/integration/targets/expect/tasks/main.yml | 204 +
 test/integration/targets/facts_d/aliases | 1 +
 .../targets/facts_d/files/basdscript.fact | 3 +
 .../targets/facts_d/files/goodscript.fact | 3 +
 .../targets/facts_d/files/preferences.fact | 2 +
 .../targets/facts_d/files/unreadable.fact | 1 +
 test/integration/targets/facts_d/meta/main.yml | 2 +
 test/integration/targets/facts_d/tasks/main.yml | 45 +
 .../targets/facts_linux_network/aliases | 5 +
 .../targets/facts_linux_network/meta/main.yml | 2 +
 .../targets/facts_linux_network/tasks/main.yml | 18 +
 test/integration/targets/failed_when/aliases | 1 +
 .../integration/targets/failed_when/tasks/main.yml | 68 +
 test/integration/targets/fetch/aliases | 2 +
 .../targets/fetch/injection/avoid_slurp_return.yml | 26 +
 test/integration/targets/fetch/injection/here.txt | 1 +
 .../targets/fetch/injection/library/slurp.py | 29 +
 .../targets/fetch/roles/fetch_tests/meta/main.yml | 2 +
 .../targets/fetch/roles/fetch_tests/tasks/main.yml | 141 +
 test/integration/targets/fetch/run_fetch_tests.yml | 5 +
 test/integration/targets/fetch/runme.sh | 12 +
 test/integration/targets/file/aliases | 3 +
 test/integration/targets/file/defaults/main.yml | 2 +
 test/integration/targets/file/files/foo.txt | 1 +
 .../targets/file/files/foobar/directory/fileC | 0
 .../targets/file/files/foobar/directory/fileD | 0
 test/integration/targets/file/files/foobar/fileA | 0
 test/integration/targets/file/files/foobar/fileB | 0
 test/integration/targets/file/meta/main.yml | 3 +
 test/integration/targets/file/tasks/diff_peek.yml | 10 +
 .../targets/file/tasks/directory_as_dest.yml | 345 +
 test/integration/targets/file/tasks/initialize.yml | 15 +
 test/integration/targets/file/tasks/main.yml | 752 +++
 .../targets/file/tasks/selinux_tests.yml | 33 +
 test/integration/targets/file/tasks/state_link.yml | 487 ++
 .../targets/file/tasks/unicode_path.yml | 10 +
 test/integration/targets/filter_core/aliases | 3 +
 .../integration/targets/filter_core/files/9851.txt | 3 +
 .../targets/filter_core/files/fileglob/one.txt | 0
 .../targets/filter_core/files/fileglob/two.txt | 0
 test/integration/targets/filter_core/files/foo.txt | 69 +
 .../filter_core/handle_undefined_type_errors.yml | 29 +
 .../targets/filter_core/host_vars/localhost | 1 +
 test/integration/targets/filter_core/meta/main.yml | 3 +
 test/integration/targets/filter_core/runme.sh | 6 +
 test/integration/targets/filter_core/runme.yml | 3 +
 .../integration/targets/filter_core/tasks/main.yml | 576 ++
 .../targets/filter_core/templates/foo.j2 | 62 +
 .../targets/filter_core/templates/py26json.j2 | 2 +
 test/integration/targets/filter_core/vars/main.yml | 106 +
 test/integration/targets/filter_mathstuff/aliases | 3 +
 .../targets/filter_mathstuff/tasks/main.yml | 288 +
 test/integration/targets/filter_urls/aliases | 3 +
 test/integration/targets/filter_urls/runme.sh | 23 +
 test/integration/targets/filter_urls/runme.yml | 4 +
 .../integration/targets/filter_urls/tasks/main.yml | 31 +
 test/integration/targets/filter_urlsplit/aliases | 3 +
 .../targets/filter_urlsplit/tasks/main.yml | 30 +
 test/integration/targets/find/aliases | 1 +
 test/integration/targets/find/meta/main.yml | 2 +
 test/integration/targets/find/tasks/main.yml | 163 +
 test/integration/targets/gathering/aliases | 1 +
 test/integration/targets/gathering/explicit.yml | 14 +
 test/integration/targets/gathering/implicit.yml | 23 +
 test/integration/targets/gathering/runme.sh | 7 +
 test/integration/targets/gathering/smart.yml | 23 +
 test/integration/targets/gathering/uuid.fact | 10 +
 test/integration/targets/gathering_facts/aliases | 2 +
 .../targets/gathering_facts/cache_plugins/none.py | 50 +
 test/integration/targets/gathering_facts/inventory | 2 +
 .../targets/gathering_facts/library/bogus_facts | 12 +
 .../targets/gathering_facts/library/facts_one | 25 +
 .../targets/gathering_facts/library/facts_two | 24 +
 .../targets/gathering_facts/library/file_utils.py | 54 +
 .../targets/gathering_facts/one_two.json | 27 +
 .../targets/gathering_facts/prevent_clobbering.yml | 8 +
 test/integration/targets/gathering_facts/runme.sh | 21 +
 .../gathering_facts/test_gathering_facts.yml | 474 ++
 .../gathering_facts/test_prevent_injection.yml | 14 +
 .../targets/gathering_facts/test_run_once.yml | 32 +
 .../targets/gathering_facts/two_one.json | 27 +
 test/integration/targets/gathering_facts/uuid.fact | 10 +
 .../targets/gathering_facts/verify_merge_facts.yml | 41 +
 .../targets/gathering_facts/verify_subset.yml | 13 +
 test/integration/targets/get_url/aliases | 4 +
 .../targets/get_url/files/testserver.py | 20 +
 test/integration/targets/get_url/meta/main.yml | 4 +
 test/integration/targets/get_url/tasks/main.yml | 463 ++
 test/integration/targets/getent/aliases | 2 +
 test/integration/targets/getent/meta/main.yml | 2 +
 test/integration/targets/getent/tasks/main.yml | 46 +
 test/integration/targets/git/aliases | 2 +
 .../targets/git/handlers/cleanup-default.yml | 6 +
 .../targets/git/handlers/cleanup-freebsd.yml | 5 +
 test/integration/targets/git/handlers/main.yml | 7 +
 test/integration/targets/git/meta/main.yml | 3 +
 .../targets/git/tasks/ambiguous-ref.yml | 37 +
 test/integration/targets/git/tasks/archive.yml | 135 +
 .../targets/git/tasks/change-repo-url.yml | 132 +
 .../targets/git/tasks/checkout-new-tag.yml | 54 +
 test/integration/targets/git/tasks/depth.yml | 229 +
 .../targets/git/tasks/forcefully-fetch-tag.yml | 38 +
 test/integration/targets/git/tasks/formats.yml | 40 +
 .../targets/git/tasks/gpg-verification.yml | 212 +
 test/integration/targets/git/tasks/localmods.yml | 112 +
 test/integration/targets/git/tasks/main.yml | 40 +
 .../targets/git/tasks/missing_hostkey.yml | 48 +
 .../targets/git/tasks/no-destination.yml | 13 +
 .../integration/targets/git/tasks/reset-origin.yml | 25 +
 .../targets/git/tasks/separate-git-dir.yml | 132 +
 .../targets/git/tasks/setup-local-repos.yml | 45 +
 test/integration/targets/git/tasks/setup.yml | 43 +
 .../targets/git/tasks/specific-revision.yml | 238 +
 test/integration/targets/git/tasks/submodules.yml | 124 +
 test/integration/targets/git/vars/main.yml | 97 +
 test/integration/targets/group/aliases | 2 +
 test/integration/targets/group/files/gidget.py | 12 +
 test/integration/targets/group/files/grouplist.sh | 20 +
 test/integration/targets/group/meta/main.yml | 2 +
 test/integration/targets/group/tasks/main.yml | 40 +
 test/integration/targets/group/tasks/tests.yml | 329 +
 test/integration/targets/group_by/aliases | 1 +
 .../integration/targets/group_by/create_groups.yml | 39 +
 test/integration/targets/group_by/group_vars/all | 3 +
 .../targets/group_by/group_vars/camelus | 1 +
 .../targets/group_by/group_vars/vicugna | 1 +
 .../targets/group_by/inventory.group_by | 9 +
 test/integration/targets/group_by/runme.sh | 6 +
 .../integration/targets/group_by/test_group_by.yml | 187 +
 .../targets/group_by/test_group_by_skipped.yml | 30 +
 test/integration/targets/groupby_filter/aliases | 1 +
 test/integration/targets/groupby_filter/runme.sh | 14 +
 .../targets/groupby_filter/test_jinja2_groupby.yml | 29 +
 test/integration/targets/handler_race/aliases | 3 +
 test/integration/targets/handler_race/inventory | 30 +
 .../roles/do_handlers/handlers/main.yml | 4 +
 .../handler_race/roles/do_handlers/tasks/main.yml | 9 +
 .../handler_race/roles/more_sleep/tasks/main.yml | 8 +
 .../handler_race/roles/random_sleep/tasks/main.yml | 8 +
 test/integration/targets/handler_race/runme.sh | 6 +
 .../targets/handler_race/test_handler_race.yml | 10 +
 test/integration/targets/handlers/aliases | 3 +
 .../integration/targets/handlers/from_handlers.yml | 39 +
 test/integration/targets/handlers/handlers.yml | 2 +
 .../targets/handlers/inventory.handlers | 10 +
 .../roles/test_force_handlers/handlers/main.yml | 2 +
 .../roles/test_force_handlers/tasks/main.yml | 26 +
 .../handlers/roles/test_handlers/handlers/main.yml | 5 +
 .../handlers/roles/test_handlers/meta/main.yml | 1 +
 .../handlers/roles/test_handlers/tasks/main.yml | 52 +
 .../roles/test_handlers_include/handlers/main.yml | 1 +
 .../roles/test_handlers_include/tasks/main.yml | 4 +
 .../test_handlers_include_role/handlers/main.yml | 5 +
 .../roles/test_handlers_include_role/meta/main.yml | 1 +
 .../test_handlers_include_role/tasks/main.yml | 47 +
 .../roles/test_handlers_listen/handlers/main.yml | 10 +
 .../roles/test_handlers_listen/tasks/main.yml | 6 +
 .../test_handlers_meta/handlers/alternate.yml | 12 +
 .../roles/test_handlers_meta/handlers/main.yml | 10 +
 .../roles/test_handlers_meta/tasks/main.yml | 75 +
 .../test_templating_in_handlers/handlers/main.yml | 21 +
 .../test_templating_in_handlers/tasks/main.yml | 26 +
 test/integration/targets/handlers/runme.sh | 95 +
 .../targets/handlers/test_force_handlers.yml | 27 +
 .../integration/targets/handlers/test_handlers.yml | 47 +
 .../handlers/test_handlers_any_errors_fatal.yml | 24 +
 .../targets/handlers/test_handlers_include.yml | 14 +
 .../handlers/test_handlers_include_role.yml | 8 +
 .../handlers/test_handlers_including_task.yml | 16 +
 .../handlers/test_handlers_inexistent_notify.yml | 10 +
 .../targets/handlers/test_handlers_listen.yml | 128 +
 .../handlers/test_handlers_template_run_once.yml | 12 +
 .../targets/handlers/test_listening_handlers.yml | 24 +
 .../handlers/test_templating_in_handlers.yml | 62 +
 test/integration/targets/hash/aliases | 1 +
 test/integration/targets/hash/group_vars/all | 3 +
 test/integration/targets/hash/host_vars/testhost | 2 +
 .../roles/test_hash_behaviour/defaults/main.yml | 21 +
 .../hash/roles/test_hash_behaviour/meta/main.yml | 17 +
 .../hash/roles/test_hash_behaviour/tasks/main.yml | 37 +
 .../hash/roles/test_hash_behaviour/vars/main.yml | 21 +
 test/integration/targets/hash/runme.sh | 11 +
 test/integration/targets/hash/test_hash.yml | 21 +
 test/integration/targets/hash/test_inv1.yml | 10 +
 test/integration/targets/hash/test_inv2.yml | 8 +
 .../targets/hash/test_inventory_hash.yml | 41 +
 .../targets/hash/vars/test_hash_vars.yml | 3 +
 test/integration/targets/hosts_field/aliases | 1 +
 .../targets/hosts_field/inventory.hosts_field | 1 +
 test/integration/targets/hosts_field/runme.sh | 49 +
 .../targets/hosts_field/test_hosts_field.json | 1 +
 .../targets/hosts_field/test_hosts_field.yml | 62 +
 test/integration/targets/ignore_errors/aliases | 1 +
 .../targets/ignore_errors/meta/main.yml | 2 +
 .../targets/ignore_errors/tasks/main.yml | 22 +
 .../integration/targets/ignore_unreachable/aliases | 1 +
 .../ignore_unreachable/fake_connectors/bad_exec.py | 11 +
 .../fake_connectors/bad_put_file.py | 11 +
 .../targets/ignore_unreachable/inventory | 3 +
 .../targets/ignore_unreachable/meta/main.yml | 2 +
 .../targets/ignore_unreachable/runme.sh | 16 +
 .../test_base_cannot_connect.yml | 5 +
 .../ignore_unreachable/test_cannot_connect.yml | 29 +
 .../ignore_unreachable/test_with_bad_plugins.yml | 24 +
 .../incidental_azure_rm_mariadbserver/aliases | 3 +
 .../tasks/main.yml | 640 ++
 .../targets/incidental_azure_rm_resource/aliases | 3 +
 .../incidental_azure_rm_resource/tasks/main.yml | 158 +
 .../incidental_cloud_init_data_facts/aliases | 6 +
 .../tasks/main.yml | 50 +
 .../targets/incidental_cloudformation/aliases | 2 +
 .../incidental_cloudformation/defaults/main.yml | 8 +
 .../files/cf_template.json | 37 +
 .../incidental_cloudformation/tasks/main.yml | 476 ++
 .../targets/incidental_cs_common/aliases | 1 +
 .../targets/incidental_cs_common/defaults/main.yml | 6 +
 .../targets/incidental_deploy_helper/aliases | 1 +
 .../incidental_deploy_helper/tasks/main.yml | 149 +
 .../targets/incidental_flatpak_remote/aliases | 8 +
 .../incidental_flatpak_remote/meta/main.yml | 2 +
 .../incidental_flatpak_remote/tasks/check_mode.yml | 101 +
 .../incidental_flatpak_remote/tasks/main.yml | 57 +
 .../incidental_flatpak_remote/tasks/setup.yml | 27 +
 .../incidental_flatpak_remote/tasks/test.yml | 72 +
 .../targets/incidental_inventory_aws_ec2/aliases | 2 +
 .../playbooks/create_inventory_config.yml | 11 +
 .../playbooks/empty_inventory_config.yml | 9 +
 .../playbooks/populate_cache.yml | 64 +
 .../playbooks/setup.yml | 62 +
 .../playbooks/tear_down.yml | 39 +
 .../test_invalid_aws_ec2_inventory_config.yml | 9 +
 .../playbooks/test_inventory_cache.yml | 18 +
 .../playbooks/test_populating_inventory.yml | 91 +
 .../test_populating_inventory_with_constructed.yml | 79 +
 .../playbooks/test_refresh_inventory.yml | 74 +
 .../targets/incidental_inventory_aws_ec2/runme.sh | 35 +
 .../templates/inventory.yml | 12 +
 .../templates/inventory_with_cache.yml | 12 +
 .../templates/inventory_with_constructed.yml | 20 +
 .../incidental_inventory_aws_ec2/test.aws_ec2.yml | 0
 .../incidental_inventory_docker_swarm/aliases | 13 +
 .../inventory_1.docker_swarm.yml | 3 +
 .../inventory_2.docker_swarm.yml | 5 +
 .../meta/main.yml | 3 +
 .../playbooks/swarm_cleanup.yml | 19 +
 .../playbooks/swarm_setup.yml | 15 +
 .../playbooks/test_inventory_1.yml | 58 +
 .../playbooks/test_inventory_2.yml | 35 +
 .../incidental_inventory_docker_swarm/runme.sh | 22 +
 .../targets/incidental_inventory_foreman/aliases | 3 +
 .../incidental_inventory_foreman/ansible.cfg | 5 +
 .../incidental_inventory_foreman/inspect_cache.yml | 31 +
 .../targets/incidental_inventory_foreman/runme.sh | 50 +
 .../test_foreman_inventory.yml | 59 +
 .../targets/incidental_ios_file/aliases | 2 +
 .../targets/incidental_ios_file/defaults/main.yaml | 2 +
 .../targets/incidental_ios_file/ios1.cfg | 3 +
 .../targets/incidental_ios_file/nonascii.bin | Bin 0 -> 32768 bytes
 .../targets/incidental_ios_file/tasks/cli.yaml | 17 +
 .../targets/incidental_ios_file/tasks/main.yaml | 2 +
 .../incidental_ios_file/tests/cli/net_get.yaml | 52 +
 .../incidental_ios_file/tests/cli/net_put.yaml | 73 +
 .../targets/incidental_lookup_rabbitmq/aliases | 6 +
 .../incidental_lookup_rabbitmq/meta/main.yml | 2 +
 .../incidental_lookup_rabbitmq/tasks/main.yml | 5 +
 .../incidental_lookup_rabbitmq/tasks/ubuntu.yml | 138 +
 test/integration/targets/incidental_lvg/aliases | 6 +
 .../targets/incidental_lvg/meta/main.yml | 2 +
 .../targets/incidental_lvg/tasks/main.yml | 15 +
 .../targets/incidental_lvg/tasks/setup.yml | 13 +
 .../targets/incidental_lvg/tasks/teardown.yml | 17 +
 .../incidental_lvg/tasks/test_grow_reduce.yml | 33 +
 .../incidental_lvg/tasks/test_indempotency.yml | 15 +
 .../targets/incidental_mongodb_parameter/aliases | 8 +
 .../incidental_mongodb_parameter/defaults/main.yml | 21 +
 .../incidental_mongodb_parameter/meta/main.yml | 3 +
 .../incidental_mongodb_parameter/tasks/main.yml | 143 +
 .../tasks/mongod_singlenode.yml | 55 +
 .../tasks/mongod_teardown.yml | 25 +
 .../targets/incidental_postgresql_user/aliases | 4 +
 .../incidental_postgresql_user/defaults/main.yml | 3 +
 .../incidental_postgresql_user/meta/main.yml | 2 +
 .../incidental_postgresql_user/tasks/main.yml | 7 +
 .../tasks/postgresql_user_general.yml | 741 +++
 .../tasks/postgresql_user_initial.yml | 153 +
 .../tasks/test_no_password_change.yml | 167 +
 .../tasks/test_password.yml | 336 +
 .../targets/incidental_setup_docker/aliases | 2 +
 .../incidental_setup_docker/defaults/main.yml | 18 +
 .../incidental_setup_docker/handlers/main.yml | 14 +
 .../targets/incidental_setup_docker/meta/main.yml | 2 +
 .../incidental_setup_docker/tasks/Debian.yml | 43 +
 .../incidental_setup_docker/tasks/Fedora.yml | 21 +
 .../incidental_setup_docker/tasks/RedHat-7.yml | 44 +
 .../incidental_setup_docker/tasks/RedHat-8.yml | 33 +
 .../targets/incidental_setup_docker/tasks/Suse.yml | 7 +
 .../targets/incidental_setup_docker/tasks/main.yml | 113 +
 .../incidental_setup_docker/vars/Debian.yml | 9 +
 .../incidental_setup_docker/vars/Fedora.yml | 5 +
 .../incidental_setup_docker/vars/RedHat-7.yml | 18 +
 .../incidental_setup_docker/vars/RedHat-8.yml | 9 +
 .../targets/incidental_setup_docker/vars/Suse.yml | 2 +
 .../incidental_setup_docker/vars/Ubuntu-14.yml | 5 +
 .../incidental_setup_docker/vars/default.yml | 0
 .../targets/incidental_setup_ec2/aliases | 1 +
 .../targets/incidental_setup_ec2/defaults/main.yml | 2 +
 .../targets/incidental_setup_ec2/tasks/common.yml | 119 +
 .../targets/incidental_setup_ec2/vars/main.yml | 3 +
 .../incidental_setup_flatpak_remote/README.md | 138 +
 .../incidental_setup_flatpak_remote/aliases | 1 +
 .../files/repo.tar.xz | Bin 0 -> 20440 bytes
 .../handlers/main.yaml | 4 +
 .../incidental_setup_flatpak_remote/meta/main.yaml | 2 +
 .../tasks/main.yaml | 22 +
 .../targets/incidental_setup_mongodb/aliases | 1 +
 .../incidental_setup_mongodb/defaults/main.yml | 46 +
 .../incidental_setup_mongodb/handlers/main.yml | 24 +
 .../incidental_setup_mongodb/tasks/main.yml | 168 +
 .../targets/incidental_setup_postgresql_db/aliases | 1 +
 .../defaults/main.yml | 17 +
 .../files/dummy--1.0.sql | 2 +
 .../files/dummy--2.0.sql | 2 +
 .../files/dummy--3.0.sql | 2 +
 .../files/dummy.control | 3 +
 .../files/pg_hba.conf | 10 +
 .../incidental_setup_postgresql_db/tasks/main.yml | 222 +
 .../incidental_setup_postgresql_db/tasks/ssl.yml | 81 +
 .../vars/Debian-8.yml | 8 +
 .../vars/FreeBSD-11-py3.yml | 12 +
 .../vars/FreeBSD-11.yml | 12 +
 .../vars/FreeBSD-12.0-py3.yml | 12 +
 .../vars/FreeBSD-12.0.yml | 12 +
 .../vars/FreeBSD-12.1-py3.yml | 12 +
 .../vars/FreeBSD-12.1.yml | 12 +
 .../vars/RedHat-py3.yml | 8 +
 .../incidental_setup_postgresql_db/vars/RedHat.yml | 7 +
 .../vars/Ubuntu-12.yml | 8 +
 .../vars/Ubuntu-14.yml | 8 +
 .../vars/Ubuntu-16-py3.yml | 8 +
 .../vars/Ubuntu-16.yml | 8 +
 .../vars/Ubuntu-18-py3.yml | 8 +
 .../vars/Ubuntu-20-py3.yml | 8 +
 .../vars/default-py3.yml | 6 +
 .../vars/default.yml | 6 +
 .../targets/incidental_setup_rabbitmq/aliases | 1 +
 .../incidental_setup_rabbitmq/files/rabbitmq.conf | 8 +
 .../incidental_setup_rabbitmq/meta/main.yml | 3 +
 .../incidental_setup_rabbitmq/tasks/main.yml | 5 +
 .../incidental_setup_rabbitmq/tasks/ubuntu.yml | 63 +
 .../targets/incidental_setup_tls/aliases | 1 +
 .../incidental_setup_tls/files/ca_certificate.pem | 19 +
 .../targets/incidental_setup_tls/files/ca_key.pem | 28 +
 .../files/client_certificate.pem | 20 +
 .../incidental_setup_tls/files/client_key.pem | 27 +
 .../files/server_certificate.pem | 20 +
 .../incidental_setup_tls/files/server_key.pem | 27 +
 .../targets/incidental_setup_tls/tasks/main.yml | 21 +
 .../targets/incidental_synchronize/aliases | 1 +
 .../targets/incidental_synchronize/files/bar.txt | 1 +
 .../targets/incidental_synchronize/files/foo.txt | 1 +
 .../targets/incidental_synchronize/tasks/main.yml | 273 +
 .../targets/incidental_timezone/aliases | 5 +
 .../targets/incidental_timezone/tasks/main.yml | 57 +
 .../targets/incidental_timezone/tasks/test.yml | 607 ++
 .../targets/incidental_vyos_config/aliases | 2 +
 .../incidental_vyos_config/defaults/main.yaml | 3 +
 .../targets/incidental_vyos_config/tasks/cli.yaml | 22 +
 .../incidental_vyos_config/tasks/cli_config.yaml | 16 +
 .../targets/incidental_vyos_config/tasks/main.yaml | 3 +
 .../incidental_vyos_config/tests/cli/backup.yaml | 113 +
 .../tests/cli/check_config.yaml | 63 +
 .../incidental_vyos_config/tests/cli/comment.yaml | 34 +
 .../incidental_vyos_config/tests/cli/config.cfg | 3 +
 .../incidental_vyos_config/tests/cli/save.yaml | 54 +
 .../incidental_vyos_config/tests/cli/simple.yaml | 53 +
 .../tests/cli_config/cli_backup.yaml | 114 +
 .../tests/cli_config/cli_basic.yaml | 28 +
 .../tests/cli_config/cli_comment.yaml | 30 +
 .../incidental_vyos_lldp_interfaces/aliases | 2 +
 .../defaults/main.yaml | 3 +
 .../incidental_vyos_lldp_interfaces/meta/main.yaml | 3 +
 .../incidental_vyos_lldp_interfaces/tasks/cli.yaml | 19 +
 .../tasks/main.yaml | 2 +
 .../tests/cli/_populate.yaml | 14 +
 .../tests/cli/_populate_intf.yaml | 10 +
 .../tests/cli/_remove_config.yaml | 8 +
 .../tests/cli/deleted.yaml | 46 +
 .../tests/cli/empty_config.yaml | 36 +
 .../tests/cli/merged.yaml | 58 +
 .../tests/cli/overridden.yaml | 49 +
 .../tests/cli/replaced.yaml | 63 +
 .../tests/cli/rtt.yaml | 57 +
 .../incidental_vyos_lldp_interfaces/vars/main.yaml | 130 +
 .../targets/incidental_vyos_prepare_tests/aliases | 1 +
 .../incidental_vyos_prepare_tests/tasks/main.yaml | 13 +
 .../targets/incidental_win_copy/aliases | 2 +
 .../targets/incidental_win_copy/defaults/main.yml | 1 +
 .../files-different/vault/folder/nested-vault-file | 6 +
 .../files-different/vault/readme.txt | 5 +
 .../files-different/vault/vault-file | 6 +
 .../targets/incidental_win_copy/files/empty.txt | 0
 .../targets/incidental_win_copy/files/foo.txt | 1 +
 .../incidental_win_copy/files/subdir/bar.txt | 1 +
 .../files/subdir/subdir2/baz.txt | 1 +
 .../files/subdir/subdir2/subdir3/subdir4/qux.txt | 1 +
 .../targets/incidental_win_copy/tasks/main.yml | 34 +
 .../incidental_win_copy/tasks/remote_tests.yml | 471 ++
 .../targets/incidental_win_copy/tasks/tests.yml | 535 ++
 .../incidental_win_data_deduplication/aliases | 5 +
 .../meta/main.yml | 2 +
 .../tasks/main.yml | 2 +
 .../tasks/pre_test.yml | 40 +
 .../tasks/tests.yml | 47 +
 .../templates/partition_creation_script.j2 | 11 +
 .../templates/partition_deletion_script.j2 | 3 +
 .../integration/targets/incidental_win_dsc/aliases | 6 +
 .../ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 | 41 +
 .../ANSIBLE_xSetReboot.schema.mof | 7 +
 .../ANSIBLE_xTestResource.psm1 | 214 +
 .../ANSIBLE_xTestResource.schema.mof | 60 +
 .../files/xTestDsc/1.0.0/xTestDsc.psd1 | 13 +
 .../ANSIBLE_xTestResource.psm1 | 214 +
 .../ANSIBLE_xTestResource.schema.mof | 63 +
 .../files/xTestDsc/1.0.1/xTestDsc.psd1 | 13 +
 .../targets/incidental_win_dsc/meta/main.yml | 2 +
 .../targets/incidental_win_dsc/tasks/main.yml | 39 +
 .../targets/incidental_win_dsc/tasks/tests.yml | 544 ++
 .../targets/incidental_win_lineinfile/aliases | 3 +
 .../incidental_win_lineinfile/files/test.txt | 5 +
 .../files/test_linebreak.txt | 0
 .../files/test_quoting.txt | 0
 .../incidental_win_lineinfile/files/testempty.txt | 0
 .../incidental_win_lineinfile/files/testnoeof.txt | 2 +
 .../incidental_win_lineinfile/meta/main.yml | 2 +
 .../incidental_win_lineinfile/tasks/main.yml | 708 ++
 .../targets/incidental_win_ping/aliases | 2 +
 .../library/win_ping_set_attr.ps1 | 31 +
 .../library/win_ping_strict_mode_error.ps1 | 30 +
 .../library/win_ping_syntax_error.ps1 | 30 +
 .../incidental_win_ping/library/win_ping_throw.ps1 | 30 +
 .../library/win_ping_throw_string.ps1 | 30 +
 .../targets/incidental_win_ping/tasks/main.yml | 67 +
 .../targets/incidental_win_prepare_tests/aliases | 1 +
 .../incidental_win_prepare_tests/meta/main.yml | 3 +
 .../incidental_win_prepare_tests/tasks/main.yml | 29 +
 .../targets/incidental_win_psexec/aliases | 2 +
 .../targets/incidental_win_psexec/meta/main.yml | 2 +
 .../targets/incidental_win_psexec/tasks/main.yml | 80 +
 .../targets/incidental_win_reboot/aliases | 2 +
 .../targets/incidental_win_reboot/tasks/main.yml | 70 +
 .../templates/post_reboot.ps1 | 8 +
 .../targets/incidental_win_security_policy/aliases | 2 +
 .../library/test_win_security_policy.ps1 | 53 +
 .../incidental_win_security_policy/tasks/main.yml | 41 +
 .../incidental_win_security_policy/tasks/tests.yml | 186 +
 test/integration/targets/incidental_xml/aliases | 4 +
 .../fixtures/ansible-xml-beers-unicode.xml | 13 +
 .../incidental_xml/fixtures/ansible-xml-beers.xml | 14 +
 .../fixtures/ansible-xml-namespaced-beers.xml | 14 +
 .../results/test-add-children-elements-unicode.xml | 14 +
 .../results/test-add-children-elements.xml | 14 +
 .../results/test-add-children-from-groupvars.xml | 14 +
 .../results/test-add-children-insertafter.xml | 17 +
 .../results/test-add-children-insertbefore.xml | 17 +
 .../test-add-children-with-attributes-unicode.xml | 14 +
 .../results/test-add-children-with-attributes.xml | 14 +
 .../results/test-add-element-implicitly.yml | 32 +
 .../test-add-namespaced-children-elements.xml | 14 +
 .../results/test-pretty-print-only.xml | 14 +
 .../incidental_xml/results/test-pretty-print.xml | 15 +
 .../results/test-remove-attribute.xml | 14 +
 .../incidental_xml/results/test-remove-element.xml | 13 +
 .../results/test-remove-namespaced-attribute.xml | 14 +
 .../results/test-remove-namespaced-element.xml | 13 +
 .../results/test-set-attribute-value-unicode.xml | 14 +
 .../results/test-set-attribute-value.xml | 14 +
 .../results/test-set-children-elements-level.xml | 11 +
 .../results/test-set-children-elements-unicode.xml | 11 +
 .../results/test-set-children-elements.xml | 11 +
 .../results/test-set-element-value-empty.xml | 14 +
 .../results/test-set-element-value-unicode.xml | 14 +
 .../results/test-set-element-value.xml | 14 +
 .../test-set-namespaced-attribute-value.xml | 14 +
 .../results/test-set-namespaced-element-value.xml | 14 +
 .../targets/incidental_xml/tasks/main.yml | 67 +
 .../tasks/test-add-children-elements-unicode.yml | 29 +
 .../tasks/test-add-children-elements.yml | 29 +
 .../tasks/test-add-children-from-groupvars.yml | 28 +
 .../tasks/test-add-children-insertafter.yml | 32 +
 .../tasks/test-add-children-insertbefore.yml | 32 +
 .../test-add-children-with-attributes-unicode.yml | 31 +
 .../tasks/test-add-children-with-attributes.yml | 35 +
 .../tasks/test-add-element-implicitly.yml | 237 +
 .../test-add-namespaced-children-elements.yml | 32 +
 .../tasks/test-children-elements-xml.yml | 30 +
 .../incidental_xml/tasks/test-count-unicode.yml | 19 +
 .../targets/incidental_xml/tasks/test-count.yml | 19 +
 .../tasks/test-get-element-content-unicode.yml | 32 +
 .../tasks/test-get-element-content.yml | 52 +
 .../tasks/test-mutually-exclusive-attributes.yml | 22 +
 .../tasks/test-pretty-print-only.yml | 29 +
 .../incidental_xml/tasks/test-pretty-print.yml | 30 +
 .../incidental_xml/tasks/test-remove-attribute.yml | 28 +
 .../incidental_xml/tasks/test-remove-element.yml | 28 +
 .../tasks/test-remove-namespaced-attribute.yml | 33 +
 .../tasks/test-remove-namespaced-element.yml | 33 +
 .../tasks/test-set-attribute-value-unicode.yml | 29 +
 .../tasks/test-set-attribute-value.yml | 29 +
 .../tasks/test-set-children-elements-level.yml | 74 +
 .../tasks/test-set-children-elements-unicode.yml | 46 +
 .../tasks/test-set-children-elements.yml | 53 +
 .../tasks/test-set-element-value-empty.yml | 28 +
 .../tasks/test-set-element-value-unicode.yml | 43 +
 .../tasks/test-set-element-value.yml | 43 +
 .../tasks/test-set-namespaced-attribute-value.yml | 34 +
 .../test-set-namespaced-children-elements.yml | 57 +
 .../tasks/test-set-namespaced-element-value.yml | 46 +
 .../incidental_xml/tasks/test-xmlstring.yml | 81 +
 .../targets/incidental_xml/vars/main.yml | 6 +
 test/integration/targets/include_import/aliases | 2 +
 .../targets/include_import/apply/import_apply.yml | 31 +
 .../targets/include_import/apply/include_apply.yml | 50 +
 .../targets/include_import/apply/include_tasks.yml | 2 +
 .../apply/roles/include_role/tasks/main.yml | 2 +
 .../apply/roles/include_role2/tasks/main.yml | 2 +
 .../empty_group_warning/playbook.yml | 13 +
 .../include_import/empty_group_warning/tasks.yml | 3 +
 .../grandchild/block_include_tasks.yml | 2 +
 .../targets/include_import/grandchild/import.yml | 1 +
 .../grandchild/import_include_include_tasks.yml | 2 +
 .../include_import/grandchild/include_level_1.yml | 1 +
 .../include_import/handler_addressing/playbook.yml | 11 +
 .../roles/import_handler_test/handlers/main.yml | 2 +
 .../roles/import_handler_test/tasks/handlers.yml | 2 +
 .../roles/import_handler_test/tasks/main.yml | 3 +
 .../roles/include_handler_test/handlers/main.yml | 2 +
 .../roles/include_handler_test/tasks/handlers.yml | 2 +
 .../roles/include_handler_test/tasks/main.yml | 3 +
 test/integration/targets/include_import/inventory | 6 +
 .../include_import/nestedtasks/nested/nested.yml | 2 +
 .../include_import/parent_templating/playbook.yml | 11 +
 .../roles/test/tasks/localhost.yml | 1 +
 .../parent_templating/roles/test/tasks/main.yml | 1 +
 .../parent_templating/roles/test/tasks/other.yml | 2 +
 .../include_import/playbook/group_vars/all.yml | 1 +
 .../targets/include_import/playbook/playbook1.yml | 9 +
 .../targets/include_import/playbook/playbook2.yml | 9 +
 .../targets/include_import/playbook/playbook3.yml | 10 +
 .../targets/include_import/playbook/playbook4.yml | 9 +
 .../playbook/playbook_needing_vars.yml | 6 +
.../roles/import_playbook_role/tasks/main.yml | 2 + .../playbook/sub_playbook/library/helloworld.py | 30 + .../playbook/sub_playbook/sub_playbook.yml | 4 + .../playbook/test_import_playbook.yml | 26 + .../playbook/test_import_playbook_tags.yml | 10 + .../targets/include_import/playbook/validate1.yml | 10 + .../targets/include_import/playbook/validate2.yml | 10 + .../targets/include_import/playbook/validate34.yml | 11 + .../include_import/playbook/validate_tags.yml | 11 + .../include_import/public_exposure/no_bleeding.yml | 25 + .../public_exposure/no_overwrite_roles.yml | 4 + .../include_import/public_exposure/playbook.yml | 56 + .../roles/call_import/tasks/main.yml | 6 + .../roles/dynamic/defaults/main.yml | 1 + .../public_exposure/roles/dynamic/tasks/main.yml | 5 + .../public_exposure/roles/dynamic/vars/main.yml | 1 + .../roles/dynamic_private/defaults/main.yml | 1 + .../roles/dynamic_private/tasks/main.yml | 5 + .../roles/dynamic_private/vars/main.yml | 1 + .../public_exposure/roles/from/defaults/from.yml | 1 + .../public_exposure/roles/from/tasks/from.yml | 5 + .../public_exposure/roles/from/vars/from.yml | 1 + .../roles/regular/defaults/main.yml | 1 + .../public_exposure/roles/regular/tasks/main.yml | 5 + .../public_exposure/roles/regular/vars/main.yml | 1 + .../public_exposure/roles/static/defaults/main.yml | 1 + .../public_exposure/roles/static/tasks/main.yml | 5 + .../public_exposure/roles/static/vars/main.yml | 1 + .../include_import/role/test_import_role.yml | 139 + .../include_import/role/test_include_role.yml | 166 + .../role/test_include_role_vars_from.yml | 10 + .../roles/delegated_handler/handlers/main.yml | 4 + .../roles/delegated_handler/tasks/main.yml | 3 + .../roles/dup_allowed_role/meta/main.yml | 2 + .../roles/dup_allowed_role/tasks/main.yml | 3 + .../roles/loop_name_assert/tasks/main.yml | 4 + .../nested/nested_dep_role2/defaults/main.yml | 3 + .../nested/nested/nested_dep_role2/meta/main.yml | 2 + .../nested/nested/nested_dep_role2/tasks/main.yml | 2 + .../nested/nested/nested_dep_role2/tasks/rund.yml | 2 + .../nested/nested/nested_dep_role2/vars/main.yml | 2 + .../nested/nested_dep_role2a/defaults/main.yml | 3 + .../nested/nested/nested_dep_role2a/meta/main.yml | 2 + .../nested/nested/nested_dep_role2a/tasks/main.yml | 2 + .../nested/nested/nested_dep_role2a/tasks/rune.yml | 2 + .../nested/nested/nested_dep_role2a/vars/main.yml | 2 + .../nested/nested_dep_role2b/defaults/main.yml | 3 + .../nested/nested/nested_dep_role2b/meta/main.yml | 1 + .../nested/nested/nested_dep_role2b/tasks/main.yml | 2 + .../nested/nested/nested_dep_role2b/tasks/runf.yml | 2 + .../nested/nested/nested_dep_role2b/vars/main.yml | 2 + .../roles/nested/nested_dep_role/defaults/main.yml | 3 + .../roles/nested/nested_dep_role/meta/main.yml | 2 + .../roles/nested/nested_dep_role/tasks/main.yml | 2 + .../roles/nested/nested_dep_role/tasks/runc.yml | 4 + .../roles/nested/nested_dep_role/vars/main.yml | 2 + .../roles/nested_include_task/meta/main.yml | 2 + .../roles/nested_include_task/tasks/main.yml | 2 + .../roles/nested_include_task/tasks/runa.yml | 3 + .../include_import/roles/role1/tasks/canary1.yml | 2 + .../include_import/roles/role1/tasks/canary2.yml | 2 + .../include_import/roles/role1/tasks/canary3.yml | 2 + .../include_import/roles/role1/tasks/fail.yml | 3 + .../include_import/roles/role1/tasks/main.yml | 3 + .../include_import/roles/role1/tasks/r1t01.yml | 1 + .../include_import/roles/role1/tasks/r1t02.yml | 1 + .../include_import/roles/role1/tasks/r1t03.yml | 1 + 
.../include_import/roles/role1/tasks/r1t04.yml | 1 + .../include_import/roles/role1/tasks/r1t05.yml | 1 + .../include_import/roles/role1/tasks/r1t06.yml | 1 + .../include_import/roles/role1/tasks/r1t07.yml | 1 + .../include_import/roles/role1/tasks/r1t08.yml | 1 + .../include_import/roles/role1/tasks/r1t09.yml | 1 + .../include_import/roles/role1/tasks/r1t10.yml | 1 + .../include_import/roles/role1/tasks/r1t11.yml | 1 + .../include_import/roles/role1/tasks/r1t12.yml | 2 + .../include_import/roles/role1/tasks/tasks.yml | 2 + .../include_import/roles/role1/tasks/vartest.yml | 2 + .../include_import/roles/role1/vars/main.yml | 1 + .../include_import/roles/role1/vars/role1vars.yml | 1 + .../include_import/roles/role2/tasks/main.yml | 3 + .../include_import/roles/role3/defaults/main.yml | 2 + .../include_import/roles/role3/handlers/main.yml | 3 + .../include_import/roles/role3/tasks/main.yml | 3 + .../include_import/roles/role3/tasks/tasks.yml | 2 + .../include_import/roles/role3/tasks/vartest.yml | 2 + .../include_import/roles/role3/vars/main.yml | 1 + .../include_import/roles/role3/vars/role3vars.yml | 2 + .../roles/role_with_deps/meta/main.yml | 3 + .../roles/role_with_deps/tasks/main.yml | 2 + .../targets/include_import/run_once/include_me.yml | 2 + .../targets/include_import/run_once/playbook.yml | 61 + test/integration/targets/include_import/runme.sh | 124 + .../targets/include_import/tasks/debug_item.yml | 2 + .../targets/include_import/tasks/hello/.gitignore | 1 + .../targets/include_import/tasks/hello/keep | 0 .../targets/include_import/tasks/nested/nested.yml | 2 + .../targets/include_import/tasks/tasks1.yml | 5 + .../targets/include_import/tasks/tasks2.yml | 5 + .../targets/include_import/tasks/tasks3.yml | 5 + .../targets/include_import/tasks/tasks4.yml | 5 + .../targets/include_import/tasks/tasks5.yml | 6 + .../targets/include_import/tasks/tasks6.yml | 5 + .../tasks/test_allow_single_role_dup.yml | 8 + .../include_import/tasks/test_import_tasks.yml | 41 + .../tasks/test_import_tasks_tags.yml | 23 + .../tasks/test_include_dupe_loop.yml | 8 + .../include_import/tasks/test_include_tasks.yml | 44 + .../tasks/test_include_tasks_tags.yml | 25 + .../include_import/tasks/test_recursion.yml | 6 + .../targets/include_import/tasks/validate3.yml | 4 + .../targets/include_import/tasks/validate_tags.yml | 8 + .../include_import/test_copious_include_tasks.yml | 44 + .../test_copious_include_tasks_fqcn.yml | 44 + .../test_grandparent_inheritance.yml | 29 + .../test_grandparent_inheritance_fqcn.yml | 29 + .../include_import/test_include_loop_fqcn.yml | 17 + .../include_import/test_loop_var_bleed.yaml | 9 + .../targets/include_import/test_nested_tasks.yml | 6 + .../include_import/test_nested_tasks_fqcn.yml | 6 + .../targets/include_import/test_role_recursion.yml | 7 + .../include_import/test_role_recursion_fqcn.yml | 7 + .../include_import/undefined_var/include_tasks.yml | 5 + .../undefined_var/include_that_defines_var.yml | 5 + .../include_import/undefined_var/playbook.yml | 36 + .../valid_include_keywords/include_me.yml | 6 + .../valid_include_keywords/include_me_listen.yml | 2 + .../valid_include_keywords/include_me_notify.yml | 2 + .../valid_include_keywords/playbook.yml | 40 + .../targets/include_parent_role_vars/aliases | 2 + .../tasks/included_by_other_role.yml | 37 + .../tasks/included_by_ourselves.yml | 14 + .../include_parent_role_vars/tasks/main.yml | 21 + .../targets/include_vars-ad-hoc/aliases | 1 + .../targets/include_vars-ad-hoc/dir/inc.yml | 1 + 
.../targets/include_vars-ad-hoc/runme.sh | 6 + test/integration/targets/include_vars/aliases | 1 + .../targets/include_vars/defaults/main.yml | 3 + .../targets/include_vars/tasks/main.yml | 164 + .../targets/include_vars/vars/all/all.yml | 3 + .../vars/environments/development/all.yml | 3 + .../environments/development/services/webapp.yml | 4 + .../include_vars/vars/services/service_vars.yml | 2 + .../vars/services/service_vars_fqcn.yml | 3 + .../targets/include_vars/vars/services/webapp.yml | 4 + .../vars/webapp/file_without_extension | 2 + .../targets/include_when_parent_is_dynamic/aliases | 2 + .../include_when_parent_is_dynamic/playbook.yml | 4 + .../include_when_parent_is_dynamic/runme.sh | 13 + .../syntax_error.yml | 1 + .../include_when_parent_is_dynamic/tasks.yml | 12 + .../targets/include_when_parent_is_static/aliases | 2 + .../include_when_parent_is_static/playbook.yml | 4 + .../targets/include_when_parent_is_static/runme.sh | 13 + .../include_when_parent_is_static/syntax_error.yml | 1 + .../include_when_parent_is_static/tasks.yml | 12 + test/integration/targets/includes/aliases | 1 + .../includes/roles/test_includes/handlers/main.yml | 1 + .../roles/test_includes/handlers/more_handlers.yml | 12 + .../roles/test_includes/tasks/branch_toplevel.yml | 9 + .../includes/roles/test_includes/tasks/empty.yml | 0 .../roles/test_includes/tasks/included_task1.yml | 9 + .../roles/test_includes/tasks/leaf_sublevel.yml | 2 + .../includes/roles/test_includes/tasks/main.yml | 106 + .../roles/test_includes/tasks/not_a_role_task.yml | 4 + .../roles/test_includes_free/tasks/inner.yml | 2 + .../roles/test_includes_free/tasks/inner_fqcn.yml | 2 + .../roles/test_includes_free/tasks/main.yml | 9 + .../test_includes_host_pinned/tasks/inner.yml | 2 + .../roles/test_includes_host_pinned/tasks/main.yml | 6 + test/integration/targets/includes/runme.sh | 5 + .../targets/includes/test_include_free.yml | 10 + .../targets/includes/test_include_host_pinned.yml | 9 + .../integration/targets/includes/test_includes.yml | 7 + .../targets/includes/test_includes2.yml | 22 + .../targets/includes/test_includes3.yml | 6 + .../targets/includes/test_includes4.yml | 2 + test/integration/targets/includes_race/aliases | 2 + test/integration/targets/includes_race/inventory | 30 + .../roles/random_sleep/tasks/main.yml | 8 + .../includes_race/roles/set_a_fact/tasks/fact1.yml | 4 + .../includes_race/roles/set_a_fact/tasks/fact2.yml | 4 + test/integration/targets/includes_race/runme.sh | 5 + .../targets/includes_race/test_includes_race.yml | 19 + test/integration/targets/infra/aliases | 3 + test/integration/targets/infra/inventory.local | 2 + test/integration/targets/infra/library/test.py | 21 + test/integration/targets/infra/runme.sh | 39 + test/integration/targets/infra/test_test_infra.yml | 25 + .../targets/interpreter_discovery_python/aliases | 2 + .../library/test_echo_module.py | 29 + .../interpreter_discovery_python/tasks/main.yml | 177 + .../aliases | 2 + .../delegate_facts.yml | 10 + .../inventory | 2 + .../runme.sh | 5 + test/integration/targets/inventory/aliases | 1 + .../integration/targets/inventory/inv_with_int.yml | 6 + test/integration/targets/inventory/playbook.yml | 4 + test/integration/targets/inventory/runme.sh | 86 + test/integration/targets/inventory/strategy.yml | 12 + test/integration/targets/inventory_cache/aliases | 1 + .../targets/inventory_cache/cache/.keep | 0 .../targets/inventory_cache/cache_host.yml | 4 + .../plugins/inventory/cache_host.py | 56 + test/integration/targets/inventory_cache/runme.sh | 
23 + test/integration/targets/inventory_ini/aliases | 1 + .../targets/inventory_ini/inventory.ini | 5 + test/integration/targets/inventory_ini/runme.sh | 5 + .../targets/inventory_ini/test_ansible_become.yml | 11 + test/integration/targets/inventory_script/aliases | 1 + .../targets/inventory_script/inventory.json | 1045 +++ .../targets/inventory_script/inventory.sh | 7 + test/integration/targets/inventory_script/runme.sh | 5 + test/integration/targets/inventory_yaml/aliases | 2 + test/integration/targets/inventory_yaml/empty.json | 10 + test/integration/targets/inventory_yaml/runme.sh | 4 + .../targets/inventory_yaml/success.json | 61 + test/integration/targets/inventory_yaml/test.yml | 27 + .../targets/jinja2_native_types/aliases | 1 + .../jinja2_native_types/nested_undefined.yml | 24 + .../targets/jinja2_native_types/runme.sh | 10 + .../targets/jinja2_native_types/runtests.yml | 50 + .../targets/jinja2_native_types/test_bool.yml | 53 + .../targets/jinja2_native_types/test_casting.yml | 31 + .../jinja2_native_types/test_concatentation.yml | 88 + .../targets/jinja2_native_types/test_dunder.yml | 23 + .../targets/jinja2_native_types/test_hostvars.yml | 10 + .../targets/jinja2_native_types/test_none.yml | 11 + .../targets/jinja2_native_types/test_template.yml | 27 + .../jinja2_native_types/test_template_newlines.j2 | 4 + .../targets/jinja2_native_types/test_types.yml | 20 + .../targets/jinja2_native_types/test_vault.yml | 16 + .../targets/jinja2_native_types/test_vault_pass | 1 + test/integration/targets/json_cleanup/aliases | 1 + .../targets/json_cleanup/library/bad_json | 11 + .../json_cleanup/module_output_cleaning.yml | 26 + test/integration/targets/json_cleanup/runme.sh | 5 + test/integration/targets/known_hosts/aliases | 1 + .../targets/known_hosts/defaults/main.yml | 3 + .../targets/known_hosts/files/existing_known_hosts | 5 + test/integration/targets/known_hosts/meta/main.yml | 2 + .../integration/targets/known_hosts/tasks/main.yml | 377 ++ test/integration/targets/limit_inventory/aliases | 1 + test/integration/targets/limit_inventory/hosts.yml | 5 + test/integration/targets/limit_inventory/runme.sh | 31 + test/integration/targets/lineinfile/aliases | 1 + .../targets/lineinfile/files/firstmatch.txt | 5 + .../integration/targets/lineinfile/files/test.conf | 5 + test/integration/targets/lineinfile/files/test.txt | 5 + .../targets/lineinfile/files/test_58923.txt | 4 + .../targets/lineinfile/files/testempty.txt | 0 .../targets/lineinfile/files/testmultiple.txt | 7 + .../targets/lineinfile/files/testnoeof.txt | 2 + test/integration/targets/lineinfile/meta/main.yml | 20 + test/integration/targets/lineinfile/tasks/main.yml | 1157 ++++ test/integration/targets/lineinfile/vars/main.yml | 29 + test/integration/targets/lookup_config/aliases | 3 + .../targets/lookup_config/tasks/main.yml | 59 + test/integration/targets/lookup_dict/aliases | 3 + .../integration/targets/lookup_dict/tasks/main.yml | 54 + test/integration/targets/lookup_env/aliases | 3 + test/integration/targets/lookup_env/tasks/main.yml | 15 + test/integration/targets/lookup_file/aliases | 3 + .../integration/targets/lookup_file/tasks/main.yml | 13 + test/integration/targets/lookup_fileglob/aliases | 1 + .../find_levels/files/play_adj_subdir.txt | 1 + .../files/somepath/play_adj_subsubdir.txt | 1 + .../targets/lookup_fileglob/find_levels/play.yml | 13 + .../lookup_fileglob/find_levels/play_adj.txt | 1 + .../find_levels/roles/get_file/files/in_role.txt | 1 + .../get_file/files/otherpath/in_role_subdir.txt | 1 + 
.../find_levels/roles/get_file/tasks/main.yml | 10 + .../targets/lookup_fileglob/non_existent/play.yml | 6 + test/integration/targets/lookup_fileglob/runme.sh | 15 + .../integration/targets/lookup_first_found/aliases | 3 + .../targets/lookup_first_found/files/bar1 | 1 + .../targets/lookup_first_found/files/foo1 | 1 + .../targets/lookup_first_found/tasks/main.yml | 73 + .../targets/lookup_indexed_items/aliases | 3 + .../targets/lookup_indexed_items/tasks/main.yml | 16 + test/integration/targets/lookup_ini/aliases | 2 + .../targets/lookup_ini/lookup-8859-15.ini | 7 + test/integration/targets/lookup_ini/lookup.ini | 25 + .../targets/lookup_ini/lookup.properties | 6 + test/integration/targets/lookup_ini/runme.sh | 5 + .../targets/lookup_ini/test_lookup_properties.yml | 71 + .../targets/lookup_inventory_hostnames/aliases | 2 + .../targets/lookup_inventory_hostnames/inventory | 6 + .../targets/lookup_inventory_hostnames/main.yml | 13 + .../targets/lookup_inventory_hostnames/runme.sh | 5 + test/integration/targets/lookup_items/aliases | 3 + .../targets/lookup_items/tasks/main.yml | 14 + test/integration/targets/lookup_lines/aliases | 3 + .../targets/lookup_lines/tasks/main.yml | 13 + test/integration/targets/lookup_list/aliases | 3 + .../integration/targets/lookup_list/tasks/main.yml | 19 + test/integration/targets/lookup_nested/aliases | 3 + .../targets/lookup_nested/tasks/main.yml | 18 + test/integration/targets/lookup_password/aliases | 3 + test/integration/targets/lookup_password/runme.sh | 12 + test/integration/targets/lookup_password/runme.yml | 4 + .../targets/lookup_password/tasks/main.yml | 104 + test/integration/targets/lookup_pipe/aliases | 3 + .../integration/targets/lookup_pipe/tasks/main.yml | 9 + .../targets/lookup_random_choice/aliases | 3 + .../targets/lookup_random_choice/tasks/main.yml | 10 + test/integration/targets/lookup_sequence/aliases | 3 + .../targets/lookup_sequence/tasks/main.yml | 63 + .../integration/targets/lookup_subelements/aliases | 3 + .../targets/lookup_subelements/tasks/main.yml | 45 + .../targets/lookup_subelements/vars/main.yml | 43 + test/integration/targets/lookup_template/aliases | 3 + .../targets/lookup_template/tasks/main.yml | 19 + .../targets/lookup_template/templates/hello.txt | 1 + .../lookup_template/templates/hello_string.txt | 1 + .../targets/lookup_template/templates/world.txt | 1 + test/integration/targets/lookup_together/aliases | 3 + .../targets/lookup_together/tasks/main.yml | 14 + test/integration/targets/lookup_unvault/aliases | 3 + .../targets/lookup_unvault/files/foot.txt | 1 + .../targets/lookup_unvault/files/foot.txt.vault | 6 + test/integration/targets/lookup_unvault/runme.sh | 6 + test/integration/targets/lookup_unvault/secret | 1 + .../integration/targets/lookup_unvault/unvault.yml | 9 + test/integration/targets/lookup_url/aliases | 5 + test/integration/targets/lookup_url/meta/main.yml | 2 + test/integration/targets/lookup_url/tasks/main.yml | 28 + test/integration/targets/lookup_vars/aliases | 3 + .../integration/targets/lookup_vars/tasks/main.yml | 16 + test/integration/targets/loop_control/aliases | 1 + test/integration/targets/loop_control/extended.yml | 12 + test/integration/targets/loop_control/inner.yml | 9 + test/integration/targets/loop_control/label.yml | 23 + test/integration/targets/loop_control/runme.sh | 12 + test/integration/targets/loops/aliases | 2 + test/integration/targets/loops/files/data1.txt | 1 + test/integration/targets/loops/files/data2.txt | 1 + .../targets/loops/tasks/index_var_tasks.yml | 3 + 
test/integration/targets/loops/tasks/main.yml | 391 ++ .../loops/tasks/templated_loop_var_tasks.yml | 4 + test/integration/targets/loops/vars/64169.yml | 2 + test/integration/targets/loops/vars/main.yml | 8 + test/integration/targets/meta_tasks/aliases | 1 + test/integration/targets/meta_tasks/inventory.yml | 9 + test/integration/targets/meta_tasks/runme.sh | 50 + .../targets/meta_tasks/test_end_host.yml | 14 + .../targets/meta_tasks/test_end_host_all.yml | 13 + .../targets/meta_tasks/test_end_host_all_fqcn.yml | 13 + .../targets/meta_tasks/test_end_host_fqcn.yml | 14 + .../targets/meta_tasks/test_end_play.yml | 12 + .../targets/meta_tasks/test_end_play_fqcn.yml | 12 + .../targets/missing_required_lib/aliases | 1 + .../library/missing_required_lib.py | 37 + .../targets/missing_required_lib/runme.sh | 5 + .../targets/missing_required_lib/runme.yml | 57 + .../targets/missing_required_lib/tasks/main.yml | 3 + test/integration/targets/module_defaults/aliases | 1 + .../othercoll/plugins/action/other_echoaction.py | 8 + .../othercoll/plugins/modules/other_echo1.py | 13 + .../testns/testcoll/meta/runtime.yml | 9 + .../testns/testcoll/plugins/action/echoaction.py | 19 + .../testcoll/plugins/module_utils/echo_impl.py | 15 + .../testns/testcoll/plugins/modules/echo1.py | 13 + .../testns/testcoll/plugins/modules/echo2.py | 13 + .../library/test_module_defaults.py | 30 + test/integration/targets/module_defaults/runme.sh | 5 + .../targets/module_defaults/tasks/main.yml | 89 + .../targets/module_defaults/test_defaults.yml | 60 + test/integration/targets/module_no_log/aliases | 5 + .../module_no_log/library/module_that_logs.py | 18 + .../targets/module_no_log/tasks/main.yml | 61 + test/integration/targets/module_precedence/aliases | 1 + .../module_precedence/lib_no_extension/ping | 69 + .../module_precedence/lib_with_extension/a.ini | 13 + .../module_precedence/lib_with_extension/a.py | 13 + .../module_precedence/lib_with_extension/ping.ini | 13 + .../module_precedence/lib_with_extension/ping.py | 69 + .../targets/module_precedence/modules_test.yml | 10 + .../module_precedence/modules_test_envvar.yml | 11 + .../module_precedence/modules_test_envvar_ext.yml | 16 + .../modules_test_multiple_roles.yml | 17 + .../modules_test_multiple_roles_reverse_order.yml | 16 + .../module_precedence/modules_test_role.yml | 13 + .../module_precedence/modules_test_role_ext.yml | 18 + .../multiple_roles/bar/library/ping.py | 69 + .../multiple_roles/bar/tasks/main.yml | 10 + .../multiple_roles/foo/library/ping.py | 69 + .../multiple_roles/foo/tasks/main.yml | 10 + .../roles_no_extension/foo/library/ping | 69 + .../roles_no_extension/foo/tasks/main.yml | 10 + .../roles_with_extension/foo/library/a.ini | 13 + .../roles_with_extension/foo/library/a.py | 13 + .../roles_with_extension/foo/library/ping.ini | 13 + .../roles_with_extension/foo/library/ping.py | 69 + .../roles_with_extension/foo/tasks/main.yml | 10 + .../integration/targets/module_precedence/runme.sh | 49 + test/integration/targets/module_tracebacks/aliases | 3 + .../targets/module_tracebacks/inventory | 5 + .../integration/targets/module_tracebacks/runme.sh | 5 + .../targets/module_tracebacks/traceback.yml | 21 + test/integration/targets/module_utils/aliases | 3 + .../targets/module_utils/callback/pure_json.py | 31 + .../testns/testcoll/plugins/module_utils/legit.py | 6 + .../targets/module_utils/library/test.py | 85 + .../module_utils/library/test_alias_deprecation.py | 15 + .../module_utils/library/test_cwd_missing.py | 33 + 
.../module_utils/library/test_cwd_unreadable.py | 28 + .../module_utils/library/test_env_override.py | 11 + .../targets/module_utils/library/test_failure.py | 12 + .../targets/module_utils/library/test_no_log.py | 35 + .../targets/module_utils/library/test_optional.py | 84 + .../targets/module_utils/library/test_override.py | 7 + .../module_utils/library/test_recursive_diff.py | 29 + .../targets/module_utils/module_utils/__init__.py | 0 .../module_utils/module_utils/a/__init__.py | 0 .../module_utils/module_utils/a/b/__init__.py | 0 .../module_utils/module_utils/a/b/c/__init__.py | 0 .../module_utils/module_utils/a/b/c/d/__init__.py | 0 .../module_utils/a/b/c/d/e/__init__.py | 0 .../module_utils/a/b/c/d/e/f/__init__.py | 0 .../module_utils/a/b/c/d/e/f/g/__init__.py | 0 .../module_utils/a/b/c/d/e/f/g/h/__init__.py | 1 + .../module_utils/module_utils/bar0/__init__.py | 0 .../targets/module_utils/module_utils/bar0/foo.py | 1 + .../module_utils/module_utils/bar1/__init__.py | 1 + .../module_utils/module_utils/bar2/__init__.py | 1 + .../module_utils/module_utils/baz1/__init__.py | 0 .../targets/module_utils/module_utils/baz1/one.py | 1 + .../module_utils/module_utils/baz2/__init__.py | 0 .../targets/module_utils/module_utils/baz2/one.py | 1 + .../targets/module_utils/module_utils/facts.py | 1 + .../targets/module_utils/module_utils/foo.py | 3 + .../targets/module_utils/module_utils/foo0.py | 1 + .../targets/module_utils/module_utils/foo1.py | 1 + .../targets/module_utils/module_utils/foo2.py | 1 + .../module_utils/module_utils/qux1/__init__.py | 0 .../targets/module_utils/module_utils/qux1/quux.py | 1 + .../module_utils/module_utils/qux2/__init__.py | 0 .../targets/module_utils/module_utils/qux2/quux.py | 1 + .../targets/module_utils/module_utils/qux2/quuz.py | 1 + .../targets/module_utils/module_utils/service.py | 1 + .../module_utils/module_utils/spam1/__init__.py | 0 .../module_utils/spam1/ham/__init__.py | 0 .../module_utils/spam1/ham/eggs/__init__.py | 1 + .../module_utils/module_utils/spam2/__init__.py | 0 .../module_utils/spam2/ham/__init__.py | 0 .../module_utils/spam2/ham/eggs/__init__.py | 1 + .../module_utils/module_utils/spam3/__init__.py | 0 .../module_utils/spam3/ham/__init__.py | 0 .../module_utils/module_utils/spam3/ham/bacon.py | 1 + .../module_utils/module_utils/spam4/__init__.py | 0 .../module_utils/spam4/ham/__init__.py | 0 .../module_utils/module_utils/spam4/ham/bacon.py | 1 + .../module_utils/module_utils/spam5/__init__.py | 0 .../module_utils/spam5/ham/__init__.py | 0 .../module_utils/module_utils/spam5/ham/bacon.py | 1 + .../module_utils/module_utils/spam5/ham/eggs.py | 1 + .../module_utils/module_utils/spam6/__init__.py | 0 .../module_utils/spam6/ham/__init__.py | 2 + .../module_utils/module_utils/spam7/__init__.py | 0 .../module_utils/spam7/ham/__init__.py | 1 + .../module_utils/module_utils/spam7/ham/bacon.py | 1 + .../module_utils/module_utils/spam8/__init__.py | 0 .../module_utils/spam8/ham/__init__.py | 1 + .../module_utils/module_utils/spam8/ham/bacon.py | 1 + .../module_utils/module_utils/sub/__init__.py | 0 .../targets/module_utils/module_utils/sub/bam.py | 3 + .../module_utils/module_utils/sub/bam/__init__.py | 0 .../module_utils/module_utils/sub/bam/bam.py | 3 + .../module_utils/module_utils/sub/bar/__init__.py | 0 .../module_utils/module_utils/sub/bar/bam.py | 3 + .../module_utils/module_utils/sub/bar/bar.py | 3 + .../module_utils/module_utils/yak/__init__.py | 0 .../module_utils/yak/zebra/__init__.py | 0 .../module_utils/module_utils/yak/zebra/foo.py | 1 + 
.../module_utils/module_utils_basic_setcwd.yml | 22 + .../module_utils_common_dict_transformation.yml | 34 + .../targets/module_utils/module_utils_envvar.yml | 51 + .../targets/module_utils/module_utils_test.yml | 71 + .../module_utils/module_utils_test_no_log.yml | 9 + .../targets/module_utils/module_utils_vvvvv.yml | 27 + .../targets/module_utils/other_mu_dir/__init__.py | 0 .../module_utils/other_mu_dir/a/__init__.py | 0 .../module_utils/other_mu_dir/a/b/__init__.py | 0 .../module_utils/other_mu_dir/a/b/c/__init__.py | 0 .../module_utils/other_mu_dir/a/b/c/d/__init__.py | 0 .../other_mu_dir/a/b/c/d/e/__init__.py | 0 .../other_mu_dir/a/b/c/d/e/f/__init__.py | 0 .../other_mu_dir/a/b/c/d/e/f/g/__init__.py | 0 .../other_mu_dir/a/b/c/d/e/f/g/h/__init__.py | 1 + .../targets/module_utils/other_mu_dir/facts.py | 1 + .../module_utils/other_mu_dir/json_utils.py | 1 + .../targets/module_utils/other_mu_dir/mork.py | 1 + test/integration/targets/module_utils/runme.sh | 14 + .../module_utils_Ansible.AccessToken/aliases | 3 + .../library/ansible_access_token_tests.ps1 | 378 ++ .../tasks/main.yml | 29 + .../targets/module_utils_Ansible.Basic/aliases | 3 + .../library/ansible_basic_tests.ps1 | 3098 +++++++++ .../module_utils_Ansible.Basic/tasks/main.yml | 9 + .../targets/module_utils_Ansible.Become/aliases | 3 + .../library/ansible_become_tests.ps1 | 1009 +++ .../module_utils_Ansible.Become/tasks/main.yml | 28 + .../aliases | 3 + .../library/add_type_test.ps1 | 299 + .../tasks/main.yml | 10 + .../aliases | 3 + .../library/argv_parser_test.ps1 | 93 + .../meta/main.yml | 3 + .../tasks/main.yml | 9 + .../aliases | 3 + .../library/backup_file_test.ps1 | 89 + .../tasks/main.yml | 10 + .../aliases | 3 + .../library/camel_conversion_test.ps1 | 74 + .../tasks/main.yml | 8 + .../aliases | 3 + .../library/command_util_test.ps1 | 135 + .../meta/main.yml | 3 + .../tasks/main.yml | 9 + .../aliases | 3 + .../library/file_util_test.ps1 | 108 + .../tasks/main.yml | 8 + .../aliases | 3 + .../library/testlist.ps1 | 12 + .../library/testpath.ps1 | 9 + .../tasks/main.yml | 41 + .../aliases | 3 + .../library/symbolic_link_test.ps1 | 170 + .../tasks/main.yml | 8 + .../aliases | 3 + .../library/privilege_util_test.ps1 | 112 + .../tasks/main.yml | 8 + .../module_utils_Ansible.ModuleUtils.SID/aliases | 3 + .../library/sid_utils_test.ps1 | 93 + .../tasks/main.yml | 22 + .../aliases | 4 + .../library/web_request_test.ps1 | 467 ++ .../meta/main.yml | 3 + .../tasks/main.yml | 10 + .../targets/module_utils_Ansible.Privilege/aliases | 3 + .../library/ansible_privilege_tests.ps1 | 324 + .../module_utils_Ansible.Privilege/tasks/main.yml | 9 + .../targets/module_utils_Ansible.Process/aliases | 3 + .../library/ansible_process_tests.ps1 | 236 + .../module_utils_Ansible.Process/tasks/main.yml | 9 + .../targets/module_utils_Ansible.Service/aliases | 3 + .../library/ansible_service_tests.ps1 | 937 +++ .../module_utils_Ansible.Service/tasks/main.yml | 9 + test/integration/targets/network_cli/aliases | 3 + .../targets/network_cli/passworded_user.yml | 14 + test/integration/targets/network_cli/runme.sh | 27 + test/integration/targets/network_cli/setup.yml | 14 + test/integration/targets/network_cli/teardown.yml | 14 + test/integration/targets/no_log/aliases | 1 + test/integration/targets/no_log/dynamic.yml | 27 + test/integration/targets/no_log/library/module.py | 45 + test/integration/targets/no_log/no_log_local.yml | 92 + .../targets/no_log/no_log_suboptions.yml | 24 + .../targets/no_log/no_log_suboptions_invalid.yml | 45 + 
test/integration/targets/no_log/runme.sh | 21 + test/integration/targets/noexec/aliases | 3 + test/integration/targets/noexec/inventory | 1 + test/integration/targets/noexec/runme.sh | 9 + test/integration/targets/noexec/test-noexec.yml | 8 + .../targets/old_style_cache_plugins/aliases | 4 + .../old_style_cache_plugins/inventory_config | 1 + .../old_style_cache_plugins/plugins/cache/redis.py | 141 + .../plugins/inventory/test.py | 59 + .../targets/old_style_cache_plugins/runme.sh | 81 + .../test_fact_gathering.yml | 6 + .../targets/old_style_modules_posix/aliases | 1 + .../old_style_modules_posix/library/helloworld.sh | 29 + .../targets/old_style_modules_posix/meta/main.yml | 2 + .../targets/old_style_modules_posix/tasks/main.yml | 44 + test/integration/targets/omit/48673.yml | 4 + test/integration/targets/omit/aliases | 1 + test/integration/targets/omit/runme.sh | 5 + test/integration/targets/order/aliases | 1 + test/integration/targets/order/inventory | 9 + test/integration/targets/order/order.yml | 39 + test/integration/targets/order/runme.sh | 24 + test/integration/targets/package/aliases | 3 + test/integration/targets/package/meta/main.yml | 2 + test/integration/targets/package/tasks/main.yml | 114 + test/integration/targets/package_facts/aliases | 4 + .../targets/package_facts/tasks/main.yml | 115 + test/integration/targets/parsing/aliases | 1 + test/integration/targets/parsing/bad_parsing.yml | 12 + test/integration/targets/parsing/good_parsing.yml | 9 + .../parsing/roles/test_bad_parsing/tasks/main.yml | 60 + .../roles/test_bad_parsing/tasks/scenario1.yml | 4 + .../roles/test_bad_parsing/tasks/scenario2.yml | 4 + .../roles/test_bad_parsing/tasks/scenario3.yml | 4 + .../roles/test_bad_parsing/tasks/scenario4.yml | 4 + .../parsing/roles/test_bad_parsing/vars/main.yml | 2 + .../parsing/roles/test_good_parsing/tasks/main.yml | 204 + .../roles/test_good_parsing/tasks/test_include.yml | 1 + .../tasks/test_include_conditional.yml | 1 + .../tasks/test_include_nested.yml | 2 + .../parsing/roles/test_good_parsing/vars/main.yml | 2 + test/integration/targets/parsing/runme.sh | 6 + test/integration/targets/path_lookups/aliases | 1 + test/integration/targets/path_lookups/play.yml | 49 + .../path_lookups/roles/showfile/tasks/main.yml | 2 + test/integration/targets/path_lookups/runme.sh | 5 + test/integration/targets/path_lookups/testplay.yml | 20 + .../targets/path_with_comma_in_inventory/aliases | 1 + .../path_with_comma_in_inventory/playbook.yml | 9 + .../targets/path_with_comma_in_inventory/runme.sh | 5 + .../this,path,has,commas/group_vars/all.yml | 1 + .../this,path,has,commas/hosts | 1 + test/integration/targets/pause/aliases | 3 + test/integration/targets/pause/pause-1.yml | 11 + test/integration/targets/pause/pause-2.yml | 12 + test/integration/targets/pause/pause-3.yml | 12 + test/integration/targets/pause/pause-4.yml | 13 + test/integration/targets/pause/pause-5.yml | 35 + test/integration/targets/pause/runme.sh | 43 + test/integration/targets/pause/setup.yml | 4 + .../targets/pause/test-pause-background.yml | 10 + .../targets/pause/test-pause-no-tty.yml | 7 + test/integration/targets/pause/test-pause.py | 273 + test/integration/targets/pause/test-pause.yml | 51 + test/integration/targets/ping/aliases | 1 + test/integration/targets/ping/tasks/main.yml | 53 + test/integration/targets/pip/aliases | 3 + .../pip/files/ansible_test_pip_chdir/__init__.py | 2 + test/integration/targets/pip/files/setup.py | 14 + test/integration/targets/pip/meta/main.yml | 2 + 
.../targets/pip/tasks/default_cleanup.yml | 5 + .../targets/pip/tasks/freebsd_cleanup.yml | 6 + test/integration/targets/pip/tasks/main.yml | 53 + test/integration/targets/pip/tasks/pip.yml | 580 ++ test/integration/targets/pip/vars/main.yml | 13 + test/integration/targets/pkg_resources/aliases | 1 + .../lookup_plugins/check_pkg_resources.py | 23 + .../targets/pkg_resources/tasks/main.yml | 3 + test/integration/targets/play_iterator/aliases | 1 + .../integration/targets/play_iterator/playbook.yml | 10 + test/integration/targets/play_iterator/runme.sh | 5 + test/integration/targets/playbook/aliases | 1 + test/integration/targets/playbook/runme.sh | 9 + test/integration/targets/playbook/timeout.yml | 12 + test/integration/targets/playbook/types.yml | 21 + .../targets/plugin_config_for_inventory/aliases | 1 + .../config_with_parameter.yml | 3 + .../config_without_parameter.yml | 1 + .../targets/plugin_config_for_inventory/runme.sh | 16 + .../plugin_config_for_inventory/test_inventory.py | 52 + test/integration/targets/plugin_filtering/aliases | 1 + test/integration/targets/plugin_filtering/copy.yml | 10 + .../targets/plugin_filtering/filter_lookup.ini | 4 + .../targets/plugin_filtering/filter_lookup.yml | 6 + .../targets/plugin_filtering/filter_modules.ini | 4 + .../targets/plugin_filtering/filter_modules.yml | 9 + .../targets/plugin_filtering/filter_ping.ini | 4 + .../targets/plugin_filtering/filter_ping.yml | 5 + .../targets/plugin_filtering/filter_stat.ini | 4 + .../targets/plugin_filtering/filter_stat.yml | 5 + .../targets/plugin_filtering/lookup.yml | 14 + .../plugin_filtering/no_blacklist_module.ini | 3 + .../plugin_filtering/no_blacklist_module.yml | 3 + .../targets/plugin_filtering/no_filters.ini | 4 + .../integration/targets/plugin_filtering/pause.yml | 6 + test/integration/targets/plugin_filtering/ping.yml | 6 + test/integration/targets/plugin_filtering/runme.sh | 137 + test/integration/targets/plugin_filtering/stat.yml | 6 + .../targets/plugin_filtering/tempfile.yml | 9 + test/integration/targets/plugin_loader/aliases | 1 + .../targets/plugin_loader/normal/filters.yml | 13 + .../plugin_loader/normal/library/_symlink.py | 1 + .../plugin_loader/normal/library/_underscore.py | 13 + .../targets/plugin_loader/normal/underscore.yml | 15 + .../plugin_loader/override/filter_plugins/core.py | 18 + .../targets/plugin_loader/override/filters.yml | 15 + test/integration/targets/plugin_loader/runme.sh | 24 + test/integration/targets/plugin_namespace/aliases | 1 + .../plugin_namespace/filter_plugins/test_filter.py | 15 + .../plugin_namespace/lookup_plugins/lookup_name.py | 9 + .../targets/plugin_namespace/tasks/main.yml | 11 + .../plugin_namespace/test_plugins/test_test.py | 16 + .../targets/prepare_http_tests/defaults/main.yml | 4 + .../targets/prepare_http_tests/meta/main.yml | 2 + .../targets/prepare_http_tests/tasks/default.yml | 64 + .../targets/prepare_http_tests/tasks/main.yml | 24 + .../targets/prepare_http_tests/tasks/windows.yml | 33 + .../targets/prepare_http_tests/vars/httptester.yml | 5 + .../targets/prepare_tests/tasks/main.yml | 0 test/integration/targets/pull/aliases | 2 + test/integration/targets/pull/cleanup.yml | 16 + .../targets/pull/pull-integration-test/ansible.cfg | 2 + .../targets/pull/pull-integration-test/inventory | 2 + .../targets/pull/pull-integration-test/local.yml | 20 + test/integration/targets/pull/runme.sh | 69 + test/integration/targets/pull/setup.yml | 11 + test/integration/targets/raw/aliases | 1 + test/integration/targets/raw/meta/main.yml | 2 + 
test/integration/targets/raw/runme.sh | 6 + test/integration/targets/raw/runme.yml | 4 + test/integration/targets/raw/tasks/main.yml | 107 + test/integration/targets/reboot/aliases | 2 + .../targets/reboot/tasks/check_reboot.yml | 10 + .../targets/reboot/tasks/get_boot_time.yml | 3 + test/integration/targets/reboot/tasks/main.yml | 111 + test/integration/targets/reboot/vars/main.yml | 9 + .../integration/targets/rel_plugin_loading/aliases | 1 + .../targets/rel_plugin_loading/notyaml.yml | 5 + .../targets/rel_plugin_loading/runme.sh | 5 + .../subdir/inventory_plugins/notyaml.py | 168 + .../targets/rel_plugin_loading/subdir/play.yml | 6 + test/integration/targets/remote_tmp/aliases | 2 + test/integration/targets/remote_tmp/playbook.yml | 57 + test/integration/targets/remote_tmp/runme.sh | 5 + test/integration/targets/replace/aliases | 1 + test/integration/targets/replace/meta/main.yml | 2 + test/integration/targets/replace/tasks/main.yml | 265 + .../targets/retry_task_name_in_callback/aliases | 1 + .../targets/retry_task_name_in_callback/runme.sh | 13 + .../targets/retry_task_name_in_callback/test.yml | 28 + test/integration/targets/roles/aliases | 1 + test/integration/targets/roles/allowed_dupes.yml | 18 + test/integration/targets/roles/data_integrity.yml | 4 + test/integration/targets/roles/no_dupes.yml | 19 + .../targets/roles/roles/a/tasks/main.yml | 1 + .../targets/roles/roles/b/meta/main.yml | 2 + .../targets/roles/roles/b/tasks/main.yml | 1 + .../targets/roles/roles/c/meta/main.yml | 2 + .../targets/roles/roles/c/tasks/main.yml | 1 + .../targets/roles/roles/data/defaults/main/00.yml | 1 + .../targets/roles/roles/data/defaults/main/01.yml | 0 .../targets/roles/roles/data/tasks/main.yml | 5 + test/integration/targets/roles/runme.sh | 18 + test/integration/targets/rpm_key/aliases | 3 + .../integration/targets/rpm_key/defaults/main.yaml | 0 test/integration/targets/rpm_key/tasks/main.yaml | 2 + .../integration/targets/rpm_key/tasks/rpm_key.yaml | 195 + test/integration/targets/run_modules/aliases | 1 + test/integration/targets/run_modules/args.json | 1 + .../targets/run_modules/library/test.py | 7 + test/integration/targets/run_modules/runme.sh | 6 + test/integration/targets/script/aliases | 1 + .../targets/script/files/create_afile.sh | 3 + .../integration/targets/script/files/no_shebang.py | 3 + .../targets/script/files/remove_afile.sh | 3 + .../targets/script/files/space path/test.sh | 3 + test/integration/targets/script/files/test.sh | 3 + .../targets/script/files/test_with_args.sh | 5 + test/integration/targets/script/meta/main.yml | 2 + test/integration/targets/script/tasks/main.yml | 240 + test/integration/targets/service/aliases | 5 + .../targets/service/files/ansible-broken.upstart | 10 + test/integration/targets/service/files/ansible.rc | 16 + .../targets/service/files/ansible.systemd | 11 + .../integration/targets/service/files/ansible.sysv | 134 + .../targets/service/files/ansible.upstart | 9 + .../targets/service/files/ansible_test_service.py | 71 + test/integration/targets/service/meta/main.yml | 20 + test/integration/targets/service/tasks/main.yml | 58 + .../targets/service/tasks/rc_cleanup.yml | 9 + .../integration/targets/service/tasks/rc_setup.yml | 21 + .../targets/service/tasks/systemd_cleanup.yml | 25 + .../targets/service/tasks/systemd_setup.yml | 17 + .../targets/service/tasks/sysv_cleanup.yml | 9 + .../targets/service/tasks/sysv_setup.yml | 11 + test/integration/targets/service/tasks/tests.yml | 225 + .../targets/service/tasks/upstart_cleanup.yml | 17 + 
.../targets/service/tasks/upstart_setup.yml | 19 + .../integration/targets/service/templates/main.yml | 0 test/integration/targets/service_facts/aliases | 5 + .../targets/service_facts/files/ansible.systemd | 11 + .../service_facts/files/ansible_test_service.py | 73 + .../targets/service_facts/tasks/main.yml | 25 + .../service_facts/tasks/systemd_cleanup.yml | 32 + .../targets/service_facts/tasks/systemd_setup.yml | 26 + .../targets/service_facts/tasks/tests.yml | 36 + test/integration/targets/set_fact/aliases | 2 + test/integration/targets/set_fact/incremental.yml | 35 + test/integration/targets/set_fact/inventory | 3 + .../targets/set_fact/nowarn_clean_facts.yml | 10 + test/integration/targets/set_fact/runme.sh | 30 + .../targets/set_fact/set_fact_bool_conv.yml | 35 + .../set_fact/set_fact_bool_conv_jinja2_native.yml | 35 + .../targets/set_fact/set_fact_cached_1.yml | 324 + .../targets/set_fact/set_fact_cached_2.yml | 57 + .../targets/set_fact/set_fact_no_cache.yml | 39 + .../targets/setup_cron/defaults/main.yml | 1 + test/integration/targets/setup_cron/tasks/main.yml | 70 + .../integration/targets/setup_cron/vars/debian.yml | 3 + .../targets/setup_cron/vars/default.yml | 0 .../integration/targets/setup_cron/vars/fedora.yml | 3 + .../targets/setup_cron/vars/freebsd.yml | 3 + .../integration/targets/setup_cron/vars/redhat.yml | 4 + test/integration/targets/setup_cron/vars/suse.yml | 3 + .../setup_deb_repo/files/package_specs/foo-1.0.0 | 10 + .../setup_deb_repo/files/package_specs/foo-1.0.1 | 10 + .../files/package_specs/foobar-1.0.0 | 11 + .../files/package_specs/foobar-1.0.1 | 10 + .../targets/setup_deb_repo/meta/main.yml | 2 + .../targets/setup_deb_repo/tasks/main.yml | 56 + test/integration/targets/setup_epel/tasks/main.yml | 5 + .../targets/setup_gnutar/handlers/main.yml | 6 + .../targets/setup_gnutar/tasks/main.yml | 18 + .../targets/setup_nobody/handlers/main.yml | 5 + .../targets/setup_nobody/tasks/main.yml | 7 + test/integration/targets/setup_paramiko/aliases | 1 + .../targets/setup_paramiko/constraints.txt | 1 + .../setup_paramiko/install-CentOS-6-python-2.yml | 3 + .../setup_paramiko/install-Darwin-python-3.yml | 9 + .../setup_paramiko/install-FreeBSD-11-python-2.yml | 3 + .../setup_paramiko/install-FreeBSD-11-python-3.yml | 12 + .../setup_paramiko/install-FreeBSD-12-python-2.yml | 3 + .../setup_paramiko/install-FreeBSD-12-python-3.yml | 3 + .../setup_paramiko/install-RedHat-8-python-3.yml | 6 + .../setup_paramiko/install-Ubuntu-16-python-2.yml | 3 + .../targets/setup_paramiko/install-fail.yml | 7 + .../targets/setup_paramiko/install-python-2.yml | 3 + .../targets/setup_paramiko/install-python-3.yml | 3 + .../integration/targets/setup_paramiko/install.yml | 18 + test/integration/targets/setup_paramiko/inventory | 1 + .../setup_paramiko/library/detect_paramiko.py | 31 + .../setup_paramiko/setup-remote-constraints.yml | 12 + test/integration/targets/setup_paramiko/setup.sh | 9 + .../setup_paramiko/uninstall-Darwin-python-3.yml | 4 + .../uninstall-FreeBSD-11-python-2.yml | 4 + .../uninstall-FreeBSD-11-python-3.yml | 4 + .../uninstall-FreeBSD-12-python-2.yml | 4 + .../uninstall-FreeBSD-12-python-3.yml | 4 + .../setup_paramiko/uninstall-RedHat-8-python-3.yml | 4 + .../setup_paramiko/uninstall-apt-python-2.yml | 5 + .../setup_paramiko/uninstall-apt-python-3.yml | 5 + .../targets/setup_paramiko/uninstall-dnf.yml | 4 + .../targets/setup_paramiko/uninstall-fail.yml | 7 + .../targets/setup_paramiko/uninstall-yum.yml | 4 + .../setup_paramiko/uninstall-zypper-python-2.yml | 4 + 
.../setup_paramiko/uninstall-zypper-python-3.yml | 4 + .../targets/setup_paramiko/uninstall.yml | 20 + .../targets/setup_passlib/tasks/main.yml | 4 + .../targets/setup_pexpect/files/constraints.txt | 2 + .../targets/setup_pexpect/meta/main.yml | 2 + .../targets/setup_pexpect/tasks/main.yml | 10 + .../targets/setup_remote_constraints/aliases | 1 + .../targets/setup_remote_constraints/meta/main.yml | 2 + .../setup_remote_constraints/tasks/main.yml | 8 + .../targets/setup_remote_tmp_dir/handlers/main.yml | 5 + .../setup_remote_tmp_dir/tasks/default-cleanup.yml | 5 + .../targets/setup_remote_tmp_dir/tasks/default.yml | 11 + .../targets/setup_remote_tmp_dir/tasks/main.yml | 10 + .../setup_remote_tmp_dir/tasks/windows-cleanup.yml | 4 + .../targets/setup_remote_tmp_dir/tasks/windows.yml | 11 + test/integration/targets/setup_rpm_repo/aliases | 1 + .../targets/setup_rpm_repo/defaults/main.yml | 1 + .../targets/setup_rpm_repo/files/comps.xml | 36 + .../targets/setup_rpm_repo/files/create-repo.py | 69 + .../targets/setup_rpm_repo/handlers/main.yml | 5 + .../targets/setup_rpm_repo/library/create_repo.py | 94 + .../targets/setup_rpm_repo/meta/main.yml | 2 + .../targets/setup_rpm_repo/tasks/main.yml | 102 + .../targets/setup_rpm_repo/vars/Fedora.yml | 4 + .../targets/setup_rpm_repo/vars/RedHat-6.yml | 5 + .../targets/setup_rpm_repo/vars/RedHat-7.yml | 5 + .../targets/setup_rpm_repo/vars/RedHat-8.yml | 4 + .../targets/setup_rpm_repo/vars/main.yml | 1 + .../targets/setup_win_printargv/files/PrintArgv.cs | 13 + .../targets/setup_win_printargv/meta/main.yml | 3 + .../targets/setup_win_printargv/tasks/main.yml | 9 + .../targets/shell/action_plugins/test_shell.py | 19 + test/integration/targets/shell/aliases | 1 + .../connection_plugins/test_connection_default.py | 44 + .../connection_plugins/test_connection_override.py | 45 + test/integration/targets/shell/tasks/main.yml | 36 + test/integration/targets/slurp/aliases | 1 + test/integration/targets/slurp/files/bar.bin | Bin 0 -> 256 bytes test/integration/targets/slurp/tasks/main.yml | 98 + test/integration/targets/special_vars/aliases | 2 + .../integration/targets/special_vars/meta/main.yml | 2 + .../targets/special_vars/tasks/main.yml | 100 + .../targets/special_vars/templates/foo.j2 | 7 + .../integration/targets/special_vars/vars/main.yml | 0 test/integration/targets/stat/aliases | 1 + test/integration/targets/stat/files/foo.txt | 1 + test/integration/targets/stat/meta/main.yml | 2 + test/integration/targets/stat/tasks/main.yml | 157 + test/integration/targets/strategy_linear/aliases | 1 + test/integration/targets/strategy_linear/inventory | 3 + .../strategy_linear/roles/role1/tasks/main.yml | 6 + .../strategy_linear/roles/role1/tasks/tasks.yml | 7 + .../strategy_linear/roles/role2/tasks/main.yml | 7 + test/integration/targets/strategy_linear/runme.sh | 5 + .../strategy_linear/test_include_file_noop.yml | 16 + test/integration/targets/subversion/aliases | 7 + .../subversion/roles/subversion/defaults/main.yml | 10 + .../roles/subversion/files/create_repo.sh | 6 + .../subversion/roles/subversion/tasks/cleanup.yml | 8 + .../subversion/roles/subversion/tasks/main.yml | 20 + .../subversion/roles/subversion/tasks/setup.yml | 63 + .../roles/subversion/tasks/setup_selinux.yml | 11 + .../subversion/roles/subversion/tasks/tests.yml | 133 + .../subversion/roles/subversion/tasks/warnings.yml | 7 + .../roles/subversion/templates/subversion.conf.j2 | 67 + test/integration/targets/subversion/runme.sh | 32 + test/integration/targets/subversion/runme.yml | 15 + 
.../integration/targets/subversion/vars/Debian.yml | 6 + .../targets/subversion/vars/FreeBSD.yml | 7 + .../integration/targets/subversion/vars/RedHat.yml | 10 + test/integration/targets/subversion/vars/Suse.yml | 6 + .../targets/subversion/vars/Ubuntu-18.yml | 6 + .../targets/subversion/vars/Ubuntu-20.yml | 6 + test/integration/targets/systemd/aliases | 2 + test/integration/targets/systemd/defaults/main.yml | 1 + test/integration/targets/systemd/handlers/main.yml | 4 + test/integration/targets/systemd/meta/main.yml | 2 + test/integration/targets/systemd/tasks/main.yml | 121 + .../targets/systemd/tasks/test_unit_template.yml | 50 + .../targets/systemd/templates/sleeper@.service | 8 + test/integration/targets/systemd/vars/Debian.yml | 2 + test/integration/targets/systemd/vars/default.yml | 2 + test/integration/targets/tags/aliases | 2 + test/integration/targets/tags/runme.sh | 49 + test/integration/targets/tags/test_tags.yml | 33 + test/integration/targets/task_ordering/aliases | 1 + .../targets/task_ordering/meta/main.yml | 2 + .../targets/task_ordering/tasks/main.yml | 15 + .../task_ordering/tasks/taskorder-include.yml | 10 + test/integration/targets/tasks/aliases | 1 + test/integration/targets/tasks/tasks/main.yml | 4 + test/integration/targets/template/6653.yml | 10 + test/integration/targets/template/72262.yml | 6 + test/integration/targets/template/72615.yml | 26 + test/integration/targets/template/aliases | 3 + .../targets/template/ansible_managed.cfg | 2 + .../targets/template/ansible_managed.yml | 14 + test/integration/targets/template/corner_cases.yml | 51 + .../targets/template/custom_tasks/tasks/main.yml | 15 + .../targets/template/custom_tasks/templates/test | 1 + .../targets/template/custom_template.yml | 4 + .../template/files/encoding_1252_utf-8.expected | 1 + .../files/encoding_1252_windows-1252.expected | 1 + .../targets/template/files/foo-py26.txt | 9 + .../integration/targets/template/files/foo.dos.txt | 3 + test/integration/targets/template/files/foo.txt | 9 + .../targets/template/files/foo.unix.txt | 3 + .../targets/template/files/import_as.expected | 3 + .../template/files/import_as_with_context.expected | 2 + .../template/files/import_with_context.expected | 3 + .../template/files/lstrip_blocks_false.expected | 4 + .../template/files/lstrip_blocks_true.expected | 3 + .../template/files/trim_blocks_false.expected | 4 + .../template/files/trim_blocks_true.expected | 2 + .../targets/template/filter_plugins.yml | 9 + test/integration/targets/template/meta/main.yml | 3 + .../role_filter/filter_plugins/myplugin.py | 9 + .../targets/template/role_filter/tasks/main.yml | 3 + test/integration/targets/template/runme.sh | 36 + .../targets/template/tasks/backup_test.yml | 60 + test/integration/targets/template/tasks/main.yml | 719 +++ test/integration/targets/template/template.yml | 4 + .../targets/template/templates/6653-include.j2 | 1 + .../integration/targets/template/templates/6653.j2 | 4 + .../targets/template/templates/72262-included.j2 | 1 + .../targets/template/templates/72262-vars.j2 | 1 + .../targets/template/templates/72262.j2 | 3 + .../template/templates/72615-macro-nested.j2 | 4 + .../targets/template/templates/72615-macro.j2 | 8 + .../targets/template/templates/72615.j2 | 4 + test/integration/targets/template/templates/bar | 1 + .../targets/template/templates/caf\303\251.j2" | 1 + .../targets/template/templates/encoding_1252.j2 | 1 + test/integration/targets/template/templates/foo.j2 | 3 + .../integration/targets/template/templates/foo2.j2 | 3 + 
.../integration/targets/template/templates/foo3.j2 | 3 + .../targets/template/templates/for_loop.j2 | 4 + .../targets/template/templates/for_loop_include.j2 | 3 + .../template/templates/for_loop_include_nested.j2 | 1 + .../targets/template/templates/import_as.j2 | 4 + .../template/templates/import_as_with_context.j2 | 3 + .../template/templates/import_with_context.j2 | 4 + .../targets/template/templates/lstrip_blocks.j2 | 8 + .../targets/template/templates/parent.j2 | 3 + test/integration/targets/template/templates/qux | 1 + .../targets/template/templates/short.j2 | 1 + .../targets/template/templates/subtemplate.j2 | 2 + .../template/templates/template_destpath_test.j2 | 1 + .../targets/template/templates/trim_blocks.j2 | 4 + .../template/templates/unused_vars_include.j2 | 1 + .../template/templates/unused_vars_template.j2 | 2 + .../targets/template/undefined_var_info.yml | 15 + .../targets/template/unused_vars_include.yml | 8 + test/integration/targets/template/vars/main.yml | 20 + .../targets/template_jinja2_latest/aliases | 4 + .../targets/template_jinja2_latest/main.yml | 4 + .../template_jinja2_latest/requirements.txt | 2 + .../targets/template_jinja2_latest/runme.sh | 13 + .../integration/targets/templating_lookups/aliases | 2 + .../targets/templating_lookups/runme.sh | 12 + .../targets/templating_lookups/runme.yml | 4 + .../templating_lookups/template_deepcopy/hosts | 1 + .../template_deepcopy/playbook.yml | 10 + .../template_deepcopy/template.in | 1 + .../template_lookup_safe_eval_unicode/playbook.yml | 8 + .../template.json.j2 | 4 + .../template_lookup_vaulted/playbook.yml | 13 + .../templates/vaulted_hello.j2 | 6 + .../template_lookup_vaulted/test_vault_pass | 1 + .../template_lookups/tasks/errors.yml | 31 + .../template_lookups/tasks/main.yml | 90 + .../template_lookups/vars/main.yml | 9 + .../targets/templating_settings/aliases | 1 + .../templating_settings/dont_warn_register.yml | 6 + .../targets/templating_settings/runme.sh | 6 + .../test_templating_settings.yml | 14 + test/integration/targets/test_core/aliases | 2 + test/integration/targets/test_core/inventory | 1 + test/integration/targets/test_core/runme.sh | 5 + test/integration/targets/test_core/runme.yml | 4 + test/integration/targets/test_core/tasks/main.yml | 303 + test/integration/targets/test_core/vault-password | 1 + test/integration/targets/test_files/aliases | 2 + test/integration/targets/test_files/tasks/main.yml | 60 + test/integration/targets/test_mathstuff/aliases | 2 + .../targets/test_mathstuff/tasks/main.yml | 38 + test/integration/targets/throttle/aliases | 1 + .../targets/throttle/group_vars/all.yml | 4 + test/integration/targets/throttle/inventory | 6 + test/integration/targets/throttle/runme.sh | 7 + test/integration/targets/throttle/test_throttle.py | 34 + .../integration/targets/throttle/test_throttle.yml | 84 + test/integration/targets/unarchive/aliases | 4 + test/integration/targets/unarchive/files/foo.txt | 1 + ...217\343\202\211\343\201\250\343\201\277.tar.gz" | Bin 0 -> 4947 bytes test/integration/targets/unarchive/meta/main.yml | 4 + test/integration/targets/unarchive/tasks/main.yml | 16 + .../targets/unarchive/tasks/prepare_tests.yml | 92 + .../targets/unarchive/tasks/test_download.yml | 34 + .../targets/unarchive/tasks/test_exclude.yml | 48 + .../targets/unarchive/tasks/test_missing_files.yml | 47 + .../targets/unarchive/tasks/test_mode.yml | 151 + .../unarchive/tasks/test_non_ascii_filename.yml | 66 + .../unarchive/tasks/test_parent_not_writeable.yml | 32 + 
.../unarchive/tasks/test_quotable_characters.yml | 38 + .../targets/unarchive/tasks/test_symlink.yml | 64 + .../targets/unarchive/tasks/test_tar.yml | 26 + .../targets/unarchive/tasks/test_tar_gz.yml | 28 + .../unarchive/tasks/test_tar_gz_creates.yml | 53 + .../unarchive/tasks/test_tar_gz_keep_newer.yml | 57 + .../unarchive/tasks/test_tar_gz_owner_group.yml | 48 + .../unarchive/tasks/test_unprivileged_user.yml | 86 + .../targets/unarchive/tasks/test_zip.yml | 45 + test/integration/targets/undefined/aliases | 1 + test/integration/targets/undefined/tasks/main.yml | 18 + test/integration/targets/unicode/aliases | 1 + test/integration/targets/unicode/inventory | 5 + .../ansible.cfg" | 2 + test/integration/targets/unicode/runme.sh | 13 + .../targets/unicode/unicode-test-script | 7 + test/integration/targets/unicode/unicode.yml | 149 + test/integration/targets/unsafe_writes/aliases | 6 + test/integration/targets/unsafe_writes/basic.yml | 53 + test/integration/targets/unsafe_writes/runme.sh | 5 + test/integration/targets/until/aliases | 1 + test/integration/targets/until/tasks/main.yml | 71 + test/integration/targets/unvault/aliases | 1 + test/integration/targets/unvault/main.yml | 9 + test/integration/targets/unvault/password | 1 + test/integration/targets/unvault/runme.sh | 6 + test/integration/targets/unvault/vault | 6 + test/integration/targets/uri/aliases | 4 + test/integration/targets/uri/files/README | 9 + test/integration/targets/uri/files/fail0.json | 1 + test/integration/targets/uri/files/fail1.json | 1 + test/integration/targets/uri/files/fail10.json | 1 + test/integration/targets/uri/files/fail11.json | 1 + test/integration/targets/uri/files/fail12.json | 1 + test/integration/targets/uri/files/fail13.json | 1 + test/integration/targets/uri/files/fail14.json | 1 + test/integration/targets/uri/files/fail15.json | 1 + test/integration/targets/uri/files/fail16.json | 1 + test/integration/targets/uri/files/fail17.json | 1 + test/integration/targets/uri/files/fail18.json | 1 + test/integration/targets/uri/files/fail19.json | 1 + test/integration/targets/uri/files/fail2.json | 1 + test/integration/targets/uri/files/fail20.json | 1 + test/integration/targets/uri/files/fail21.json | 1 + test/integration/targets/uri/files/fail22.json | 1 + test/integration/targets/uri/files/fail23.json | 1 + test/integration/targets/uri/files/fail24.json | 1 + test/integration/targets/uri/files/fail25.json | 1 + test/integration/targets/uri/files/fail26.json | 2 + test/integration/targets/uri/files/fail27.json | 2 + test/integration/targets/uri/files/fail28.json | 1 + test/integration/targets/uri/files/fail29.json | 1 + test/integration/targets/uri/files/fail3.json | 1 + test/integration/targets/uri/files/fail30.json | 1 + test/integration/targets/uri/files/fail4.json | 1 + test/integration/targets/uri/files/fail5.json | 1 + test/integration/targets/uri/files/fail6.json | 1 + test/integration/targets/uri/files/fail7.json | 1 + test/integration/targets/uri/files/fail8.json | 1 + test/integration/targets/uri/files/fail9.json | 1 + test/integration/targets/uri/files/formdata.txt | 1 + test/integration/targets/uri/files/pass0.json | 58 + test/integration/targets/uri/files/pass1.json | 1 + test/integration/targets/uri/files/pass2.json | 6 + test/integration/targets/uri/files/pass3.json | 1 + test/integration/targets/uri/files/pass4.json | 1 + test/integration/targets/uri/files/testserver.py | 20 + test/integration/targets/uri/meta/main.yml | 5 + test/integration/targets/uri/tasks/main.yml | 600 ++ 
.../integration/targets/uri/tasks/redirect-all.yml | 272 + .../targets/uri/tasks/redirect-none.yml | 296 + .../targets/uri/tasks/redirect-safe.yml | 274 + .../targets/uri/tasks/redirect-urllib2.yml | 294 + .../targets/uri/tasks/return-content.yml | 49 + .../targets/uri/tasks/unexpected-failures.yml | 27 + test/integration/targets/uri/templates/netrc.j2 | 3 + test/integration/targets/uri/vars/main.yml | 20 + test/integration/targets/user/aliases | 3 + test/integration/targets/user/files/userlist.sh | 20 + test/integration/targets/user/meta/main.yml | 2 + test/integration/targets/user/tasks/main.yml | 34 + .../targets/user/tasks/test_create_system_user.yml | 12 + .../targets/user/tasks/test_create_user.yml | 67 + .../targets/user/tasks/test_create_user_home.yml | 136 + .../user/tasks/test_create_user_password.yml | 90 + .../targets/user/tasks/test_create_user_uid.yml | 26 + .../targets/user/tasks/test_expires.yml | 147 + .../user/tasks/test_expires_new_account.yml | 55 + .../test_expires_new_account_epoch_negative.yml | 112 + test/integration/targets/user/tasks/test_local.yml | 169 + .../targets/user/tasks/test_local_expires.yml | 333 + .../targets/user/tasks/test_no_home_fallback.yml | 106 + .../targets/user/tasks/test_password_lock.yml | 140 + .../user/tasks/test_password_lock_new_user.yml | 63 + .../targets/user/tasks/test_remove_user.yml | 19 + .../targets/user/tasks/test_shadow_backup.yml | 21 + .../targets/user/tasks/test_ssh_key_passphrase.yml | 29 + test/integration/targets/user/vars/main.yml | 13 + test/integration/targets/var_blending/aliases | 1 + .../targets/var_blending/group_vars/all | 9 + .../targets/var_blending/group_vars/local | 1 + .../targets/var_blending/host_vars/testhost | 4 + test/integration/targets/var_blending/inventory | 26 + .../roles/test_var_blending/defaults/main.yml | 4 + .../roles/test_var_blending/files/foo.txt | 77 + .../roles/test_var_blending/tasks/main.yml | 57 + .../roles/test_var_blending/templates/foo.j2 | 77 + .../roles/test_var_blending/vars/main.yml | 4 + .../roles/test_var_blending/vars/more_vars.yml | 3 + test/integration/targets/var_blending/runme.sh | 5 + .../targets/var_blending/test_var_blending.yml | 8 + .../integration/targets/var_blending/test_vars.yml | 1 + .../integration/targets/var_blending/vars_file.yml | 12 + test/integration/targets/var_precedence/aliases | 1 + .../var_precedence/ansible-var-precedence-check.py | 541 ++ .../targets/var_precedence/host_vars/testhost | 2 + test/integration/targets/var_precedence/inventory | 13 + .../roles/test_var_precedence/meta/main.yml | 4 + .../roles/test_var_precedence/tasks/main.yml | 10 + .../test_var_precedence_dep/defaults/main.yml | 5 + .../roles/test_var_precedence_dep/tasks/main.yml | 14 + .../roles/test_var_precedence_dep/vars/main.yml | 4 + .../tasks/main.yml | 5 + .../test_var_precedence_role1/defaults/main.yml | 5 + .../roles/test_var_precedence_role1/meta/main.yml | 2 + .../roles/test_var_precedence_role1/tasks/main.yml | 14 + .../roles/test_var_precedence_role1/vars/main.yml | 4 + .../test_var_precedence_role2/defaults/main.yml | 5 + .../roles/test_var_precedence_role2/tasks/main.yml | 14 + .../roles/test_var_precedence_role2/vars/main.yml | 5 + .../test_var_precedence_role3/defaults/main.yml | 7 + .../roles/test_var_precedence_role3/tasks/main.yml | 14 + .../roles/test_var_precedence_role3/vars/main.yml | 3 + test/integration/targets/var_precedence/runme.sh | 9 + .../targets/var_precedence/test_var_precedence.yml | 44 + .../var_precedence/vars/test_var_precedence.yml | 5 + 
test/integration/targets/var_templating/aliases | 1 +
.../targets/var_templating/group_vars/all.yml | 7 +
test/integration/targets/var_templating/runme.sh | 17 +
.../var_templating/task_vars_templating.yml | 58 +
.../var_templating/test_connection_vars.yml | 26 +
test/integration/targets/var_templating/undall.yml | 6 +
.../targets/var_templating/undefined.yml | 13 +
.../targets/var_templating/vars/connection.yml | 3 +
test/integration/targets/vault/aliases | 2 +
test/integration/targets/vault/empty-password | 0
.../targets/vault/encrypted-vault-password | 6 +
.../vault/encrypted_file_encrypted_var_password | 1 +
test/integration/targets/vault/example1_password | 1 +
test/integration/targets/vault/example2_password | 1 +
test/integration/targets/vault/example3_password | 1 +
test/integration/targets/vault/faux-editor.py | 44 +
.../vault/files/test_assemble/nonsecret.txt | 1 +
.../targets/vault/files/test_assemble/secret.vault | 7 +
.../targets/vault/format_1_1_AES256.yml | 6 +
.../targets/vault/format_1_2_AES256.yml | 6 +
.../integration/targets/vault/host_vars/myhost.yml | 7 +
.../targets/vault/host_vars/testhost.yml | 7 +
.../targets/vault/invalid_format/README.md | 1 +
.../invalid_format/broken-group-vars-tasks.yml | 23 +
.../invalid_format/broken-host-vars-tasks.yml | 7 +
.../group_vars/broken-group-vars.yml | 8 +
.../host_vars/broken-host-vars.example.com/vars | 11 +
.../targets/vault/invalid_format/inventory | 5 +
.../vault/invalid_format/original-broken-host-vars | 6 +
.../vault/invalid_format/original-group-vars.yml | 2 +
.../targets/vault/invalid_format/some-vars | 6 +
.../targets/vault/invalid_format/vault-secret | 1 +
test/integration/targets/vault/inventory.toml | 5 +
test/integration/targets/vault/password-script.py | 33 +
.../targets/vault/roles/test_vault/tasks/main.yml | 9 +
.../targets/vault/roles/test_vault/vars/main.yml | 9 +
.../vault/roles/test_vault_embedded/tasks/main.yml | 13 +
.../vault/roles/test_vault_embedded/vars/main.yml | 17 +
.../roles/test_vault_embedded_ids/tasks/main.yml | 29 +
.../roles/test_vault_embedded_ids/vars/main.yml | 194 +
.../test_vault_file_encrypted_embedded/README.md | 1 +
.../tasks/main.yml | 13 +
.../vars/main.yml | 76 +
.../roles/test_vaulted_template/tasks/main.yml | 19 +
.../templates/vaulted_template.j2 | 6 +
test/integration/targets/vault/runme.sh | 526 ++
.../targets/vault/runme_change_pip_installed.sh | 27 +
.../targets/vault/single_vault_as_string.yml | 123 +
.../integration/targets/vault/test-vault-client.py | 63 +
.../targets/vault/test_dangling_temp.yml | 34 +
.../targets/vault/test_utf8_value_in_filename.yml | 16 +
test/integration/targets/vault/test_vault.yml | 6 +
.../targets/vault/test_vault_embedded.yml | 4 +
.../targets/vault/test_vault_embedded_ids.yml | 4 +
.../vault/test_vault_file_encrypted_embedded.yml | 4 +
.../targets/vault/test_vaulted_inventory.yml | 5 +
.../targets/vault/test_vaulted_inventory_toml.yml | 9 +
.../targets/vault/test_vaulted_template.yml | 6 +
.../targets/vault/test_vaulted_utf8_value.yml | 15 +
.../targets/vault/vault-caf\303\251.yml" | 6 +
test/integration/targets/vault/vault-password | 1 +
.../targets/vault/vault-password-ansible | 1 +
.../integration/targets/vault/vault-password-wrong | 1 +
test/integration/targets/vault/vault-secret.txt | 6 +
test/integration/targets/vault/vaulted.inventory | 8 +
test/integration/targets/wait_for/aliases | 2 +
.../targets/wait_for/files/testserver.py | 16 +
test/integration/targets/wait_for/files/zombie.py | 13 +
test/integration/targets/wait_for/meta/main.yml | 2 +
test/integration/targets/wait_for/tasks/main.yml | 177 +
test/integration/targets/wait_for/vars/main.yml | 4 +
.../targets/wait_for_connection/aliases | 2 +
.../targets/wait_for_connection/tasks/main.yml | 30 +
.../targets/want_json_modules_posix/aliases | 1 +
.../want_json_modules_posix/library/helloworld.py | 31 +
.../targets/want_json_modules_posix/meta/main.yml | 2 +
.../targets/want_json_modules_posix/tasks/main.yml | 43 +
test/integration/targets/win_async_wrapper/aliases | 3 +
.../win_async_wrapper/library/async_test.ps1 | 48 +
.../targets/win_async_wrapper/tasks/main.yml | 257 +
test/integration/targets/win_become/aliases | 2 +
test/integration/targets/win_become/tasks/main.yml | 251 +
test/integration/targets/win_exec_wrapper/aliases | 2 +
.../win_exec_wrapper/library/test_all_options.ps1 | 12 +
.../library/test_common_functions.ps1 | 40 +
.../targets/win_exec_wrapper/library/test_fail.ps1 | 58 +
.../library/test_invalid_requires.ps1 | 9 +
.../library/test_min_os_version.ps1 | 8 +
.../library/test_min_ps_version.ps1 | 8 +
.../targets/win_exec_wrapper/tasks/main.yml | 274 +
test/integration/targets/win_fetch/aliases | 1 +
test/integration/targets/win_fetch/meta/main.yml | 2 +
test/integration/targets/win_fetch/tasks/main.yml | 212 +
test/integration/targets/win_module_utils/aliases | 2 +
.../win_module_utils/library/csharp_util.ps1 | 12 +
.../library/legacy_only_new_way.ps1 | 5 +
.../legacy_only_new_way_win_line_ending.ps1 | 6 +
.../library/legacy_only_old_way.ps1 | 5 +
.../legacy_only_old_way_win_line_ending.ps1 | 4 +
.../library/recursive_requires.ps1 | 13 +
.../win_module_utils/library/uses_bogus_utils.ps1 | 6 +
.../win_module_utils/library/uses_local_utils.ps1 | 9 +
.../Ansible.ModuleUtils.Recursive1.psm1 | 9 +
.../Ansible.ModuleUtils.Recursive2.psm1 | 12 +
.../Ansible.ModuleUtils.Recursive3.psm1 | 20 +
.../Ansible.ModuleUtils.ValidTestModule.psm1 | 3 +
.../win_module_utils/module_utils/Ansible.Test.cs | 26 +
.../targets/win_module_utils/tasks/main.yml | 71 +
test/integration/targets/win_raw/aliases | 2 +
test/integration/targets/win_raw/tasks/main.yml | 143 +
test/integration/targets/win_script/aliases | 2 +
.../targets/win_script/defaults/main.yml | 5 +
test/integration/targets/win_script/files/fail.bat | 1 +
.../win_script/files/space path/test_script.ps1 | 1 +
.../targets/win_script/files/test_script.bat | 2 +
.../targets/win_script/files/test_script.cmd | 2 +
.../targets/win_script/files/test_script.ps1 | 2 +
.../targets/win_script/files/test_script_bool.ps1 | 6 +
.../win_script/files/test_script_creates_file.ps1 | 3 +
.../win_script/files/test_script_removes_file.ps1 | 3 +
.../win_script/files/test_script_whoami.ps1 | 2 +
.../win_script/files/test_script_with_args.ps1 | 7 +
.../win_script/files/test_script_with_env.ps1 | 1 +
.../win_script/files/test_script_with_errors.ps1 | 9 +
.../files/test_script_with_splatting.ps1 | 6 +
test/integration/targets/win_script/tasks/main.yml | 316 +
test/integration/targets/windows-minimal/aliases | 4 +
.../targets/windows-minimal/library/win_ping.ps1 | 21 +
.../targets/windows-minimal/library/win_ping.py | 55 +
.../windows-minimal/library/win_ping_set_attr.ps1 | 31 +
.../library/win_ping_strict_mode_error.ps1 | 30 +
.../library/win_ping_syntax_error.ps1 | 30 +
.../windows-minimal/library/win_ping_throw.ps1 | 30 +
.../library/win_ping_throw_string.ps1 | 30 +
.../targets/windows-minimal/tasks/main.yml | 67 +
test/integration/targets/windows-paths/aliases | 3 +
.../targets/windows-paths/tasks/main.yml | 191 +
test/integration/targets/yum/aliases | 6 + test/integration/targets/yum/files/yum.conf | 5 + test/integration/targets/yum/meta/main.yml | 4 + .../targets/yum/tasks/check_mode_consistency.yml | 61 + test/integration/targets/yum/tasks/lock.yml | 28 + test/integration/targets/yum/tasks/main.yml | 71 + test/integration/targets/yum/tasks/proxy.yml | 186 + test/integration/targets/yum/tasks/repo.yml | 705 ++ test/integration/targets/yum/tasks/yum.yml | 873 +++ .../targets/yum/tasks/yum_group_remove.yml | 152 + .../targets/yum/tasks/yuminstallroot.yml | 122 + test/integration/targets/yum_repository/aliases | 3 + .../targets/yum_repository/defaults/main.yml | 5 + .../targets/yum_repository/handlers/main.yml | 4 + .../targets/yum_repository/meta/main.yml | 4 + .../targets/yum_repository/tasks/main.yml | 187 + test/lib/ansible_test/__init__.py | 0 test/lib/ansible_test/_data/ansible.cfg | 0 .../_data/cli/ansible_test_cli_stub.py | 28 + test/lib/ansible_test/_data/collection_detail.py | 95 + test/lib/ansible_test/_data/completion/docker.txt | 13 + test/lib/ansible_test/_data/completion/network.txt | 2 + test/lib/ansible_test/_data/completion/remote.txt | 12 + test/lib/ansible_test/_data/completion/windows.txt | 6 + test/lib/ansible_test/_data/coveragerc | 0 .../_data/cryptography-constraints.txt | 3 + test/lib/ansible_test/_data/injector/ansible | 1 + .../lib/ansible_test/_data/injector/ansible-config | 1 + .../ansible_test/_data/injector/ansible-connection | 1 + .../ansible_test/_data/injector/ansible-console | 1 + test/lib/ansible_test/_data/injector/ansible-doc | 1 + .../lib/ansible_test/_data/injector/ansible-galaxy | 1 + .../ansible_test/_data/injector/ansible-inventory | 1 + .../ansible_test/_data/injector/ansible-playbook | 1 + test/lib/ansible_test/_data/injector/ansible-pull | 1 + test/lib/ansible_test/_data/injector/ansible-test | 1 + test/lib/ansible_test/_data/injector/ansible-vault | 1 + test/lib/ansible_test/_data/injector/importer.py | 1 + test/lib/ansible_test/_data/injector/pytest | 1 + test/lib/ansible_test/_data/injector/python.py | 80 + .../_data/injector/virtualenv-isolated.sh | 18 + test/lib/ansible_test/_data/injector/virtualenv.sh | 14 + test/lib/ansible_test/_data/inventory | 6 + .../_data/playbooks/windows_coverage_setup.yml | 19 + .../_data/playbooks/windows_coverage_teardown.yml | 77 + test/lib/ansible_test/_data/pytest.ini | 9 + .../pytest/plugins/ansible_pytest_collections.py | 67 + .../pytest/plugins/ansible_pytest_coverage.py | 68 + test/lib/ansible_test/_data/quiet_pip.py | 70 + .../_data/requirements/ansible-test.txt | 6 + .../_data/requirements/constraints.txt | 64 + .../ansible_test/_data/requirements/coverage.txt | 1 + .../_data/requirements/integration.cloud.aws.txt | 3 + .../_data/requirements/integration.cloud.azure.txt | 39 + .../_data/requirements/integration.cloud.cs.txt | 2 + .../requirements/integration.cloud.hcloud.txt | 1 + .../_data/requirements/integration.cloud.nios.txt | 1 + .../requirements/integration.cloud.opennebula.txt | 1 + .../requirements/integration.cloud.openshift.txt | 1 + .../requirements/integration.cloud.vcenter.txt | 2 + .../_data/requirements/integration.txt | 6 + .../_data/requirements/network-integration.txt | 7 + .../_data/requirements/sanity.ansible-doc.txt | 2 + .../_data/requirements/sanity.changelog.txt | 2 + .../_data/requirements/sanity.import.txt | 2 + .../requirements/sanity.integration-aliases.txt | 1 + .../_data/requirements/sanity.pep8.txt | 1 + .../lib/ansible_test/_data/requirements/sanity.ps1 | 45 + 
.../_data/requirements/sanity.pylint.txt | 3 +
.../_data/requirements/sanity.rstcheck.txt | 1 +
.../_data/requirements/sanity.runtime-metadata.txt | 2 +
.../_data/requirements/sanity.validate-modules.txt | 3 +
.../_data/requirements/sanity.yamllint.txt | 1 +
test/lib/ansible_test/_data/requirements/units.txt | 7 +
.../_data/requirements/windows-integration.txt | 11 +
.../sanity/code-smell/action-plugin-docs.json | 13 +
.../_data/sanity/code-smell/action-plugin-docs.py | 68 +
.../_data/sanity/code-smell/changelog.json | 9 +
.../_data/sanity/code-smell/changelog.py | 49 +
.../_data/sanity/code-smell/empty-init.json | 14 +
.../_data/sanity/code-smell/empty-init.py | 16 +
.../code-smell/future-import-boilerplate.json | 6 +
.../sanity/code-smell/future-import-boilerplate.py | 46 +
.../_data/sanity/code-smell/line-endings.json | 4 +
.../_data/sanity/code-smell/line-endings.py | 18 +
.../sanity/code-smell/metaclass-boilerplate.json | 6 +
.../sanity/code-smell/metaclass-boilerplate.py | 44 +
.../_data/sanity/code-smell/no-assert.json | 10 +
.../_data/sanity/code-smell/no-assert.py | 24 +
.../_data/sanity/code-smell/no-basestring.json | 7 +
.../_data/sanity/code-smell/no-basestring.py | 21 +
.../_data/sanity/code-smell/no-dict-iteritems.json | 7 +
.../_data/sanity/code-smell/no-dict-iteritems.py | 21 +
.../_data/sanity/code-smell/no-dict-iterkeys.json | 7 +
.../_data/sanity/code-smell/no-dict-iterkeys.py | 21 +
.../sanity/code-smell/no-dict-itervalues.json | 7 +
.../_data/sanity/code-smell/no-dict-itervalues.py | 21 +
.../_data/sanity/code-smell/no-get-exception.json | 7 +
.../_data/sanity/code-smell/no-get-exception.py | 28 +
.../sanity/code-smell/no-illegal-filenames.json | 5 +
.../sanity/code-smell/no-illegal-filenames.py | 82 +
.../_data/sanity/code-smell/no-main-display.json | 10 +
.../_data/sanity/code-smell/no-main-display.py | 21 +
.../_data/sanity/code-smell/no-smart-quotes.json | 5 +
.../_data/sanity/code-smell/no-smart-quotes.py | 28 +
.../sanity/code-smell/no-unicode-literals.json | 7 +
.../_data/sanity/code-smell/no-unicode-literals.py | 21 +
.../_data/sanity/code-smell/replace-urlopen.json | 7 +
.../_data/sanity/code-smell/replace-urlopen.py | 21 +
.../_data/sanity/code-smell/runtime-metadata.json | 11 +
.../_data/sanity/code-smell/runtime-metadata.py | 150 +
.../_data/sanity/code-smell/shebang.json | 4 +
.../_data/sanity/code-smell/shebang.py | 120 +
.../_data/sanity/code-smell/symlinks.json | 5 +
.../_data/sanity/code-smell/symlinks.py | 32 +
.../sanity/code-smell/use-argspec-type-path.json | 10 +
.../sanity/code-smell/use-argspec-type-path.py | 21 +
.../_data/sanity/code-smell/use-compat-six.json | 6 +
.../_data/sanity/code-smell/use-compat-six.py | 21 +
.../ansible_test/_data/sanity/compile/compile.py | 41 +
.../ansible_test/_data/sanity/import/importer.py | 467 ++
.../_data/sanity/import/yaml_to_json.py | 27 +
.../sanity/integration-aliases/yaml_to_json.py | 15 +
.../_data/sanity/pep8/current-ignore.txt | 4 +
.../ansible_test/_data/sanity/pslint/pslint.ps1 | 43 +
.../ansible_test/_data/sanity/pslint/settings.psd1 | 13 +
.../_data/sanity/pylint/config/ansible-test.cfg | 39 +
.../_data/sanity/pylint/config/collection.cfg | 135 +
.../_data/sanity/pylint/config/default.cfg | 135 +
.../_data/sanity/pylint/config/sanity.cfg | 42 +
.../_data/sanity/pylint/plugins/deprecated.py | 250 +
.../_data/sanity/pylint/plugins/string_format.py | 90 +
.../_data/sanity/pylint/plugins/unwanted.py | 242 +
.../_data/sanity/rstcheck/ignore-substitutions.txt | 5 +
.../_data/sanity/shellcheck/exclude.txt | 3 +
.../_data/sanity/validate-modules/main.py | 8 +
.../_data/sanity/validate-modules/validate-modules | 1 +
.../validate-modules/validate_modules/__init__.py | 20 +
.../validate-modules/validate_modules/main.py | 2444 +++++++
.../validate_modules/module_args.py | 170 +
.../validate_modules/ps_argspec.ps1 | 110 +
.../validate-modules/validate_modules/schema.py | 488 ++
.../validate-modules/validate_modules/utils.py | 218 +
.../_data/sanity/yamllint/config/default.yml | 19 +
.../_data/sanity/yamllint/config/modules.yml | 19 +
.../_data/sanity/yamllint/config/plugins.yml | 19 +
.../_data/sanity/yamllint/yamllinter.py | 249 +
.../_data/setup/ConfigureRemotingForAnsible.ps1 | 453 ++
test/lib/ansible_test/_data/setup/docker.sh | 14 +
test/lib/ansible_test/_data/setup/remote.sh | 159 +
.../_data/setup/windows-httptester.ps1 | 228 +
test/lib/ansible_test/_data/sslcheck.py | 24 +
test/lib/ansible_test/_data/versions.py | 20 +
test/lib/ansible_test/_data/virtualenvcheck.py | 16 +
test/lib/ansible_test/_data/yamlcheck.py | 21 +
test/lib/ansible_test/_internal/__init__.py | 3 +
test/lib/ansible_test/_internal/ansible_util.py | 296 +
test/lib/ansible_test/_internal/cache.py | 35 +
test/lib/ansible_test/_internal/ci/__init__.py | 227 +
test/lib/ansible_test/_internal/ci/azp.py | 268 +
test/lib/ansible_test/_internal/ci/local.py | 217 +
test/lib/ansible_test/_internal/ci/shippable.py | 269 +
test/lib/ansible_test/_internal/classification.py | 977 +++
test/lib/ansible_test/_internal/cli.py | 1217 ++++
test/lib/ansible_test/_internal/cloud/__init__.py | 429 ++
test/lib/ansible_test/_internal/cloud/acme.py | 193 +
test/lib/ansible_test/_internal/cloud/aws.py | 124 +
test/lib/ansible_test/_internal/cloud/azure.py | 213 +
.../lib/ansible_test/_internal/cloud/cloudscale.py | 80 +
test/lib/ansible_test/_internal/cloud/cs.py | 300 +
test/lib/ansible_test/_internal/cloud/fallaxy.py | 177 +
test/lib/ansible_test/_internal/cloud/foreman.py | 191 +
test/lib/ansible_test/_internal/cloud/gcp.py | 62 +
test/lib/ansible_test/_internal/cloud/hcloud.py | 116 +
test/lib/ansible_test/_internal/cloud/nios.py | 193 +
.../lib/ansible_test/_internal/cloud/opennebula.py | 66 +
test/lib/ansible_test/_internal/cloud/openshift.py | 236 +
test/lib/ansible_test/_internal/cloud/scaleway.py | 72 +
test/lib/ansible_test/_internal/cloud/tower.py | 255 +
test/lib/ansible_test/_internal/cloud/vcenter.py | 232 +
test/lib/ansible_test/_internal/cloud/vultr.py | 71 +
test/lib/ansible_test/_internal/config.py | 356 +
test/lib/ansible_test/_internal/constants.py | 10 +
test/lib/ansible_test/_internal/core_ci.py | 680 ++
.../ansible_test/_internal/coverage/__init__.py | 325 +
.../_internal/coverage/analyze/__init__.py | 19 +
.../_internal/coverage/analyze/targets/__init__.py | 154 +
.../_internal/coverage/analyze/targets/combine.py | 64 +
.../_internal/coverage/analyze/targets/expand.py | 39 +
.../_internal/coverage/analyze/targets/filter.py | 104 +
.../_internal/coverage/analyze/targets/generate.py | 146 +
.../_internal/coverage/analyze/targets/missing.py | 109 +
.../lib/ansible_test/_internal/coverage/combine.py | 303 +
test/lib/ansible_test/_internal/coverage/erase.py | 27 +
test/lib/ansible_test/_internal/coverage/html.py | 45 +
test/lib/ansible_test/_internal/coverage/report.py | 156 +
test/lib/ansible_test/_internal/coverage/xml.py | 191 +
test/lib/ansible_test/_internal/coverage_util.py | 125 +
.../_internal/csharp_import_analysis.py | 106 +
test/lib/ansible_test/_internal/data.py | 200 +
test/lib/ansible_test/_internal/delegation.py | 667 ++
test/lib/ansible_test/_internal/diff.py | 256 +
test/lib/ansible_test/_internal/docker_util.py | 409 ++
test/lib/ansible_test/_internal/encoding.py | 41 +
test/lib/ansible_test/_internal/env.py | 293 +
test/lib/ansible_test/_internal/executor.py | 2146 +++++++
test/lib/ansible_test/_internal/git.py | 137 +
test/lib/ansible_test/_internal/http.py | 181 +
test/lib/ansible_test/_internal/import_analysis.py | 362 ++
test/lib/ansible_test/_internal/init.py | 16 +
.../ansible_test/_internal/integration/__init__.py | 349 +
test/lib/ansible_test/_internal/io.py | 94 +
test/lib/ansible_test/_internal/manage_ci.py | 335 +
test/lib/ansible_test/_internal/metadata.py | 151 +
test/lib/ansible_test/_internal/payload.py | 146 +
.../_internal/powershell_import_analysis.py | 105 +
.../ansible_test/_internal/provider/__init__.py | 78 +
.../_internal/provider/layout/__init__.py | 232 +
.../_internal/provider/layout/ansible.py | 47 +
.../_internal/provider/layout/collection.py | 123 +
.../_internal/provider/source/__init__.py | 18 +
.../ansible_test/_internal/provider/source/git.py | 72 +
.../_internal/provider/source/installed.py | 43 +
.../_internal/provider/source/unversioned.py | 87 +
test/lib/ansible_test/_internal/sanity/__init__.py | 946 +++
.../ansible_test/_internal/sanity/ansible_doc.py | 144 +
.../ansible_test/_internal/sanity/bin_symlinks.py | 110 +
test/lib/ansible_test/_internal/sanity/compile.py | 92 +
test/lib/ansible_test/_internal/sanity/ignores.py | 89 +
test/lib/ansible_test/_internal/sanity/import.py | 184 +
.../_internal/sanity/integration_aliases.py | 399 ++
test/lib/ansible_test/_internal/sanity/pep8.py | 109 +
test/lib/ansible_test/_internal/sanity/pslint.py | 121 +
test/lib/ansible_test/_internal/sanity/pylint.py | 289 +
test/lib/ansible_test/_internal/sanity/rstcheck.py | 95 +
.../ansible_test/_internal/sanity/sanity_docs.py | 62 +
.../ansible_test/_internal/sanity/shellcheck.py | 110 +
.../_internal/sanity/validate_modules.py | 149 +
test/lib/ansible_test/_internal/sanity/yamllint.py | 136 +
test/lib/ansible_test/_internal/target.py | 694 ++
test/lib/ansible_test/_internal/test.py | 524 ++
test/lib/ansible_test/_internal/thread.py | 57 +
test/lib/ansible_test/_internal/types.py | 32 +
test/lib/ansible_test/_internal/units/__init__.py | 159 +
test/lib/ansible_test/_internal/util.py | 853 +++
test/lib/ansible_test/_internal/util_common.py | 487 +
test/lib/ansible_test/_internal/venv.py | 227 +
.../config/cloud-config-aws.ini.template | 26 +
.../config/cloud-config-azure.ini.template | 32 +
.../config/cloud-config-cloudscale.ini.template | 9 +
.../config/cloud-config-cs.ini.template | 18 +
.../config/cloud-config-gcp.ini.template | 18 +
.../config/cloud-config-hcloud.ini.template | 15 +
.../config/cloud-config-opennebula.ini.template | 20 +
.../cloud-config-openshift.kubeconfig.template | 12 +
.../config/cloud-config-scaleway.ini.template | 13 +
.../config/cloud-config-tower.ini.template | 18 +
.../config/cloud-config-vcenter.ini.template | 26 +
.../config/cloud-config-vultr.ini.template | 12 +
.../config/inventory.networking.template | 42 +
.../ansible_test/config/inventory.winrm.template | 28 +
test/sanity/code-smell/configure-remoting-ps1.json | 4 +
test/sanity/code-smell/configure-remoting-ps1.py | 54 +
test/sanity/code-smell/deprecated-config.json | 10 +
test/sanity/code-smell/deprecated-config.py | 102 +
.../code-smell/deprecated-config.requirements.txt | 2 +
test/sanity/code-smell/docs-build.json | 6 +
test/sanity/code-smell/docs-build.py | 155 +
test/sanity/code-smell/docs-build.requirements.txt | 6 + test/sanity/code-smell/no-unwanted-files.json | 7 + test/sanity/code-smell/no-unwanted-files.py | 47 + test/sanity/code-smell/obsolete-files.json | 17 + test/sanity/code-smell/obsolete-files.py | 19 + test/sanity/code-smell/package-data.json | 6 + test/sanity/code-smell/package-data.py | 379 ++ .../code-smell/package-data.requirements.txt | 10 + test/sanity/code-smell/release-names.json | 4 + test/sanity/code-smell/release-names.py | 50 + .../code-smell/release-names.requirements.txt | 1 + .../required-and-default-attributes.json | 9 + .../code-smell/required-and-default-attributes.py | 21 + test/sanity/code-smell/skip.txt | 2 + test/sanity/code-smell/test-constraints.json | 9 + test/sanity/code-smell/test-constraints.py | 21 + test/sanity/code-smell/update-bundled.json | 8 + test/sanity/code-smell/update-bundled.py | 165 + .../code-smell/update-bundled.requirements.txt | 1 + test/sanity/ignore.txt | 427 ++ test/support/integration/plugins/cache/jsonfile.py | 63 + .../integration/plugins/filter/json_query.py | 53 + .../integration/plugins/inventory/aws_ec2.py | 760 +++ .../integration/plugins/inventory/docker_swarm.py | 351 + .../integration/plugins/inventory/foreman.py | 295 + .../support/integration/plugins/lookup/rabbitmq.py | 190 + .../plugins/module_utils/aws/__init__.py | 0 .../integration/plugins/module_utils/aws/core.py | 335 + .../integration/plugins/module_utils/aws/iam.py | 49 + .../integration/plugins/module_utils/aws/s3.py | 50 + .../plugins/module_utils/aws/waiters.py | 405 ++ .../plugins/module_utils/azure_rm_common.py | 1473 +++++ .../plugins/module_utils/azure_rm_common_rest.py | 97 + .../integration/plugins/module_utils/cloud.py | 217 + .../plugins/module_utils/compat/__init__.py | 0 .../plugins/module_utils/compat/ipaddress.py | 2476 +++++++ .../integration/plugins/module_utils/crypto.py | 2125 ++++++ .../integration/plugins/module_utils/database.py | 142 + .../plugins/module_utils/docker/__init__.py | 0 .../plugins/module_utils/docker/common.py | 1022 +++ .../plugins/module_utils/docker/swarm.py | 280 + .../integration/plugins/module_utils/ec2.py | 758 +++ .../plugins/module_utils/ecs/__init__.py | 0 .../integration/plugins/module_utils/ecs/api.py | 364 ++ .../integration/plugins/module_utils/mysql.py | 106 + .../plugins/module_utils/net_tools/__init__.py | 0 .../plugins/module_utils/network/__init__.py | 0 .../module_utils/network/common/__init__.py | 0 .../plugins/module_utils/network/common/utils.py | 643 ++ .../integration/plugins/module_utils/postgres.py | 330 + .../integration/plugins/module_utils/rabbitmq.py | 220 + .../_azure_rm_mariadbconfiguration_facts.py | 1 + .../modules/_azure_rm_mariadbdatabase_facts.py | 1 + .../modules/_azure_rm_mariadbfirewallrule_facts.py | 1 + .../modules/_azure_rm_mariadbserver_facts.py | 1 + .../plugins/modules/_azure_rm_resource_facts.py | 1 + .../plugins/modules/_azure_rm_webapp_facts.py | 1 + .../integration/plugins/modules/aws_az_info.py | 111 + test/support/integration/plugins/modules/aws_s3.py | 925 +++ .../plugins/modules/azure_rm_appserviceplan.py | 379 ++ .../plugins/modules/azure_rm_functionapp.py | 421 ++ .../plugins/modules/azure_rm_functionapp_info.py | 207 + .../modules/azure_rm_mariadbconfiguration.py | 241 + .../modules/azure_rm_mariadbconfiguration_info.py | 217 + .../plugins/modules/azure_rm_mariadbdatabase.py | 304 + .../modules/azure_rm_mariadbdatabase_info.py | 212 + .../modules/azure_rm_mariadbfirewallrule.py | 277 + 
.../modules/azure_rm_mariadbfirewallrule_info.py | 208 + .../plugins/modules/azure_rm_mariadbserver.py | 388 ++ .../plugins/modules/azure_rm_mariadbserver_info.py | 265 + .../plugins/modules/azure_rm_resource.py | 427 ++ .../plugins/modules/azure_rm_resource_info.py | 432 ++ .../plugins/modules/azure_rm_storageaccount.py | 684 ++ .../integration/plugins/modules/azure_rm_webapp.py | 1070 ++++ .../plugins/modules/azure_rm_webapp_info.py | 489 ++ .../plugins/modules/azure_rm_webappslot.py | 1058 +++ .../plugins/modules/cloud_init_data_facts.py | 134 + .../integration/plugins/modules/cloudformation.py | 837 +++ .../plugins/modules/cloudformation_info.py | 355 + .../integration/plugins/modules/deploy_helper.py | 521 ++ .../integration/plugins/modules/docker_swarm.py | 681 ++ test/support/integration/plugins/modules/ec2.py | 1766 +++++ .../integration/plugins/modules/ec2_ami_info.py | 282 + .../integration/plugins/modules/ec2_group.py | 1345 ++++ .../integration/plugins/modules/ec2_vpc_net.py | 524 ++ .../integration/plugins/modules/ec2_vpc_subnet.py | 604 ++ .../integration/plugins/modules/flatpak_remote.py | 243 + .../integration/plugins/modules/htpasswd.py | 275 + .../integration/plugins/modules/locale_gen.py | 237 + test/support/integration/plugins/modules/lvg.py | 295 + .../plugins/modules/mongodb_parameter.py | 223 + .../integration/plugins/modules/mongodb_user.py | 474 ++ test/support/integration/plugins/modules/pids.py | 89 + test/support/integration/plugins/modules/pkgng.py | 406 ++ .../integration/plugins/modules/postgresql_db.py | 657 ++ .../plugins/modules/postgresql_privs.py | 1097 ++++ .../plugins/modules/postgresql_query.py | 364 ++ .../integration/plugins/modules/postgresql_set.py | 434 ++ .../plugins/modules/postgresql_table.py | 601 ++ .../integration/plugins/modules/postgresql_user.py | 927 +++ .../integration/plugins/modules/rabbitmq_plugin.py | 180 + .../integration/plugins/modules/rabbitmq_queue.py | 257 + .../integration/plugins/modules/s3_bucket.py | 740 +++ .../integration/plugins/modules/sefcontext.py | 298 + .../support/integration/plugins/modules/selogin.py | 260 + .../integration/plugins/modules/synchronize.py | 618 ++ .../integration/plugins/modules/timezone.py | 909 +++ .../integration/plugins/modules/x509_crl.py | 783 +++ .../integration/plugins/modules/x509_crl_info.py | 281 + test/support/integration/plugins/modules/xml.py | 966 +++ test/support/integration/plugins/modules/zypper.py | 540 ++ .../ansible/netcommon/plugins/action/cli_config.py | 40 + .../ansible/netcommon/plugins/action/net_base.py | 90 + .../ansible/netcommon/plugins/action/net_get.py | 199 + .../ansible/netcommon/plugins/action/net_put.py | 235 + .../ansible/netcommon/plugins/action/network.py | 209 + .../ansible/netcommon/plugins/become/enable.py | 42 + .../netcommon/plugins/connection/httpapi.py | 324 + .../netcommon/plugins/connection/netconf.py | 404 ++ .../netcommon/plugins/connection/network_cli.py | 924 +++ .../netcommon/plugins/connection/persistent.py | 97 + .../netcommon/plugins/doc_fragments/netconf.py | 66 + .../plugins/doc_fragments/network_agnostic.py | 14 + .../ansible/netcommon/plugins/filter/ipaddr.py | 1186 ++++ .../ansible/netcommon/plugins/filter/network.py | 531 ++ .../ansible/netcommon/plugins/httpapi/restconf.py | 91 + .../plugins/module_utils/compat/ipaddress.py | 2578 ++++++++ .../module_utils/network/common/cfg/base.py | 27 + .../plugins/module_utils/network/common/config.py | 473 ++ .../module_utils/network/common/facts/facts.py | 162 + 
.../plugins/module_utils/network/common/netconf.py | 179 + .../plugins/module_utils/network/common/network.py | 275 + .../plugins/module_utils/network/common/parsing.py | 316 + .../plugins/module_utils/network/common/utils.py | 686 ++ .../module_utils/network/netconf/netconf.py | 147 + .../module_utils/network/restconf/restconf.py | 61 + .../netcommon/plugins/modules/cli_config.py | 444 ++ .../ansible/netcommon/plugins/modules/net_get.py | 71 + .../ansible/netcommon/plugins/modules/net_put.py | 82 + .../ansible/netcommon/plugins/netconf/default.py | 70 + .../cisco/ios/plugins/action/ios.py | 133 + .../cisco/ios/plugins/cliconf/ios.py | 465 ++ .../cisco/ios/plugins/doc_fragments/ios.py | 81 + .../ios/plugins/module_utils/network/ios/ios.py | 197 + .../cisco/ios/plugins/modules/ios_command.py | 229 + .../cisco/ios/plugins/modules/ios_config.py | 596 ++ .../cisco/ios/plugins/terminal/ios.py | 115 + .../vyos/vyos/plugins/action/vyos.py | 129 + .../vyos/vyos/plugins/cliconf/vyos.py | 342 + .../vyos/vyos/plugins/doc_fragments/vyos.py | 63 + .../network/vyos/argspec/facts/facts.py | 22 + .../vyos/argspec/firewall_rules/firewall_rules.py | 263 + .../network/vyos/argspec/interfaces/interfaces.py | 69 + .../vyos/argspec/l3_interfaces/l3_interfaces.py | 81 + .../vyos/argspec/lag_interfaces/lag_interfaces.py | 80 + .../vyos/argspec/lldp_global/lldp_global.py | 56 + .../argspec/lldp_interfaces/lldp_interfaces.py | 89 + .../vyos/argspec/static_routes/static_routes.py | 99 + .../vyos/config/lldp_interfaces/lldp_interfaces.py | 438 ++ .../module_utils/network/vyos/facts/facts.py | 83 + .../vyos/facts/firewall_rules/firewall_rules.py | 380 ++ .../network/vyos/facts/interfaces/interfaces.py | 134 + .../vyos/facts/l3_interfaces/l3_interfaces.py | 143 + .../vyos/facts/lag_interfaces/lag_interfaces.py | 152 + .../module_utils/network/vyos/facts/legacy/base.py | 162 + .../network/vyos/facts/lldp_global/lldp_global.py | 116 + .../vyos/facts/lldp_interfaces/lldp_interfaces.py | 155 + .../vyos/facts/static_routes/static_routes.py | 181 + .../module_utils/network/vyos/utils/utils.py | 231 + .../vyos/plugins/module_utils/network/vyos/vyos.py | 124 + .../vyos/vyos/plugins/modules/vyos_command.py | 223 + .../vyos/vyos/plugins/modules/vyos_config.py | 354 + .../vyos/vyos/plugins/modules/vyos_facts.py | 174 + .../vyos/plugins/modules/vyos_lldp_interfaces.py | 513 ++ .../vyos/vyos/plugins/terminal/vyos.py | 53 + .../ansible/windows/plugins/action/win_copy.py | 1 + .../windows/plugins/modules/async_status.ps1 | 1 + .../ansible/windows/plugins/modules/win_acl.ps1 | 1 + .../ansible/windows/plugins/modules/win_acl.py | 1 + .../ansible/windows/plugins/modules/win_copy.ps1 | 1 + .../ansible/windows/plugins/modules/win_copy.py | 1 + .../ansible/windows/plugins/modules/win_file.ps1 | 1 + .../ansible/windows/plugins/modules/win_file.py | 1 + .../ansible/windows/plugins/modules/win_ping.ps1 | 1 + .../ansible/windows/plugins/modules/win_ping.py | 1 + .../ansible/windows/plugins/modules/win_shell.ps1 | 1 + .../ansible/windows/plugins/modules/win_shell.py | 1 + .../ansible/windows/plugins/modules/win_stat.ps1 | 1 + .../ansible/windows/plugins/modules/win_stat.py | 1 + .../windows-integration/plugins/action/win_copy.py | 522 ++ .../plugins/action/win_reboot.py | 96 + .../plugins/action/win_template.py | 29 + .../windows-integration/plugins/become/runas.py | 70 + .../plugins/module_utils/Ansible.Service.cs | 1341 ++++ .../plugins/modules/async_status.ps1 | 58 + .../windows-integration/plugins/modules/setup.ps1 | 516 ++ 
.../windows-integration/plugins/modules/slurp.ps1 | 28 + .../plugins/modules/win_acl.ps1 | 225 + .../windows-integration/plugins/modules/win_acl.py | 132 + .../plugins/modules/win_certificate_store.ps1 | 260 + .../plugins/modules/win_certificate_store.py | 208 + .../plugins/modules/win_command.ps1 | 78 + .../plugins/modules/win_command.py | 136 + .../plugins/modules/win_copy.ps1 | 403 ++ .../plugins/modules/win_copy.py | 207 + .../plugins/modules/win_data_deduplication.ps1 | 129 + .../plugins/modules/win_data_deduplication.py | 87 + .../plugins/modules/win_dsc.ps1 | 398 ++ .../windows-integration/plugins/modules/win_dsc.py | 183 + .../plugins/modules/win_feature.ps1 | 111 + .../plugins/modules/win_feature.py | 149 + .../plugins/modules/win_file.ps1 | 152 + .../plugins/modules/win_file.py | 70 + .../plugins/modules/win_find.ps1 | 416 ++ .../plugins/modules/win_find.py | 345 + .../plugins/modules/win_format.ps1 | 200 + .../plugins/modules/win_format.py | 103 + .../plugins/modules/win_get_url.ps1 | 274 + .../plugins/modules/win_get_url.py | 215 + .../plugins/modules/win_lineinfile.ps1 | 450 ++ .../plugins/modules/win_lineinfile.py | 180 + .../plugins/modules/win_path.ps1 | 145 + .../plugins/modules/win_path.py | 79 + .../plugins/modules/win_ping.ps1 | 21 + .../plugins/modules/win_ping.py | 55 + .../plugins/modules/win_psexec.ps1 | 152 + .../plugins/modules/win_psexec.py | 172 + .../plugins/modules/win_reboot.py | 131 + .../plugins/modules/win_regedit.ps1 | 495 ++ .../plugins/modules/win_regedit.py | 210 + .../plugins/modules/win_security_policy.ps1 | 196 + .../plugins/modules/win_security_policy.py | 126 + .../plugins/modules/win_shell.ps1 | 138 + .../plugins/modules/win_shell.py | 167 + .../plugins/modules/win_stat.ps1 | 186 + .../plugins/modules/win_stat.py | 236 + .../plugins/modules/win_tempfile.ps1 | 72 + .../plugins/modules/win_tempfile.py | 67 + .../plugins/modules/win_template.py | 66 + .../plugins/modules/win_user.ps1 | 273 + .../plugins/modules/win_user.py | 194 + .../plugins/modules/win_user_right.ps1 | 349 + .../plugins/modules/win_user_right.py | 108 + .../plugins/modules/win_wait_for.ps1 | 259 + .../plugins/modules/win_wait_for.py | 155 + .../plugins/modules/win_whoami.ps1 | 837 +++ .../plugins/modules/win_whoami.py | 203 + test/units/__init__.py | 0 test/units/_vendor/test_vendor.py | 65 + test/units/ansible_test/__init__.py | 0 test/units/ansible_test/ci/__init__.py | 0 test/units/ansible_test/ci/test_azp.py | 31 + test/units/ansible_test/ci/test_shippable.py | 31 + test/units/ansible_test/ci/util.py | 53 + test/units/ansible_test/conftest.py | 14 + test/units/ansible_test/test_docker_util.py | 131 + test/units/cli/__init__.py | 0 test/units/cli/arguments/test_optparse_helpers.py | 37 + .../cli/galaxy/test_collection_extract_tar.py | 61 + test/units/cli/galaxy/test_display_collection.py | 47 + test/units/cli/galaxy/test_display_header.py | 41 + test/units/cli/galaxy/test_display_role.py | 28 + test/units/cli/galaxy/test_execute_list.py | 40 + .../cli/galaxy/test_execute_list_collection.py | 278 + .../units/cli/galaxy/test_get_collection_widths.py | 37 + test/units/cli/test_adhoc.py | 113 + test/units/cli/test_cli.py | 381 ++ test/units/cli/test_console.py | 51 + .../cli/test_data/collection_skeleton/README.md | 1 + .../collection_skeleton/docs/My Collection.md | 1 + .../test_data/collection_skeleton/galaxy.yml.j2 | 7 + .../collection_skeleton/playbooks/main.yml | 0 .../playbooks/templates/subfolder/test.conf.j2 | 2 + .../playbooks/templates/test.conf.j2 | 2 + 
.../collection_skeleton/plugins/action/.git_keep | 0 .../collection_skeleton/plugins/filter/.git_keep | 0 .../plugins/inventory/.git_keep | 0 .../collection_skeleton/plugins/lookup/.git_keep | 0 .../plugins/module_utils/.git_keep | 0 .../collection_skeleton/plugins/modules/.git_keep | 0 .../roles/common/tasks/main.yml.j2 | 3 + .../roles/common/templates/subfolder/test.conf.j2 | 2 + .../roles/common/templates/test.conf.j2 | 2 + test/units/cli/test_data/role_skeleton/.travis.yml | 29 + test/units/cli/test_data/role_skeleton/README.md | 38 + .../test_data/role_skeleton/defaults/main.yml.j2 | 2 + .../cli/test_data/role_skeleton/files/.git_keep | 0 .../test_data/role_skeleton/handlers/main.yml.j2 | 2 + test/units/cli/test_data/role_skeleton/inventory | 1 + .../cli/test_data/role_skeleton/meta/main.yml.j2 | 62 + .../cli/test_data/role_skeleton/tasks/main.yml.j2 | 2 + .../test_data/role_skeleton/templates/.git_keep | 0 .../role_skeleton/templates/subfolder/test.conf.j2 | 2 + .../test_data/role_skeleton/templates/test.conf.j2 | 2 + .../role_skeleton/templates_extra/templates.txt.j2 | 1 + .../cli/test_data/role_skeleton/tests/test.yml.j2 | 5 + .../cli/test_data/role_skeleton/vars/main.yml.j2 | 2 + test/units/cli/test_doc.py | 35 + test/units/cli/test_galaxy.py | 1341 ++++ test/units/cli/test_playbook.py | 46 + test/units/cli/test_vault.py | 217 + test/units/compat/__init__.py | 0 test/units/compat/builtins.py | 33 + test/units/compat/mock.py | 122 + test/units/compat/unittest.py | 38 + test/units/config/manager/__init__.py | 0 .../config/manager/test_find_ini_config_file.py | 253 + test/units/config/test.cfg | 4 + test/units/config/test.yml | 55 + test/units/config/test2.cfg | 4 + test/units/config/test_data.py | 41 + test/units/config/test_manager.py | 145 + test/units/errors/__init__.py | 0 test/units/errors/test_errors.py | 151 + test/units/executor/__init__.py | 0 .../executor/module_common/test_modify_module.py | 43 + .../executor/module_common/test_module_common.py | 197 + .../module_common/test_recursive_finder.py | 127 + test/units/executor/test_interpreter_discovery.py | 87 + test/units/executor/test_play_iterator.py | 458 ++ test/units/executor/test_playbook_executor.py | 148 + test/units/executor/test_task_executor.py | 656 ++ .../executor/test_task_queue_manager_callbacks.py | 121 + test/units/executor/test_task_result.py | 171 + test/units/galaxy/__init__.py | 0 test/units/galaxy/test_api.py | 912 +++ test/units/galaxy/test_collection.py | 1326 ++++ test/units/galaxy/test_collection_install.py | 816 +++ test/units/galaxy/test_token.py | 55 + test/units/galaxy/test_user_agent.py | 18 + test/units/inventory/test_group.py | 155 + test/units/inventory/test_host.py | 112 + .../group_vars/noparse/all.yml~ | 2 + .../group_vars/noparse/file.txt | 2 + .../inventory_test_data/group_vars/parse/all.yml | 2 + test/units/mock/__init__.py | 0 test/units/mock/loader.py | 117 + test/units/mock/path.py | 8 + test/units/mock/procenv.py | 90 + test/units/mock/vault_helper.py | 39 + test/units/mock/yaml_helper.py | 124 + test/units/module_utils/__init__.py | 0 test/units/module_utils/basic/__init__.py | 0 .../module_utils/basic/test__log_invocation.py | 55 + .../basic/test__symbolic_mode_to_octal.py | 103 + .../units/module_utils/basic/test_argument_spec.py | 706 ++ test/units/module_utils/basic/test_atomic_move.py | 223 + .../module_utils/basic/test_deprecate_warn.py | 73 + .../module_utils/basic/test_dict_converters.py | 31 + test/units/module_utils/basic/test_exit_json.py | 154 + 
test/units/module_utils/basic/test_filesystem.py | 136 + .../module_utils/basic/test_get_file_attributes.py | 50 + .../module_utils/basic/test_get_module_path.py | 22 + .../basic/test_heuristic_log_sanitize.py | 89 + test/units/module_utils/basic/test_imports.py | 128 + test/units/module_utils/basic/test_log.py | 152 + test/units/module_utils/basic/test_no_log.py | 160 + .../basic/test_platform_distribution.py | 199 + test/units/module_utils/basic/test_run_command.py | 283 + test/units/module_utils/basic/test_safe_eval.py | 70 + .../units/module_utils/basic/test_sanitize_keys.py | 98 + test/units/module_utils/basic/test_selinux.py | 254 + test/units/module_utils/basic/test_set_cwd.py | 195 + .../basic/test_set_mode_if_different.py | 183 + test/units/module_utils/basic/test_tmpdir.py | 119 + test/units/module_utils/common/__init__.py | 0 .../common/parameters/test_handle_aliases.py | 102 + .../common/parameters/test_list_deprecations.py | 44 + .../common/parameters/test_list_no_log_values.py | 228 + .../common/process/test_get_bin_path.py | 39 + test/units/module_utils/common/test_collections.py | 175 + .../common/test_dict_transformations.py | 135 + test/units/module_utils/common/test_network.py | 68 + test/units/module_utils/common/test_removed.py | 62 + test/units/module_utils/common/test_sys_info.py | 150 + test/units/module_utils/common/test_utils.py | 46 + .../text/converters/test_container_to_bytes.py | 95 + .../text/converters/test_container_to_text.py | 78 + .../text/converters/test_json_encode_fallback.py | 55 + .../common/text/converters/test_jsonify.py | 27 + .../common/text/converters/test_to_str.py | 61 + .../common/text/formatters/test_bytes_to_human.py | 116 + .../common/text/formatters/test_human_to_bytes.py | 185 + .../text/formatters/test_lenient_lowercase.py | 68 + .../validation/test_check_mutually_exclusive.py | 57 + .../validation/test_check_required_arguments.py | 88 + .../validation/test_check_required_together.py | 57 + .../common/validation/test_check_type_bits.py | 43 + .../common/validation/test_check_type_bool.py | 49 + .../common/validation/test_check_type_bytes.py | 50 + .../common/validation/test_check_type_dict.py | 34 + .../common/validation/test_check_type_float.py | 38 + .../common/validation/test_check_type_int.py | 34 + .../common/validation/test_check_type_jsonarg.py | 36 + .../common/validation/test_check_type_list.py | 32 + .../common/validation/test_check_type_path.py | 28 + .../common/validation/test_check_type_raw.py | 23 + .../common/validation/test_check_type_str.py | 33 + .../common/validation/test_count_terms.py | 40 + .../module_utils/common/warnings/test_deprecate.py | 96 + .../module_utils/common/warnings/test_warn.py | 61 + test/units/module_utils/conftest.py | 72 + test/units/module_utils/facts/__init__.py | 0 test/units/module_utils/facts/base.py | 65 + .../facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo | 40 + .../facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo | 32 + .../facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo | 12 + .../facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo | 75 + .../facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo | 39 + .../cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo | 44 + .../fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo | 125 + .../fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu | 61 + .../facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo | 56 + .../facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo | 104 + .../facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo | 216 + .../facts/fixtures/distribution_files/ClearLinux | 10 + 
.../facts/fixtures/distribution_files/CoreOS | 10 + .../facts/fixtures/distribution_files/LinuxMint | 12 + .../facts/fixtures/distribution_files/Slackware | 1 + .../fixtures/distribution_files/SlackwareCurrent | 1 + .../facts/fixtures/findmount_output.txt | 40 + test/units/module_utils/facts/hardware/__init__.py | 0 .../module_utils/facts/hardware/linux_data.py | 585 ++ .../module_utils/facts/hardware/test_linux.py | 175 + .../facts/hardware/test_linux_get_cpu_info.py | 62 + .../facts/hardware/test_sunos_get_uptime_facts.py | 20 + test/units/module_utils/facts/network/__init__.py | 0 .../module_utils/facts/network/test_fc_wwn.py | 94 + .../module_utils/facts/network/test_generic_bsd.py | 175 + .../facts/network/test_iscsi_get_initiator.py | 54 + test/units/module_utils/facts/other/__init__.py | 0 test/units/module_utils/facts/other/test_facter.py | 228 + test/units/module_utils/facts/other/test_ohai.py | 6768 ++++++++++++++++++++ test/units/module_utils/facts/system/__init__.py | 0 .../facts/system/distribution/__init__.py | 0 .../facts/system/distribution/conftest.py | 21 + .../distribution/fixtures/almalinux_8_3_beta.json | 53 + .../distribution/fixtures/arch_linux_na.json | 24 + .../fixtures/arch_linux_no_arch-release_na.json | 23 + .../distribution/fixtures/archlinux_rolling.json | 31 + .../system/distribution/fixtures/centos_6.7.json | 31 + .../distribution/fixtures/clearlinux_26580.json | 24 + .../distribution/fixtures/clearlinux_28120.json | 24 + .../distribution/fixtures/core_os_1911.5.0.json | 23 + .../distribution/fixtures/core_os_976.0.0.json | 23 + .../distribution/fixtures/cumulus_linux_2.5.4.json | 23 + .../distribution/fixtures/cumulus_linux_3.7.3.json | 23 + .../system/distribution/fixtures/debian_10.json | 40 + .../system/distribution/fixtures/debian_7.9.json | 39 + .../distribution/fixtures/debian_stretch_sid.json | 36 + .../facts/system/distribution/fixtures/devuan.json | 23 + .../system/distribution/fixtures/fedora_22.json | 25 + .../system/distribution/fixtures/fedora_25.json | 25 + .../system/distribution/fixtures/fedora_31.json | 55 + .../distribution/fixtures/flatcar_2492.0.0.json | 24 + .../system/distribution/fixtures/kali_2019.1.json | 25 + .../distribution/fixtures/kde_neon_16.04.json | 42 + .../distribution/fixtures/linux_mint_18.2.json | 25 + .../distribution/fixtures/linux_mint_19.1.json | 24 + .../system/distribution/fixtures/nexenta_3.json | 25 + .../system/distribution/fixtures/nexenta_4.json | 24 + .../facts/system/distribution/fixtures/omnios.json | 24 + .../distribution/fixtures/openeuler_20.03.json | 28 + .../system/distribution/fixtures/openindiana.json | 24 + .../distribution/fixtures/opensuse_13.2.json | 24 + .../distribution/fixtures/opensuse_leap_15.0.json | 23 + .../distribution/fixtures/opensuse_leap_15.1.json | 36 + .../distribution/fixtures/opensuse_leap_42.1.json | 24 + .../fixtures/opensuse_tumbleweed_20160917.json | 23 + .../system/distribution/fixtures/pop_os_20.04.json | 29 + .../system/distribution/fixtures/redhat_6.7.json | 25 + .../system/distribution/fixtures/redhat_7.2.json | 25 + .../system/distribution/fixtures/redhat_7.7.json | 43 + .../system/distribution/fixtures/sles_11.3.json | 23 + .../system/distribution/fixtures/sles_11.4.json | 24 + .../system/distribution/fixtures/sles_12_sp0.json | 24 + .../system/distribution/fixtures/sles_12_sp1.json | 24 + .../distribution/fixtures/smartos_global_zone.json | 24 + .../system/distribution/fixtures/smartos_zone.json | 25 + .../system/distribution/fixtures/smgl_na.json | 23 + 
.../system/distribution/fixtures/solaris_10.json | 25 + .../system/distribution/fixtures/solaris_11.3.json | 25 + .../system/distribution/fixtures/solaris_11.4.json | 35 + .../system/distribution/fixtures/solaris_11.json | 26 + .../system/distribution/fixtures/steamos_2.0.json | 40 + .../distribution/fixtures/ubuntu_10.04_guess.json | 23 + .../system/distribution/fixtures/ubuntu_12.04.json | 24 + .../system/distribution/fixtures/ubuntu_14.04.json | 24 + .../system/distribution/fixtures/ubuntu_16.04.json | 24 + .../system/distribution/fixtures/ubuntu_18.04.json | 39 + .../distribution/fixtures/virtuozzo_7.3.json | 25 + .../distribution/test_distribution_sles4sap.py | 33 + .../distribution/test_distribution_version.py | 143 + .../test_parse_distribution_file_ClearLinux.py | 51 + .../test_parse_distribution_file_Slackware.py | 37 + .../module_utils/facts/system/test_cmdline.py | 67 + test/units/module_utils/facts/system/test_lsb.py | 108 + .../module_utils/facts/test_ansible_collector.py | 504 ++ test/units/module_utils/facts/test_collector.py | 563 ++ test/units/module_utils/facts/test_collectors.py | 430 ++ test/units/module_utils/facts/test_date_time.py | 103 + test/units/module_utils/facts/test_facts.py | 644 ++ test/units/module_utils/facts/test_timeout.py | 171 + test/units/module_utils/facts/test_utils.py | 39 + test/units/module_utils/facts/virtual/__init__.py | 0 .../units/module_utils/facts/virtual/test_linux.py | 26 + test/units/module_utils/json_utils/__init__.py | 0 .../json_utils/test_filter_non_json_lines.py | 88 + .../module_utils/parsing/test_convert_bool.py | 60 + test/units/module_utils/test_api.py | 48 + test/units/module_utils/test_distro.py | 38 + test/units/module_utils/urls/__init__.py | 0 test/units/module_utils/urls/fixtures/client.key | 28 + test/units/module_utils/urls/fixtures/client.pem | 81 + test/units/module_utils/urls/fixtures/client.txt | 3 + .../units/module_utils/urls/fixtures/multipart.txt | 166 + test/units/module_utils/urls/fixtures/netrc | 3 + .../urls/test_RedirectHandlerFactory.py | 138 + test/units/module_utils/urls/test_Request.py | 456 ++ .../module_utils/urls/test_RequestWithMethod.py | 22 + test/units/module_utils/urls/test_fetch_url.py | 220 + .../module_utils/urls/test_generic_urlparse.py | 57 + .../module_utils/urls/test_prepare_multipart.py | 102 + test/units/module_utils/urls/test_urls.py | 109 + test/units/modules/__init__.py | 0 test/units/modules/conftest.py | 31 + test/units/modules/test_apt.py | 53 + test/units/modules/test_async_wrapper.py | 57 + test/units/modules/test_copy.py | 215 + test/units/modules/test_iptables.py | 919 +++ test/units/modules/test_known_hosts.py | 110 + test/units/modules/test_pip.py | 38 + test/units/modules/test_systemd.py | 52 + test/units/modules/test_yum.py | 207 + test/units/modules/utils.py | 50 + test/units/parsing/__init__.py | 0 test/units/parsing/fixtures/ajson.json | 19 + test/units/parsing/fixtures/vault.yml | 6 + test/units/parsing/test_ajson.py | 187 + test/units/parsing/test_dataloader.py | 239 + test/units/parsing/test_mod_args.py | 137 + test/units/parsing/test_splitter.py | 110 + test/units/parsing/test_unquote.py | 51 + test/units/parsing/utils/__init__.py | 0 test/units/parsing/utils/test_addresses.py | 98 + test/units/parsing/utils/test_jsonify.py | 39 + test/units/parsing/utils/test_yaml.py | 34 + test/units/parsing/vault/__init__.py | 0 test/units/parsing/vault/test_vault.py | 941 +++ test/units/parsing/vault/test_vault_editor.py | 517 ++ test/units/parsing/yaml/__init__.py | 0 
test/units/parsing/yaml/test_dumper.py | 103 + test/units/parsing/yaml/test_loader.py | 436 ++ test/units/parsing/yaml/test_objects.py | 164 + test/units/playbook/__init__.py | 0 test/units/playbook/role/__init__.py | 0 test/units/playbook/role/test_include_role.py | 248 + test/units/playbook/role/test_role.py | 422 ++ test/units/playbook/test_attribute.py | 57 + test/units/playbook/test_base.py | 630 ++ test/units/playbook/test_block.py | 82 + test/units/playbook/test_collectionsearch.py | 78 + test/units/playbook/test_conditional.py | 240 + test/units/playbook/test_helpers.py | 405 ++ test/units/playbook/test_included_file.py | 332 + test/units/playbook/test_play.py | 132 + test/units/playbook/test_play_context.py | 111 + test/units/playbook/test_playbook.py | 61 + test/units/playbook/test_taggable.py | 102 + test/units/playbook/test_task.py | 114 + test/units/plugins/__init__.py | 0 test/units/plugins/action/__init__.py | 0 test/units/plugins/action/test_action.py | 683 ++ test/units/plugins/action/test_gather_facts.py | 87 + test/units/plugins/action/test_raw.py | 105 + test/units/plugins/become/__init__.py | 0 test/units/plugins/become/conftest.py | 37 + test/units/plugins/become/test_su.py | 40 + test/units/plugins/become/test_sudo.py | 45 + test/units/plugins/cache/__init__.py | 0 test/units/plugins/cache/test_cache.py | 167 + test/units/plugins/callback/__init__.py | 0 test/units/plugins/callback/test_callback.py | 412 ++ test/units/plugins/connection/__init__.py | 0 test/units/plugins/connection/test_connection.py | 169 + test/units/plugins/connection/test_local.py | 40 + test/units/plugins/connection/test_paramiko.py | 42 + test/units/plugins/connection/test_psrp.py | 233 + test/units/plugins/connection/test_ssh.py | 688 ++ test/units/plugins/connection/test_winrm.py | 431 ++ test/units/plugins/filter/__init__.py | 0 test/units/plugins/filter/test_core.py | 41 + test/units/plugins/filter/test_mathstuff.py | 176 + test/units/plugins/inventory/__init__.py | 0 test/units/plugins/inventory/test_constructed.py | 206 + test/units/plugins/inventory/test_inventory.py | 207 + test/units/plugins/inventory/test_script.py | 105 + test/units/plugins/loader_fixtures/__init__.py | 0 .../plugins/loader_fixtures/import_fixture.py | 9 + test/units/plugins/lookup/__init__.py | 0 test/units/plugins/lookup/test_env.py | 35 + test/units/plugins/lookup/test_ini.py | 63 + test/units/plugins/lookup/test_password.py | 501 ++ test/units/plugins/shell/__init__.py | 0 test/units/plugins/shell/test_cmd.py | 19 + test/units/plugins/shell/test_powershell.py | 83 + test/units/plugins/strategy/__init__.py | 0 test/units/plugins/strategy/test_linear.py | 177 + test/units/plugins/strategy/test_strategy.py | 546 ++ test/units/plugins/test_plugins.py | 134 + test/units/regex/test_invalid_var_names.py | 27 + test/units/requirements.txt | 6 + test/units/template/__init__.py | 0 test/units/template/test_native_concat.py | 28 + test/units/template/test_safe_eval.py | 44 + test/units/template/test_templar.py | 446 ++ test/units/template/test_template_utilities.py | 117 + test/units/template/test_vars.py | 81 + test/units/test_constants.py | 122 + test/units/test_context.py | 27 + test/units/utils/__init__.py | 0 test/units/utils/collection_loader/__init__.py | 0 .../builtin/plugins/modules/shouldnotload.py | 4 + .../testns/testcoll/meta/runtime.yml | 4 + .../testns/testcoll/plugins/action/my_action.py | 8 + .../testcoll/plugins/module_utils/__init__.py | 0 .../testcoll/plugins/module_utils/my_other_util.py | 4 + 
.../testcoll/plugins/module_utils/my_util.py | 6 +
.../testns/testcoll/plugins/modules/__init__.py | 5 +
.../testns/testcoll/plugins/modules/amodule.py | 6 +
.../testns/testcoll/roles/some_role/.gitkeep | 0
.../ansible_collections/__init__.py | 5 +
.../ansible_collections/ansible/__init__.py | 5 +
.../ansible_collections/testns/__init__.py | 5 +
.../testns/testcoll/__init__.py | 5 +
.../ansible/playbook_adj_other/.gitkeep | 0
.../freshns/playbook_adj_other/.gitkeep | 0
.../testns/playbook_adj_other/.gitkeep | 0
.../collection_loader/test_collection_loader.py | 834 +++
test/units/utils/display/test_display.py | 20 +
test/units/utils/display/test_logger.py | 31 +
test/units/utils/display/test_warning.py | 42 +
test/units/utils/test_cleanup_tmp_file.py | 48 +
test/units/utils/test_context_objects.py | 70 +
test/units/utils/test_encrypt.py | 168 +
test/units/utils/test_helpers.py | 34 +
test/units/utils/test_isidentifier.py | 49 +
test/units/utils/test_plugin_docs.py | 333 +
test/units/utils/test_shlex.py | 41 +
test/units/utils/test_unsafe_proxy.py | 110 +
test/units/utils/test_vars.py | 282 +
test/units/utils/test_version.py | 335 +
test/units/vars/__init__.py | 0
test/units/vars/test_module_response_deepcopy.py | 60 +
test/units/vars/test_variable_manager.py | 307 +
3407 files changed, 234814 insertions(+)
create mode 100644 test/ansible_test/Makefile
create mode 100644 test/ansible_test/unit/test_diff.py
create mode 100644 test/ansible_test/validate-modules-unit/test_validate_modules_regex.py
create mode 100644 test/integration/network-integration.cfg
create mode 100644 test/integration/network-integration.requirements.txt
create mode 100644 test/integration/targets/add_host/aliases
create mode 100644 test/integration/targets/add_host/tasks/main.yml
create mode 100644 test/integration/targets/ansiballz_python/aliases
create mode 100644 test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py
create mode 100644 test/integration/targets/ansiballz_python/library/custom_module.py
create mode 100644 test/integration/targets/ansiballz_python/library/sys_check.py
create mode 100644 test/integration/targets/ansiballz_python/module_utils/custom_util.py
create mode 100644 test/integration/targets/ansiballz_python/tasks/main.yml
create mode 100644 test/integration/targets/ansible-doc/aliases
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py
create mode 100644 test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
create mode 100644 test/integration/targets/ansible-doc/fakemodule.output
create mode 100644 test/integration/targets/ansible-doc/inventory
create mode 100644 test/integration/targets/ansible-doc/library/test_docs.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_missing_description.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_no_metadata.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_no_status.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_removed_status.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_returns.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_returns_broken.py
create mode 100644 test/integration/targets/ansible-doc/library/test_docs_suboptions.py
create mode 100644 test/integration/targets/ansible-doc/library/test_empty.py
create mode 100644 test/integration/targets/ansible-doc/library/test_no_docs.py
create mode 100644 test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py
create mode 100644 test/integration/targets/ansible-doc/library/test_no_docs_no_status.py
create mode 100644 test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py
create mode 100755 test/integration/targets/ansible-doc/runme.sh
create mode 100644 test/integration/targets/ansible-doc/test.yml
create mode 100644 test/integration/targets/ansible-doc/test_docs_returns.output
create mode 100644 test/integration/targets/ansible-doc/test_docs_suboptions.output
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/aliases
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/aliases
create mode 100644 test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py
create mode 100644 test/integration/targets/ansible-galaxy-collection/library/setup_collections.py
create mode 100644 test/integration/targets/ansible-galaxy-collection/meta/main.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/build.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/download.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/init.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/install.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/main.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/tasks/publish.yml
create mode 100644 test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2
create mode 100644 test/integration/targets/ansible-galaxy-collection/vars/main.yml
create mode 100644 test/integration/targets/ansible-galaxy/aliases
create mode 100644 test/integration/targets/ansible-galaxy/cleanup-default.yml
create mode 100644 test/integration/targets/ansible-galaxy/cleanup-freebsd.yml
create mode 100644 test/integration/targets/ansible-galaxy/cleanup.yml
create mode 100755 test/integration/targets/ansible-galaxy/runme.sh
create mode 100644 test/integration/targets/ansible-galaxy/setup.yml
create mode 100644 test/integration/targets/ansible-runner/aliases
create mode 100644 test/integration/targets/ansible-runner/files/adhoc_example1.py
create mode 100644 test/integration/targets/ansible-runner/files/constraints.txt
create mode 100644 test/integration/targets/ansible-runner/files/playbook_example1.py
create mode 100644 test/integration/targets/ansible-runner/filter_plugins/parse.py
create mode 100644 test/integration/targets/ansible-runner/inventory
create mode 100755 test/integration/targets/ansible-runner/runme.sh
create mode 100644 test/integration/targets/ansible-runner/tasks/adhoc_example1.yml
create mode 100644 test/integration/targets/ansible-runner/tasks/main.yml
create mode 100644 test/integration/targets/ansible-runner/tasks/playbook_example1.yml
create mode 100644 test/integration/targets/ansible-runner/tasks/setup.yml
create mode 100644 test/integration/targets/ansible-runner/test.yml
create mode 100644 test/integration/targets/ansible-test-docker/aliases
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
create mode 100644 test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
create mode 100755 test/integration/targets/ansible-test-docker/collection-tests/docker.sh
create mode 100755 test/integration/targets/ansible-test-docker/runme.sh
create mode 100644 test/integration/targets/ansible-test/aliases
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/filter/check_pylint.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/bad.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py
create mode 100644 test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt
create mode 100755 test/integration/targets/ansible-test/collection-tests/constraints.sh
create mode 100755 test/integration/targets/ansible-test/collection-tests/coverage.sh
create mode 100755 test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh
create mode 100755 test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh
create mode 100755 test/integration/targets/ansible-test/collection-tests/git-common.bash
create mode 100644 test/integration/targets/ansible-test/collection-tests/install-git.yml
create mode 100755 test/integration/targets/ansible-test/collection-tests/venv.sh
create mode 100755 test/integration/targets/ansible-test/runme.sh
create mode 100644 test/integration/targets/ansible/adhoc-callback.stdout
create mode 100644 test/integration/targets/ansible/aliases
create mode 100644 "test/integration/targets/ansible/ansible-test\303\251.cfg"
create mode 100644 test/integration/targets/ansible/callback_plugins/callback_debug.py
create mode 100644 test/integration/targets/ansible/no-extension
create mode 100644 test/integration/targets/ansible/playbook.yml
create mode 100644 test/integration/targets/ansible/playbookdir_cfg.ini
create mode 100755 test/integration/targets/ansible/runme.sh
create mode 100644 test/integration/targets/ansible/vars.yml
create mode 100644 
test/integration/targets/any_errors_fatal/18602.yml create mode 100644 test/integration/targets/any_errors_fatal/aliases create mode 100644 test/integration/targets/any_errors_fatal/always_block.yml create mode 100644 test/integration/targets/any_errors_fatal/inventory create mode 100644 test/integration/targets/any_errors_fatal/on_includes.yml create mode 100644 test/integration/targets/any_errors_fatal/play_level.yml create mode 100755 test/integration/targets/any_errors_fatal/runme.sh create mode 100644 test/integration/targets/any_errors_fatal/test_fatal.yml create mode 100644 test/integration/targets/apt/aliases create mode 100644 test/integration/targets/apt/defaults/main.yml create mode 100644 test/integration/targets/apt/meta/main.yml create mode 100644 test/integration/targets/apt/tasks/apt-builddep.yml create mode 100644 test/integration/targets/apt/tasks/apt-multiarch.yml create mode 100644 test/integration/targets/apt/tasks/apt.yml create mode 100644 test/integration/targets/apt/tasks/main.yml create mode 100644 test/integration/targets/apt/tasks/repo.yml create mode 100644 test/integration/targets/apt/tasks/upgrade.yml create mode 100644 test/integration/targets/apt/tasks/url-with-deps.yml create mode 100644 test/integration/targets/apt/vars/Ubuntu-20.yml create mode 100644 test/integration/targets/apt/vars/default.yml create mode 100644 test/integration/targets/apt_key/aliases create mode 100644 test/integration/targets/apt_key/meta/main.yml create mode 100644 test/integration/targets/apt_key/tasks/apt_key.yml create mode 100644 test/integration/targets/apt_key/tasks/main.yml create mode 100644 test/integration/targets/apt_repository/aliases create mode 100644 test/integration/targets/apt_repository/meta/main.yml create mode 100644 test/integration/targets/apt_repository/tasks/apt.yml create mode 100644 test/integration/targets/apt_repository/tasks/cleanup.yml create mode 100644 test/integration/targets/apt_repository/tasks/main.yml create mode 100644 test/integration/targets/apt_repository/tasks/mode.yaml create mode 100644 test/integration/targets/apt_repository/tasks/mode_cleanup.yaml create mode 100644 test/integration/targets/args/aliases create mode 100755 test/integration/targets/args/runme.sh create mode 100644 test/integration/targets/argspec/aliases create mode 100644 test/integration/targets/argspec/library/argspec.py create mode 100644 test/integration/targets/argspec/tasks/main.yml create mode 100644 test/integration/targets/argspec/tasks/password_no_log.yml create mode 100644 test/integration/targets/assemble/aliases create mode 100644 test/integration/targets/assemble/files/fragment1 create mode 100644 test/integration/targets/assemble/files/fragment2 create mode 100644 test/integration/targets/assemble/files/fragment3 create mode 100644 test/integration/targets/assemble/files/fragment4 create mode 100644 test/integration/targets/assemble/files/fragment5 create mode 100644 test/integration/targets/assemble/meta/main.yml create mode 100644 test/integration/targets/assemble/tasks/main.yml create mode 100644 test/integration/targets/assert/aliases create mode 100644 test/integration/targets/assert/assert_quiet.out.quiet.stderr create mode 100644 test/integration/targets/assert/assert_quiet.out.quiet.stdout create mode 100644 test/integration/targets/assert/inventory create mode 100644 test/integration/targets/assert/quiet.yml create mode 100755 test/integration/targets/assert/runme.sh create mode 100644 test/integration/targets/async/aliases create mode 100644 
test/integration/targets/async/library/async_test.py create mode 100644 test/integration/targets/async/meta/main.yml create mode 100644 test/integration/targets/async/tasks/main.yml create mode 100644 test/integration/targets/async_extra_data/aliases create mode 100644 test/integration/targets/async_extra_data/library/junkping.py create mode 100755 test/integration/targets/async_extra_data/runme.sh create mode 100644 test/integration/targets/async_extra_data/test_async.yml create mode 100644 test/integration/targets/async_fail/action_plugins/normal.py create mode 100644 test/integration/targets/async_fail/aliases create mode 100644 test/integration/targets/async_fail/library/async_test.py create mode 100644 test/integration/targets/async_fail/meta/main.yml create mode 100644 test/integration/targets/async_fail/tasks/main.yml create mode 100644 test/integration/targets/become/aliases create mode 100644 test/integration/targets/become/files/baz.txt create mode 100644 test/integration/targets/become/tasks/default.yml create mode 100644 test/integration/targets/become/tasks/main.yml create mode 100644 test/integration/targets/become/tasks/su.yml create mode 100644 test/integration/targets/become/tasks/sudo.yml create mode 100644 test/integration/targets/become/templates/bar.j2 create mode 100644 test/integration/targets/become/vars/default.yml create mode 100644 test/integration/targets/become_su/aliases create mode 100755 test/integration/targets/become_su/runme.sh create mode 100644 test/integration/targets/binary/aliases create mode 100644 test/integration/targets/binary/files/b64_latin1 create mode 100644 test/integration/targets/binary/files/b64_utf8 create mode 100644 test/integration/targets/binary/files/from_playbook create mode 100644 test/integration/targets/binary/meta/main.yml create mode 100644 test/integration/targets/binary/tasks/main.yml create mode 100644 test/integration/targets/binary/templates/b64_latin1_template.j2 create mode 100644 test/integration/targets/binary/templates/b64_utf8_template.j2 create mode 100644 test/integration/targets/binary/templates/from_playbook_template.j2 create mode 100644 test/integration/targets/binary/vars/main.yml create mode 100644 test/integration/targets/binary_modules/Makefile create mode 100644 test/integration/targets/binary_modules/aliases create mode 100644 test/integration/targets/binary_modules/download_binary_modules.yml create mode 100644 test/integration/targets/binary_modules/group_vars/all create mode 100644 test/integration/targets/binary_modules/library/.gitignore create mode 100644 test/integration/targets/binary_modules/library/helloworld.go create mode 100644 test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml create mode 100755 test/integration/targets/binary_modules/test.sh create mode 100644 test/integration/targets/binary_modules/test_binary_modules.yml create mode 100644 test/integration/targets/binary_modules_posix/aliases create mode 100755 test/integration/targets/binary_modules_posix/runme.sh create mode 100644 test/integration/targets/binary_modules_winrm/aliases create mode 100755 test/integration/targets/binary_modules_winrm/runme.sh create mode 100644 test/integration/targets/blockinfile/aliases create mode 100644 test/integration/targets/blockinfile/files/sshd_config create mode 100644 test/integration/targets/blockinfile/meta/main.yml create mode 100644 test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml create mode 100644 
test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml create mode 100644 test/integration/targets/blockinfile/tasks/create_file.yml create mode 100644 test/integration/targets/blockinfile/tasks/diff.yml create mode 100644 test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml create mode 100644 test/integration/targets/blockinfile/tasks/insertafter.yml create mode 100644 test/integration/targets/blockinfile/tasks/insertbefore.yml create mode 100644 test/integration/targets/blockinfile/tasks/main.yml create mode 100644 test/integration/targets/blockinfile/tasks/preserve_line_endings.yml create mode 100644 test/integration/targets/blockinfile/tasks/validate.yml create mode 100644 test/integration/targets/blocks/aliases create mode 100644 test/integration/targets/blocks/always_failure_no_rescue_rc.yml create mode 100644 test/integration/targets/blocks/always_failure_with_rescue_rc.yml create mode 100644 test/integration/targets/blocks/always_no_rescue_rc.yml create mode 100644 test/integration/targets/blocks/block_fail.yml create mode 100644 test/integration/targets/blocks/block_fail_tasks.yml create mode 100644 test/integration/targets/blocks/block_in_rescue.yml create mode 100644 test/integration/targets/blocks/block_rescue_vars.yml create mode 100644 test/integration/targets/blocks/fail.yml create mode 100644 test/integration/targets/blocks/finalized_task.yml create mode 100644 test/integration/targets/blocks/issue29047.yml create mode 100644 test/integration/targets/blocks/issue29047_tasks.yml create mode 100644 test/integration/targets/blocks/issue71306.yml create mode 100644 test/integration/targets/blocks/main.yml create mode 100644 test/integration/targets/blocks/nested_fail.yml create mode 100644 test/integration/targets/blocks/nested_nested_fail.yml create mode 100644 test/integration/targets/blocks/roles/fail/tasks/main.yml create mode 100755 test/integration/targets/blocks/runme.sh create mode 100644 test/integration/targets/builtin_vars_prompt/aliases create mode 100755 test/integration/targets/builtin_vars_prompt/runme.sh create mode 100644 test/integration/targets/builtin_vars_prompt/test-vars_prompt.py create mode 100644 test/integration/targets/builtin_vars_prompt/unsafe.yml create mode 100644 test/integration/targets/builtin_vars_prompt/unsupported.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml create mode 100644 test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml create mode 100644 test/integration/targets/callback_default/aliases create mode 100644 test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr create mode 100644 
test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.default.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.default.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.free.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_ok.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_ok.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr create mode 100644 test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout create mode 100644 test/integration/targets/callback_default/callback_default.out.host_pinned.stdout create mode 100644 test/integration/targets/callback_default/include_me.yml create mode 100644 test/integration/targets/callback_default/inventory create mode 100755 test/integration/targets/callback_default/runme.sh create mode 100644 test/integration/targets/callback_default/test.yml create mode 100644 test/integration/targets/callback_default/test_2.yml create mode 100644 test/integration/targets/callback_default/test_dryrun.yml create mode 100644 test/integration/targets/callback_default/test_non_lockstep.yml create mode 100644 test/integration/targets/changed_when/aliases create mode 100644 test/integration/targets/changed_when/meta/main.yml create mode 100644 test/integration/targets/changed_when/tasks/main.yml create mode 100644 test/integration/targets/check_mode/aliases create mode 100644 test/integration/targets/check_mode/check_mode-not-on-cli.yml create mode 100644 test/integration/targets/check_mode/check_mode-on-cli.yml create mode 100644 test/integration/targets/check_mode/check_mode.yml create mode 100644 test/integration/targets/check_mode/roles/test_always_run/meta/main.yml create mode 100644 test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml create mode 100644 test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt create mode 100644 test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml create mode 100644 test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2 create mode 100644 test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml create mode 100755 test/integration/targets/check_mode/runme.sh create mode 100644 test/integration/targets/cli/aliases create mode 100755 test/integration/targets/cli/runme.sh create mode 100644 test/integration/targets/cli/setup.yml create mode 100644 test/integration/targets/cli/test-cli.py create mode 100644 test/integration/targets/cli/test_k_and_K.py create mode 100644 test/integration/targets/collections/a.statichost.yml create mode 100644 
test/integration/targets/collections/aliases create mode 100644 test/integration/targets/collections/cache.statichost.yml create mode 100644 test/integration/targets/collections/check_populated_inventory.yml create mode 100644 test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py create mode 100644 test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py create mode 100644 test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py create mode 100644 test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py create mode 100644 
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py create mode 100644 
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1 create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py 
create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml create mode 100644 test/integration/targets/collections/collection_root_user/ansible_collections/testns/testredirect/meta/runtime.yml create mode 100644 test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py create mode 100644 test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py create mode 100644 
test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py create mode 100644 test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py create mode 100644 test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py create mode 100644 test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py create mode 100644 test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py create mode 100644 test/integration/targets/collections/includeme.yml create mode 100644 test/integration/targets/collections/inventory_test.yml create mode 100644 test/integration/targets/collections/invocation_tests.yml create mode 100644 test/integration/targets/collections/library/ping.py create mode 100644 test/integration/targets/collections/noop.yml create mode 100644 test/integration/targets/collections/posix.yml create mode 100644 test/integration/targets/collections/redirected.statichost.yml create mode 100644 test/integration/targets/collections/roles/standalone/tasks/main.yml create mode 100644 test/integration/targets/collections/roles/testrole/tasks/main.yml create mode 100755 test/integration/targets/collections/runme.sh create mode 100644 test/integration/targets/collections/test_bypass_host_loop.yml create mode 100644 test/integration/targets/collections/test_collection_meta.yml create mode 100644 test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py create mode 100644 test/integration/targets/collections/test_redirect_list.yml create mode 100644 test/integration/targets/collections/testcoll2/MANIFEST.json create mode 100644 test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py create mode 100755 test/integration/targets/collections/vars_plugin_tests.sh create mode 100644 test/integration/targets/collections/windows.yml create mode 100644 test/integration/targets/collections_plugin_namespace/aliases create mode 100644 test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py create mode 100644 test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py create mode 100644 test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py create mode 100644 test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py create mode 100644 test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml create mode 100755 test/integration/targets/collections_plugin_namespace/runme.sh create mode 100644 test/integration/targets/collections_plugin_namespace/test.yml create mode 100644 test/integration/targets/collections_relative_imports/aliases create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1 create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py create mode 100644 
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1 create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1 create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1 create mode 100644 test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs create mode 100755 test/integration/targets/collections_relative_imports/runme.sh create mode 100644 test/integration/targets/collections_relative_imports/test.yml create mode 100644 test/integration/targets/collections_relative_imports/windows.yml create mode 100644 test/integration/targets/collections_runtime_pythonpath/aliases create mode 100644 test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py create mode 100644 test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml create mode 100644 test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg create mode 100644 test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py create mode 100755 test/integration/targets/collections_runtime_pythonpath/runme.sh create mode 100644 test/integration/targets/command_shell/aliases create mode 100755 test/integration/targets/command_shell/files/create_afile.sh create mode 100755 test/integration/targets/command_shell/files/remove_afile.sh create mode 100755 test/integration/targets/command_shell/files/test.sh create mode 100644 test/integration/targets/command_shell/meta/main.yml create mode 100644 test/integration/targets/command_shell/tasks/main.yml create mode 100644 test/integration/targets/common_network/aliases create mode 100644 test/integration/targets/common_network/tasks/main.yml create mode 100644 test/integration/targets/common_network/test_plugins/is_mac.py create mode 100644 test/integration/targets/conditionals/aliases create mode 100644 test/integration/targets/conditionals/play.yml create mode 100755 test/integration/targets/conditionals/runme.sh create mode 100644 test/integration/targets/conditionals/test_no_warnings.yml create mode 100644 test/integration/targets/conditionals/test_warnings.yml create mode 100644 test/integration/targets/conditionals/vars/main.yml create mode 100644 test/integration/targets/config/aliases create mode 100644 test/integration/targets/config/inline_comment_ansible.cfg create mode 100755 test/integration/targets/config/runme.sh create mode 100644 test/integration/targets/connection/aliases create mode 100755 test/integration/targets/connection/test.sh create mode 100644 test/integration/targets/connection/test_connection.yml create 
mode 100644 test/integration/targets/connection/test_reset_connection.yml create mode 100644 test/integration/targets/connection_delegation/action_plugins/delegation_action.py create mode 100644 test/integration/targets/connection_delegation/aliases create mode 100644 test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py create mode 100644 test/integration/targets/connection_delegation/inventory.ini create mode 100755 test/integration/targets/connection_delegation/runme.sh create mode 100644 test/integration/targets/connection_delegation/test.yml create mode 100644 test/integration/targets/connection_local/aliases create mode 120000 test/integration/targets/connection_local/runme.sh create mode 100644 test/integration/targets/connection_local/test_connection.inventory create mode 100644 test/integration/targets/connection_paramiko_ssh/aliases create mode 100755 test/integration/targets/connection_paramiko_ssh/runme.sh create mode 120000 test/integration/targets/connection_paramiko_ssh/test.sh create mode 100644 test/integration/targets/connection_paramiko_ssh/test_connection.inventory create mode 100644 test/integration/targets/connection_posix/aliases create mode 100755 test/integration/targets/connection_posix/test.sh create mode 100644 test/integration/targets/connection_psrp/aliases create mode 100644 test/integration/targets/connection_psrp/files/empty.txt create mode 100755 test/integration/targets/connection_psrp/runme.sh create mode 100644 test/integration/targets/connection_psrp/test_connection.inventory.j2 create mode 100644 test/integration/targets/connection_psrp/tests.yml create mode 100644 test/integration/targets/connection_ssh/aliases create mode 120000 test/integration/targets/connection_ssh/posix.sh create mode 100755 test/integration/targets/connection_ssh/runme.sh create mode 100644 test/integration/targets/connection_ssh/test_connection.inventory create mode 100644 test/integration/targets/connection_windows_ssh/aliases create mode 100755 test/integration/targets/connection_windows_ssh/runme.sh create mode 100644 test/integration/targets/connection_windows_ssh/test_connection.inventory.j2 create mode 100644 test/integration/targets/connection_windows_ssh/tests.yml create mode 100644 test/integration/targets/connection_windows_ssh/tests_fetch.yml create mode 100755 test/integration/targets/connection_windows_ssh/windows.sh create mode 100644 test/integration/targets/connection_winrm/aliases create mode 100755 test/integration/targets/connection_winrm/runme.sh create mode 100644 test/integration/targets/connection_winrm/test_connection.inventory.j2 create mode 100644 test/integration/targets/copy/aliases create mode 100644 test/integration/targets/copy/defaults/main.yml create mode 100644 test/integration/targets/copy/files/foo.txt create mode 100644 test/integration/targets/copy/files/subdir/bar.txt create mode 120000 test/integration/targets/copy/files/subdir/subdir1/bar.txt create mode 100644 test/integration/targets/copy/files/subdir/subdir2/baz.txt create mode 100644 test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt create mode 100644 test/integration/targets/copy/meta/main.yml create mode 100644 test/integration/targets/copy/tasks/acls.yml create mode 100644 test/integration/targets/copy/tasks/check_mode.yml create mode 100644 test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml create mode 100644 test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml 
create mode 100644 test/integration/targets/copy/tasks/main.yml create mode 100644 test/integration/targets/copy/tasks/no_log.yml create mode 100644 test/integration/targets/copy/tasks/selinux.yml create mode 100644 test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml create mode 100644 test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml create mode 100644 test/integration/targets/copy/tasks/tests.yml create mode 100644 test/integration/targets/cron/aliases create mode 100644 test/integration/targets/cron/defaults/main.yml create mode 100644 test/integration/targets/cron/meta/main.yml create mode 100644 test/integration/targets/cron/tasks/main.yml create mode 100644 test/integration/targets/dataloader/aliases create mode 100644 test/integration/targets/dataloader/attempt_to_load_invalid_json.yml create mode 100755 test/integration/targets/dataloader/runme.sh create mode 100644 test/integration/targets/dataloader/vars/invalid.json create mode 100644 test/integration/targets/debconf/aliases create mode 100644 test/integration/targets/debconf/meta/main.yml create mode 100644 test/integration/targets/debconf/tasks/main.yml create mode 100644 test/integration/targets/debug/aliases create mode 100644 test/integration/targets/debug/main.yml create mode 100644 test/integration/targets/debug/main_fqcn.yml create mode 100644 test/integration/targets/debug/nosetfacts.yml create mode 100755 test/integration/targets/debug/runme.sh create mode 100644 test/integration/targets/delegate_to/aliases create mode 100644 test/integration/targets/delegate_to/connection_plugins/fakelocal.py create mode 100644 test/integration/targets/delegate_to/delegate_and_nolog.yml create mode 100644 test/integration/targets/delegate_to/delegate_facts_block.yml create mode 100644 test/integration/targets/delegate_to/delegate_local_from_root.yml create mode 100644 test/integration/targets/delegate_to/delegate_vars_hanldling.yml create mode 100644 test/integration/targets/delegate_to/discovery_applied.yml create mode 100644 test/integration/targets/delegate_to/files/testfile create mode 100644 test/integration/targets/delegate_to/has_hostvars.yml create mode 100644 test/integration/targets/delegate_to/inventory create mode 100644 test/integration/targets/delegate_to/inventory_interpreters create mode 100644 test/integration/targets/delegate_to/library/detect_interpreter.py create mode 100644 test/integration/targets/delegate_to/roles/test_template/templates/foo.j2 create mode 100755 test/integration/targets/delegate_to/runme.sh create mode 100644 test/integration/targets/delegate_to/test_delegate_to.yml create mode 100644 test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml create mode 100644 test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml create mode 100644 test/integration/targets/delegate_to/test_loop_control.yml create mode 100644 test/integration/targets/delegate_to/verify_interpreter.yml create mode 100644 test/integration/targets/dict_transformations/aliases create mode 100644 test/integration/targets/dict_transformations/library/convert_camelCase.py create mode 100644 test/integration/targets/dict_transformations/library/convert_snake_case.py create mode 100644 test/integration/targets/dict_transformations/tasks/main.yml create mode 100644 test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml create mode 100644 test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml 
create mode 100644 test/integration/targets/dnf/aliases create mode 100644 test/integration/targets/dnf/meta/main.yml create mode 100644 test/integration/targets/dnf/tasks/dnf.yml create mode 100644 test/integration/targets/dnf/tasks/dnfinstallroot.yml create mode 100644 test/integration/targets/dnf/tasks/dnfreleasever.yml create mode 100644 test/integration/targets/dnf/tasks/filters.yml create mode 100644 test/integration/targets/dnf/tasks/filters_check_mode.yml create mode 100644 test/integration/targets/dnf/tasks/gpg.yml create mode 100644 test/integration/targets/dnf/tasks/logging.yml create mode 100644 test/integration/targets/dnf/tasks/main.yml create mode 100644 test/integration/targets/dnf/tasks/modularity.yml create mode 100644 test/integration/targets/dnf/tasks/repo.yml create mode 100644 test/integration/targets/dnf/vars/CentOS.yml create mode 100644 test/integration/targets/dnf/vars/Fedora.yml create mode 100644 test/integration/targets/dnf/vars/RedHat.yml create mode 100644 test/integration/targets/dnf/vars/main.yml create mode 100644 test/integration/targets/dpkg_selections/aliases create mode 100644 test/integration/targets/dpkg_selections/defaults/main.yaml create mode 100644 test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml create mode 100644 test/integration/targets/dpkg_selections/tasks/main.yaml create mode 100644 test/integration/targets/egg-info/aliases create mode 100644 test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py create mode 100644 test/integration/targets/egg-info/tasks/main.yml create mode 100644 test/integration/targets/embedded_module/aliases create mode 100644 test/integration/targets/embedded_module/library/test_integration_module create mode 100644 test/integration/targets/embedded_module/tasks/main.yml create mode 100644 test/integration/targets/environment/aliases create mode 100755 test/integration/targets/environment/runme.sh create mode 100644 test/integration/targets/environment/test_environment.yml create mode 100644 test/integration/targets/error_from_connection/aliases create mode 100644 test/integration/targets/error_from_connection/connection_plugins/dummy.py create mode 100644 test/integration/targets/error_from_connection/inventory create mode 100644 test/integration/targets/error_from_connection/play.yml create mode 100755 test/integration/targets/error_from_connection/runme.sh create mode 100644 test/integration/targets/expect/aliases create mode 100644 test/integration/targets/expect/files/foo.txt create mode 100644 test/integration/targets/expect/files/test_command.py create mode 100644 test/integration/targets/expect/tasks/main.yml create mode 100644 test/integration/targets/facts_d/aliases create mode 100644 test/integration/targets/facts_d/files/basdscript.fact create mode 100644 test/integration/targets/facts_d/files/goodscript.fact create mode 100644 test/integration/targets/facts_d/files/preferences.fact create mode 100644 test/integration/targets/facts_d/files/unreadable.fact create mode 100644 test/integration/targets/facts_d/meta/main.yml create mode 100644 test/integration/targets/facts_d/tasks/main.yml create mode 100644 test/integration/targets/facts_linux_network/aliases create mode 100644 test/integration/targets/facts_linux_network/meta/main.yml create mode 100644 test/integration/targets/facts_linux_network/tasks/main.yml create mode 100644 test/integration/targets/failed_when/aliases create mode 100644 test/integration/targets/failed_when/tasks/main.yml create mode 100644 
test/integration/targets/fetch/aliases create mode 100644 test/integration/targets/fetch/injection/avoid_slurp_return.yml create mode 100644 test/integration/targets/fetch/injection/here.txt create mode 100644 test/integration/targets/fetch/injection/library/slurp.py create mode 100644 test/integration/targets/fetch/roles/fetch_tests/meta/main.yml create mode 100644 test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml create mode 100644 test/integration/targets/fetch/run_fetch_tests.yml create mode 100755 test/integration/targets/fetch/runme.sh create mode 100644 test/integration/targets/file/aliases create mode 100644 test/integration/targets/file/defaults/main.yml create mode 100644 test/integration/targets/file/files/foo.txt create mode 100644 test/integration/targets/file/files/foobar/directory/fileC create mode 100644 test/integration/targets/file/files/foobar/directory/fileD create mode 100644 test/integration/targets/file/files/foobar/fileA create mode 100644 test/integration/targets/file/files/foobar/fileB create mode 100644 test/integration/targets/file/meta/main.yml create mode 100644 test/integration/targets/file/tasks/diff_peek.yml create mode 100644 test/integration/targets/file/tasks/directory_as_dest.yml create mode 100644 test/integration/targets/file/tasks/initialize.yml create mode 100644 test/integration/targets/file/tasks/main.yml create mode 100644 test/integration/targets/file/tasks/selinux_tests.yml create mode 100644 test/integration/targets/file/tasks/state_link.yml create mode 100644 test/integration/targets/file/tasks/unicode_path.yml create mode 100644 test/integration/targets/filter_core/aliases create mode 100644 test/integration/targets/filter_core/files/9851.txt create mode 100644 test/integration/targets/filter_core/files/fileglob/one.txt create mode 100644 test/integration/targets/filter_core/files/fileglob/two.txt create mode 100644 test/integration/targets/filter_core/files/foo.txt create mode 100644 test/integration/targets/filter_core/handle_undefined_type_errors.yml create mode 100644 test/integration/targets/filter_core/host_vars/localhost create mode 100644 test/integration/targets/filter_core/meta/main.yml create mode 100755 test/integration/targets/filter_core/runme.sh create mode 100644 test/integration/targets/filter_core/runme.yml create mode 100644 test/integration/targets/filter_core/tasks/main.yml create mode 100644 test/integration/targets/filter_core/templates/foo.j2 create mode 100644 test/integration/targets/filter_core/templates/py26json.j2 create mode 100644 test/integration/targets/filter_core/vars/main.yml create mode 100644 test/integration/targets/filter_mathstuff/aliases create mode 100644 test/integration/targets/filter_mathstuff/tasks/main.yml create mode 100644 test/integration/targets/filter_urls/aliases create mode 100755 test/integration/targets/filter_urls/runme.sh create mode 100644 test/integration/targets/filter_urls/runme.yml create mode 100644 test/integration/targets/filter_urls/tasks/main.yml create mode 100644 test/integration/targets/filter_urlsplit/aliases create mode 100644 test/integration/targets/filter_urlsplit/tasks/main.yml create mode 100644 test/integration/targets/find/aliases create mode 100644 test/integration/targets/find/meta/main.yml create mode 100644 test/integration/targets/find/tasks/main.yml create mode 100644 test/integration/targets/gathering/aliases create mode 100644 test/integration/targets/gathering/explicit.yml create mode 100644 test/integration/targets/gathering/implicit.yml 
create mode 100755 test/integration/targets/gathering/runme.sh
create mode 100644 test/integration/targets/gathering/smart.yml
create mode 100644 test/integration/targets/gathering/uuid.fact
create mode 100644 test/integration/targets/gathering_facts/aliases
create mode 100644 test/integration/targets/gathering_facts/cache_plugins/none.py
create mode 100644 test/integration/targets/gathering_facts/inventory
create mode 100644 test/integration/targets/gathering_facts/library/bogus_facts
create mode 100644 test/integration/targets/gathering_facts/library/facts_one
create mode 100644 test/integration/targets/gathering_facts/library/facts_two
create mode 100644 test/integration/targets/gathering_facts/library/file_utils.py
create mode 100644 test/integration/targets/gathering_facts/one_two.json
create mode 100644 test/integration/targets/gathering_facts/prevent_clobbering.yml
create mode 100755 test/integration/targets/gathering_facts/runme.sh
create mode 100644 test/integration/targets/gathering_facts/test_gathering_facts.yml
create mode 100644 test/integration/targets/gathering_facts/test_prevent_injection.yml
create mode 100644 test/integration/targets/gathering_facts/test_run_once.yml
create mode 100644 test/integration/targets/gathering_facts/two_one.json
create mode 100644 test/integration/targets/gathering_facts/uuid.fact
create mode 100644 test/integration/targets/gathering_facts/verify_merge_facts.yml
create mode 100644 test/integration/targets/gathering_facts/verify_subset.yml
create mode 100644 test/integration/targets/get_url/aliases
create mode 100644 test/integration/targets/get_url/files/testserver.py
create mode 100644 test/integration/targets/get_url/meta/main.yml
create mode 100644 test/integration/targets/get_url/tasks/main.yml
create mode 100644 test/integration/targets/getent/aliases
create mode 100644 test/integration/targets/getent/meta/main.yml
create mode 100644 test/integration/targets/getent/tasks/main.yml
create mode 100644 test/integration/targets/git/aliases
create mode 100644 test/integration/targets/git/handlers/cleanup-default.yml
create mode 100644 test/integration/targets/git/handlers/cleanup-freebsd.yml
create mode 100644 test/integration/targets/git/handlers/main.yml
create mode 100644 test/integration/targets/git/meta/main.yml
create mode 100644 test/integration/targets/git/tasks/ambiguous-ref.yml
create mode 100644 test/integration/targets/git/tasks/archive.yml
create mode 100644 test/integration/targets/git/tasks/change-repo-url.yml
create mode 100644 test/integration/targets/git/tasks/checkout-new-tag.yml
create mode 100644 test/integration/targets/git/tasks/depth.yml
create mode 100644 test/integration/targets/git/tasks/forcefully-fetch-tag.yml
create mode 100644 test/integration/targets/git/tasks/formats.yml
create mode 100644 test/integration/targets/git/tasks/gpg-verification.yml
create mode 100644 test/integration/targets/git/tasks/localmods.yml
create mode 100644 test/integration/targets/git/tasks/main.yml
create mode 100644 test/integration/targets/git/tasks/missing_hostkey.yml
create mode 100644 test/integration/targets/git/tasks/no-destination.yml
create mode 100644 test/integration/targets/git/tasks/reset-origin.yml
create mode 100644 test/integration/targets/git/tasks/separate-git-dir.yml
create mode 100644 test/integration/targets/git/tasks/setup-local-repos.yml
create mode 100644 test/integration/targets/git/tasks/setup.yml
create mode 100644 test/integration/targets/git/tasks/specific-revision.yml
create mode 100644 test/integration/targets/git/tasks/submodules.yml
create mode 100644 test/integration/targets/git/vars/main.yml
create mode 100644 test/integration/targets/group/aliases
create mode 100644 test/integration/targets/group/files/gidget.py
create mode 100644 test/integration/targets/group/files/grouplist.sh
create mode 100644 test/integration/targets/group/meta/main.yml
create mode 100644 test/integration/targets/group/tasks/main.yml
create mode 100644 test/integration/targets/group/tasks/tests.yml
create mode 100644 test/integration/targets/group_by/aliases
create mode 100644 test/integration/targets/group_by/create_groups.yml
create mode 100644 test/integration/targets/group_by/group_vars/all
create mode 100644 test/integration/targets/group_by/group_vars/camelus
create mode 100644 test/integration/targets/group_by/group_vars/vicugna
create mode 100644 test/integration/targets/group_by/inventory.group_by
create mode 100755 test/integration/targets/group_by/runme.sh
create mode 100644 test/integration/targets/group_by/test_group_by.yml
create mode 100644 test/integration/targets/group_by/test_group_by_skipped.yml
create mode 100644 test/integration/targets/groupby_filter/aliases
create mode 100755 test/integration/targets/groupby_filter/runme.sh
create mode 100644 test/integration/targets/groupby_filter/test_jinja2_groupby.yml
create mode 100644 test/integration/targets/handler_race/aliases
create mode 100644 test/integration/targets/handler_race/inventory
create mode 100644 test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml
create mode 100644 test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml
create mode 100644 test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml
create mode 100644 test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml
create mode 100755 test/integration/targets/handler_race/runme.sh
create mode 100644 test/integration/targets/handler_race/test_handler_race.yml
create mode 100644 test/integration/targets/handlers/aliases
create mode 100644 test/integration/targets/handlers/from_handlers.yml
create mode 100644 test/integration/targets/handlers/handlers.yml
create mode 100644 test/integration/targets/handlers/inventory.handlers
create mode 100644 test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers/meta/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml
create mode 100644 test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml
create mode 100755 test/integration/targets/handlers/runme.sh
create mode 100644 test/integration/targets/handlers/test_force_handlers.yml
create mode 100644 test/integration/targets/handlers/test_handlers.yml
create mode 100644 test/integration/targets/handlers/test_handlers_any_errors_fatal.yml
create mode 100644 test/integration/targets/handlers/test_handlers_include.yml
create mode 100644 test/integration/targets/handlers/test_handlers_include_role.yml
create mode 100644 test/integration/targets/handlers/test_handlers_including_task.yml
create mode 100644 test/integration/targets/handlers/test_handlers_inexistent_notify.yml
create mode 100644 test/integration/targets/handlers/test_handlers_listen.yml
create mode 100644 test/integration/targets/handlers/test_handlers_template_run_once.yml
create mode 100644 test/integration/targets/handlers/test_listening_handlers.yml
create mode 100644 test/integration/targets/handlers/test_templating_in_handlers.yml
create mode 100644 test/integration/targets/hash/aliases
create mode 100644 test/integration/targets/hash/group_vars/all
create mode 100644 test/integration/targets/hash/host_vars/testhost
create mode 100644 test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml
create mode 100644 test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml
create mode 100644 test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml
create mode 100644 test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml
create mode 100755 test/integration/targets/hash/runme.sh
create mode 100644 test/integration/targets/hash/test_hash.yml
create mode 100644 test/integration/targets/hash/test_inv1.yml
create mode 100644 test/integration/targets/hash/test_inv2.yml
create mode 100644 test/integration/targets/hash/test_inventory_hash.yml
create mode 100644 test/integration/targets/hash/vars/test_hash_vars.yml
create mode 100644 test/integration/targets/hosts_field/aliases
create mode 100644 test/integration/targets/hosts_field/inventory.hosts_field
create mode 100755 test/integration/targets/hosts_field/runme.sh
create mode 100644 test/integration/targets/hosts_field/test_hosts_field.json
create mode 100644 test/integration/targets/hosts_field/test_hosts_field.yml
create mode 100644 test/integration/targets/ignore_errors/aliases
create mode 100644 test/integration/targets/ignore_errors/meta/main.yml
create mode 100644 test/integration/targets/ignore_errors/tasks/main.yml
create mode 100644 test/integration/targets/ignore_unreachable/aliases
create mode 100644 test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py
create mode 100644 test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py
create mode 100644 test/integration/targets/ignore_unreachable/inventory
create mode 100644 test/integration/targets/ignore_unreachable/meta/main.yml
create mode 100755 test/integration/targets/ignore_unreachable/runme.sh
create mode 100644 test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml
create mode 100644 test/integration/targets/ignore_unreachable/test_cannot_connect.yml
create mode 100644 test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml
create mode 100644 test/integration/targets/incidental_azure_rm_mariadbserver/aliases
create mode 100644 test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml
create mode 100644 test/integration/targets/incidental_azure_rm_resource/aliases
create mode 100644 test/integration/targets/incidental_azure_rm_resource/tasks/main.yml
create mode 100644 test/integration/targets/incidental_cloud_init_data_facts/aliases
create mode 100644 test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml
create mode 100644 test/integration/targets/incidental_cloudformation/aliases
create mode 100644 test/integration/targets/incidental_cloudformation/defaults/main.yml
create mode 100644 test/integration/targets/incidental_cloudformation/files/cf_template.json
create mode 100644 test/integration/targets/incidental_cloudformation/tasks/main.yml
create mode 100644 test/integration/targets/incidental_cs_common/aliases
create mode 100644 test/integration/targets/incidental_cs_common/defaults/main.yml
create mode 100644 test/integration/targets/incidental_deploy_helper/aliases
create mode 100644 test/integration/targets/incidental_deploy_helper/tasks/main.yml
create mode 100644 test/integration/targets/incidental_flatpak_remote/aliases
create mode 100644 test/integration/targets/incidental_flatpak_remote/meta/main.yml
create mode 100644 test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml
create mode 100644 test/integration/targets/incidental_flatpak_remote/tasks/main.yml
create mode 100644 test/integration/targets/incidental_flatpak_remote/tasks/setup.yml
create mode 100644 test/integration/targets/incidental_flatpak_remote/tasks/test.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/aliases
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
create mode 100755 test/integration/targets/incidental_inventory_aws_ec2/runme.sh
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
create mode 100644 test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/aliases
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml
create mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml
create mode 100755 test/integration/targets/incidental_inventory_docker_swarm/runme.sh
create mode 100644 test/integration/targets/incidental_inventory_foreman/aliases
create mode 100644 test/integration/targets/incidental_inventory_foreman/ansible.cfg
create mode 100644 test/integration/targets/incidental_inventory_foreman/inspect_cache.yml
create mode 100755 test/integration/targets/incidental_inventory_foreman/runme.sh
create mode 100644 test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml
create mode 100644 test/integration/targets/incidental_ios_file/aliases
create mode 100644 test/integration/targets/incidental_ios_file/defaults/main.yaml
create mode 100644 test/integration/targets/incidental_ios_file/ios1.cfg
create mode 100644 test/integration/targets/incidental_ios_file/nonascii.bin
create mode 100644 test/integration/targets/incidental_ios_file/tasks/cli.yaml
create mode 100644 test/integration/targets/incidental_ios_file/tasks/main.yaml
create mode 100644 test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml
create mode 100644 test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml
create mode 100644 test/integration/targets/incidental_lookup_rabbitmq/aliases
create mode 100644 test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml
create mode 100644 test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml
create mode 100644 test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml
create mode 100644 test/integration/targets/incidental_lvg/aliases
create mode 100644 test/integration/targets/incidental_lvg/meta/main.yml
create mode 100644 test/integration/targets/incidental_lvg/tasks/main.yml
create mode 100644 test/integration/targets/incidental_lvg/tasks/setup.yml
create mode 100644 test/integration/targets/incidental_lvg/tasks/teardown.yml
create mode 100644 test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml
create mode 100644 test/integration/targets/incidental_lvg/tasks/test_indempotency.yml
create mode 100644 test/integration/targets/incidental_mongodb_parameter/aliases
create mode 100644 test/integration/targets/incidental_mongodb_parameter/defaults/main.yml
create mode 100644 test/integration/targets/incidental_mongodb_parameter/meta/main.yml
create mode 100644 test/integration/targets/incidental_mongodb_parameter/tasks/main.yml
create mode 100644 test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml
create mode 100644 test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/aliases
create mode 100644 test/integration/targets/incidental_postgresql_user/defaults/main.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/meta/main.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/tasks/main.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
create mode 100644 test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
create mode 100644 test/integration/targets/incidental_setup_docker/aliases
create mode 100644 test/integration/targets/incidental_setup_docker/defaults/main.yml
create mode 100644 test/integration/targets/incidental_setup_docker/handlers/main.yml
create mode 100644 test/integration/targets/incidental_setup_docker/meta/main.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/Debian.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/Fedora.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/Suse.yml
create mode 100644 test/integration/targets/incidental_setup_docker/tasks/main.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/Debian.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/Fedora.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/Suse.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
create mode 100644 test/integration/targets/incidental_setup_docker/vars/default.yml
create mode 100644 test/integration/targets/incidental_setup_ec2/aliases
create mode 100644 test/integration/targets/incidental_setup_ec2/defaults/main.yml
create mode 100644 test/integration/targets/incidental_setup_ec2/tasks/common.yml
create mode 100644 test/integration/targets/incidental_setup_ec2/vars/main.yml
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/README.md
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/aliases
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml
create mode 100644 test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml
create mode 100644 test/integration/targets/incidental_setup_mongodb/aliases
create mode 100644 test/integration/targets/incidental_setup_mongodb/defaults/main.yml
create mode 100644 test/integration/targets/incidental_setup_mongodb/handlers/main.yml
create mode 100644 test/integration/targets/incidental_setup_mongodb/tasks/main.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/aliases
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/files/dummy.control
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-20-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml
create mode 100644 test/integration/targets/incidental_setup_postgresql_db/vars/default.yml
create mode 100644 test/integration/targets/incidental_setup_rabbitmq/aliases
create mode 100644 test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf
create mode 100644 test/integration/targets/incidental_setup_rabbitmq/meta/main.yml
create mode 100644 test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml
create mode 100644 test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml
create mode 100644 test/integration/targets/incidental_setup_tls/aliases
create mode 100644 test/integration/targets/incidental_setup_tls/files/ca_certificate.pem
create mode 100644 test/integration/targets/incidental_setup_tls/files/ca_key.pem
create mode 100644 test/integration/targets/incidental_setup_tls/files/client_certificate.pem
create mode 100644 test/integration/targets/incidental_setup_tls/files/client_key.pem
create mode 100644 test/integration/targets/incidental_setup_tls/files/server_certificate.pem
create mode 100644 test/integration/targets/incidental_setup_tls/files/server_key.pem
create mode 100644 test/integration/targets/incidental_setup_tls/tasks/main.yml
create mode 100644 test/integration/targets/incidental_synchronize/aliases
create mode 100644 test/integration/targets/incidental_synchronize/files/bar.txt
create mode 100644 test/integration/targets/incidental_synchronize/files/foo.txt
create mode 100644 test/integration/targets/incidental_synchronize/tasks/main.yml
create mode 100644 test/integration/targets/incidental_timezone/aliases
create mode 100644 test/integration/targets/incidental_timezone/tasks/main.yml
create mode 100644 test/integration/targets/incidental_timezone/tasks/test.yml
create mode 100644 test/integration/targets/incidental_vyos_config/aliases
create mode 100644 test/integration/targets/incidental_vyos_config/defaults/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tasks/cli.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tasks/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/config.cfg
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/save.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml
create mode 100644 test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/aliases
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml
create mode 100644 test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml
create mode 100644 test/integration/targets/incidental_vyos_prepare_tests/aliases
create mode 100644 test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml
create mode 100644 test/integration/targets/incidental_win_copy/aliases
create mode 100644 test/integration/targets/incidental_win_copy/defaults/main.yml
create mode 100644 test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file
create mode 100644 test/integration/targets/incidental_win_copy/files-different/vault/readme.txt
create mode 100644 test/integration/targets/incidental_win_copy/files-different/vault/vault-file
create mode 100644 test/integration/targets/incidental_win_copy/files/empty.txt
create mode 100644 test/integration/targets/incidental_win_copy/files/foo.txt
create mode 100644 test/integration/targets/incidental_win_copy/files/subdir/bar.txt
create mode 100644 test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt
create mode 100644 test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt
create mode 100644 test/integration/targets/incidental_win_copy/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_copy/tasks/remote_tests.yml
create mode 100644 test/integration/targets/incidental_win_copy/tasks/tests.yml
create mode 100644 test/integration/targets/incidental_win_data_deduplication/aliases
create mode 100644 test/integration/targets/incidental_win_data_deduplication/meta/main.yml
create mode 100644 test/integration/targets/incidental_win_data_deduplication/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml
create mode 100644 test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml
create mode 100644 test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2
create mode 100644 test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2
create mode 100644 test/integration/targets/incidental_win_dsc/aliases
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
create mode 100644 test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1
create mode 100644 test/integration/targets/incidental_win_dsc/meta/main.yml
create mode 100644 test/integration/targets/incidental_win_dsc/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_dsc/tasks/tests.yml
create mode 100644 test/integration/targets/incidental_win_lineinfile/aliases
create mode 100644 test/integration/targets/incidental_win_lineinfile/files/test.txt
create mode 100644 test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt
create mode 100644 test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt
create mode 100644 test/integration/targets/incidental_win_lineinfile/files/testempty.txt
create mode 100644 test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt
create mode 100644 test/integration/targets/incidental_win_lineinfile/meta/main.yml
create mode 100644 test/integration/targets/incidental_win_lineinfile/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_ping/aliases
create mode 100644 test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1
create mode 100644 test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1
create mode 100644 test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1
create mode 100644 test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1
create mode 100644 test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1
create mode 100644 test/integration/targets/incidental_win_ping/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_prepare_tests/aliases
create mode 100644 test/integration/targets/incidental_win_prepare_tests/meta/main.yml
create mode 100644 test/integration/targets/incidental_win_prepare_tests/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_psexec/aliases
create mode 100644 test/integration/targets/incidental_win_psexec/meta/main.yml
create mode 100644 test/integration/targets/incidental_win_psexec/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_reboot/aliases
create mode 100644 test/integration/targets/incidental_win_reboot/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1
create mode 100644 test/integration/targets/incidental_win_security_policy/aliases
create mode 100644 test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1
create mode 100644 test/integration/targets/incidental_win_security_policy/tasks/main.yml
create mode 100644 test/integration/targets/incidental_win_security_policy/tasks/tests.yml
create mode 100644 test/integration/targets/incidental_xml/aliases
create mode 100644 test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
create mode 100644 test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-elements.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
create mode 100644 test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-pretty-print.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-remove-attribute.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-remove-element.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-children-elements.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-element-value.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
create mode 100644 test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
create mode 100644 test/integration/targets/incidental_xml/tasks/main.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-count-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-count.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-get-element-content.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-pretty-print.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-remove-element.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-element-value.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml
create mode 100644 test/integration/targets/incidental_xml/tasks/test-xmlstring.yml
create mode 100644 test/integration/targets/incidental_xml/vars/main.yml
create mode 100644 test/integration/targets/include_import/aliases
create mode 100644 test/integration/targets/include_import/apply/import_apply.yml
create mode 100644 test/integration/targets/include_import/apply/include_apply.yml
create mode 100644 test/integration/targets/include_import/apply/include_tasks.yml
create mode 100644 test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml
create mode 100644 test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml
create mode 100644 test/integration/targets/include_import/empty_group_warning/playbook.yml
create mode 100644 test/integration/targets/include_import/empty_group_warning/tasks.yml
create mode 100644 test/integration/targets/include_import/grandchild/block_include_tasks.yml
create mode 100644 test/integration/targets/include_import/grandchild/import.yml
create mode 100644 test/integration/targets/include_import/grandchild/import_include_include_tasks.yml
create mode 100644 test/integration/targets/include_import/grandchild/include_level_1.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/playbook.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml
create mode 100644 test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml
create mode 100644 test/integration/targets/include_import/inventory
create mode 100644 test/integration/targets/include_import/nestedtasks/nested/nested.yml
create mode 100644 test/integration/targets/include_import/parent_templating/playbook.yml
create mode 100644 test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml
create mode 100644 test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml
create mode 100644 test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml
create mode 100644 test/integration/targets/include_import/playbook/group_vars/all.yml
create mode 100644 test/integration/targets/include_import/playbook/playbook1.yml
create mode 100644 test/integration/targets/include_import/playbook/playbook2.yml
create mode 100644 test/integration/targets/include_import/playbook/playbook3.yml
create mode 100644 test/integration/targets/include_import/playbook/playbook4.yml
create mode 100644 test/integration/targets/include_import/playbook/playbook_needing_vars.yml
create mode 100644 test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml
create mode 100644 test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py
create mode 100644 test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml
create mode 100644 test/integration/targets/include_import/playbook/test_import_playbook.yml
create mode 100644 test/integration/targets/include_import/playbook/test_import_playbook_tags.yml
create mode 100644 test/integration/targets/include_import/playbook/validate1.yml
create mode 100644 test/integration/targets/include_import/playbook/validate2.yml
create mode 100644 test/integration/targets/include_import/playbook/validate34.yml
create mode 100644 test/integration/targets/include_import/playbook/validate_tags.yml
create mode 100644 test/integration/targets/include_import/public_exposure/no_bleeding.yml
create mode 100644 test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml
create mode 100644 test/integration/targets/include_import/public_exposure/playbook.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml
create mode 100644 test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml
create mode 100644 test/integration/targets/include_import/role/test_import_role.yml
create mode 100644 test/integration/targets/include_import/role/test_include_role.yml
create mode 100644 test/integration/targets/include_import/role/test_include_role_vars_from.yml
create mode 100644 test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml
create mode 100644 test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml
create mode 100644 test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested_include_task/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/canary1.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/canary2.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/canary3.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/fail.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t01.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t02.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t03.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t04.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t05.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t06.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t07.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t08.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t09.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t10.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t11.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/r1t12.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/tasks.yml
create mode 100644 test/integration/targets/include_import/roles/role1/tasks/vartest.yml
create mode 100644 test/integration/targets/include_import/roles/role1/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/role1/vars/role1vars.yml
create mode 100644 test/integration/targets/include_import/roles/role2/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/role3/defaults/main.yml
create mode 100644 test/integration/targets/include_import/roles/role3/handlers/main.yml
create mode 100644 test/integration/targets/include_import/roles/role3/tasks/main.yml
create mode 100644 test/integration/targets/include_import/roles/role3/tasks/tasks.yml
create mode 100644 test/integration/targets/include_import/roles/role3/tasks/vartest.yml
create mode 100644 test/integration/targets/include_import/roles/role3/vars/main.yml
create mode 100644 test/integration/targets/include_import/roles/role3/vars/role3vars.yml
create mode 100644 test/integration/targets/include_import/roles/role_with_deps/meta/main.yml
create mode 100644 test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml
create mode 100644 test/integration/targets/include_import/run_once/include_me.yml
create mode 100644 test/integration/targets/include_import/run_once/playbook.yml
create mode 100755 test/integration/targets/include_import/runme.sh
create mode 100644 test/integration/targets/include_import/tasks/debug_item.yml
create mode 100644 test/integration/targets/include_import/tasks/hello/.gitignore
create mode 100644 test/integration/targets/include_import/tasks/hello/keep
create mode 100644 test/integration/targets/include_import/tasks/nested/nested.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks1.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks2.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks3.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks4.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks5.yml
create mode 100644 test/integration/targets/include_import/tasks/tasks6.yml
create mode 100644 test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml
create mode 100644 test/integration/targets/include_import/tasks/test_import_tasks.yml
create mode 100644 test/integration/targets/include_import/tasks/test_import_tasks_tags.yml
create mode 100644 test/integration/targets/include_import/tasks/test_include_dupe_loop.yml
create mode 100644 test/integration/targets/include_import/tasks/test_include_tasks.yml
create mode 100644 test/integration/targets/include_import/tasks/test_include_tasks_tags.yml
create mode 100644 test/integration/targets/include_import/tasks/test_recursion.yml
create mode 100644 test/integration/targets/include_import/tasks/validate3.yml
create mode 100644 test/integration/targets/include_import/tasks/validate_tags.yml
create mode 100644 test/integration/targets/include_import/test_copious_include_tasks.yml
create mode 100644 test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml
create mode 100644 test/integration/targets/include_import/test_grandparent_inheritance.yml
create mode 100644 test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml
create mode 100644 test/integration/targets/include_import/test_include_loop_fqcn.yml
create mode 100644 test/integration/targets/include_import/test_loop_var_bleed.yaml
create mode 100644 test/integration/targets/include_import/test_nested_tasks.yml
create mode 100644 test/integration/targets/include_import/test_nested_tasks_fqcn.yml
create mode 100644 test/integration/targets/include_import/test_role_recursion.yml
create mode 100644 test/integration/targets/include_import/test_role_recursion_fqcn.yml
create mode 100644 test/integration/targets/include_import/undefined_var/include_tasks.yml
create mode 100644 test/integration/targets/include_import/undefined_var/include_that_defines_var.yml
create mode 100644 test/integration/targets/include_import/undefined_var/playbook.yml
create mode 100644 test/integration/targets/include_import/valid_include_keywords/include_me.yml
create mode 100644 test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml
create mode 100644 test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml
create mode 100644 test/integration/targets/include_import/valid_include_keywords/playbook.yml
create mode 100644 test/integration/targets/include_parent_role_vars/aliases
create mode 100644 test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml
create mode 100644 test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml
create mode 100644 test/integration/targets/include_parent_role_vars/tasks/main.yml
create mode 100644 test/integration/targets/include_vars-ad-hoc/aliases
create mode 100644 test/integration/targets/include_vars-ad-hoc/dir/inc.yml
create mode 100755 test/integration/targets/include_vars-ad-hoc/runme.sh
create mode 100644 test/integration/targets/include_vars/aliases
create mode 100644 test/integration/targets/include_vars/defaults/main.yml
create mode 100644 test/integration/targets/include_vars/tasks/main.yml
create mode 100644 test/integration/targets/include_vars/vars/all/all.yml
create mode 100644 test/integration/targets/include_vars/vars/environments/development/all.yml
create mode 100644 test/integration/targets/include_vars/vars/environments/development/services/webapp.yml
create mode 100644 test/integration/targets/include_vars/vars/services/service_vars.yml
create mode 100644 test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml
create mode 100644 test/integration/targets/include_vars/vars/services/webapp.yml
create mode 100644 test/integration/targets/include_vars/vars/webapp/file_without_extension
create mode 100644 test/integration/targets/include_when_parent_is_dynamic/aliases
create mode 100644 test/integration/targets/include_when_parent_is_dynamic/playbook.yml
create mode 100755 test/integration/targets/include_when_parent_is_dynamic/runme.sh
create mode 100644 test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml
create mode 100644 test/integration/targets/include_when_parent_is_dynamic/tasks.yml
create mode 100644 test/integration/targets/include_when_parent_is_static/aliases
create mode 100644 test/integration/targets/include_when_parent_is_static/playbook.yml
create mode 100755 test/integration/targets/include_when_parent_is_static/runme.sh
create mode 100644 test/integration/targets/include_when_parent_is_static/syntax_error.yml
create mode 100644 test/integration/targets/include_when_parent_is_static/tasks.yml
create mode 100644 test/integration/targets/includes/aliases
create mode 100644 test/integration/targets/includes/roles/test_includes/handlers/main.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/empty.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/main.yml
create mode 100644 test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml
create mode 100644 test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml
create mode 100644 test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml
create mode 100644 test/integration/targets/includes/roles/test_includes_free/tasks/main.yml
create mode 100644 test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml
create mode 100644 test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml
create mode 100755 test/integration/targets/includes/runme.sh
create mode 100644 test/integration/targets/includes/test_include_free.yml
create mode 100644 test/integration/targets/includes/test_include_host_pinned.yml
create mode 100644 test/integration/targets/includes/test_includes.yml
create mode 100644 test/integration/targets/includes/test_includes2.yml
create mode 100644 test/integration/targets/includes/test_includes3.yml
create mode 100644 test/integration/targets/includes/test_includes4.yml
create mode 100644 test/integration/targets/includes_race/aliases
create mode 100644 test/integration/targets/includes_race/inventory
create mode 100644 test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml
create mode 100644 test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml
create mode 100644 test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml
create mode 100755 test/integration/targets/includes_race/runme.sh
create mode 100644 test/integration/targets/includes_race/test_includes_race.yml
create mode 100644 test/integration/targets/infra/aliases
create mode 100644 test/integration/targets/infra/inventory.local
create mode 100644 test/integration/targets/infra/library/test.py
create mode 100755 test/integration/targets/infra/runme.sh
create mode 100644 test/integration/targets/infra/test_test_infra.yml
create mode 100644 test/integration/targets/interpreter_discovery_python/aliases
create mode 100644 test/integration/targets/interpreter_discovery_python/library/test_echo_module.py
create mode 100644 test/integration/targets/interpreter_discovery_python/tasks/main.yml
create mode 100644 test/integration/targets/interpreter_discovery_python_delegate_facts/aliases
create mode 100644 test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml
create mode 100644 test/integration/targets/interpreter_discovery_python_delegate_facts/inventory
create mode 100755 test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh
create mode 100644 test/integration/targets/inventory/aliases
create mode 100644 test/integration/targets/inventory/inv_with_int.yml
create mode 100644 test/integration/targets/inventory/playbook.yml
create mode 100755 test/integration/targets/inventory/runme.sh
create mode 100644 test/integration/targets/inventory/strategy.yml
create mode 100644 test/integration/targets/inventory_cache/aliases
create mode 100644 test/integration/targets/inventory_cache/cache/.keep
create mode 100644 test/integration/targets/inventory_cache/cache_host.yml
create mode 100644 test/integration/targets/inventory_cache/plugins/inventory/cache_host.py
create mode 100755 test/integration/targets/inventory_cache/runme.sh
create mode 100644 test/integration/targets/inventory_ini/aliases
create mode 100644 test/integration/targets/inventory_ini/inventory.ini
create mode 100755 test/integration/targets/inventory_ini/runme.sh
create mode 100644 test/integration/targets/inventory_ini/test_ansible_become.yml
create mode 100644 test/integration/targets/inventory_script/aliases
create mode 100644 test/integration/targets/inventory_script/inventory.json
create mode 100755 test/integration/targets/inventory_script/inventory.sh
create mode 100755 test/integration/targets/inventory_script/runme.sh
create mode 100644 test/integration/targets/inventory_yaml/aliases
create mode 100644 test/integration/targets/inventory_yaml/empty.json
create mode 100755 test/integration/targets/inventory_yaml/runme.sh
create mode 100644 test/integration/targets/inventory_yaml/success.json
create mode 100644 test/integration/targets/inventory_yaml/test.yml
create mode 100644 test/integration/targets/jinja2_native_types/aliases
create mode 100644 test/integration/targets/jinja2_native_types/nested_undefined.yml
create mode 100755 test/integration/targets/jinja2_native_types/runme.sh
create mode 100644 test/integration/targets/jinja2_native_types/runtests.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_bool.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_casting.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_concatentation.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_dunder.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_hostvars.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_none.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_template.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_template_newlines.j2
create mode 100644 test/integration/targets/jinja2_native_types/test_types.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_vault.yml
create mode 100644 test/integration/targets/jinja2_native_types/test_vault_pass
create mode 100644 test/integration/targets/json_cleanup/aliases
create mode 100644 test/integration/targets/json_cleanup/library/bad_json
create mode 100644 test/integration/targets/json_cleanup/module_output_cleaning.yml
create mode 100755 test/integration/targets/json_cleanup/runme.sh
create mode 100644 test/integration/targets/known_hosts/aliases
create mode 100644 test/integration/targets/known_hosts/defaults/main.yml
create mode 100644 test/integration/targets/known_hosts/files/existing_known_hosts
create mode 100644 test/integration/targets/known_hosts/meta/main.yml
create mode 100644 test/integration/targets/known_hosts/tasks/main.yml
create mode 100644 test/integration/targets/limit_inventory/aliases
create mode 100644 test/integration/targets/limit_inventory/hosts.yml
create mode 100755 test/integration/targets/limit_inventory/runme.sh
create mode 100644 test/integration/targets/lineinfile/aliases
create mode 100644 test/integration/targets/lineinfile/files/firstmatch.txt
create mode 100644 test/integration/targets/lineinfile/files/test.conf
create mode 100644 test/integration/targets/lineinfile/files/test.txt
create mode 100644 test/integration/targets/lineinfile/files/test_58923.txt
create mode 100644 test/integration/targets/lineinfile/files/testempty.txt
create mode 100644 test/integration/targets/lineinfile/files/testmultiple.txt
create mode 100644 test/integration/targets/lineinfile/files/testnoeof.txt
create mode 100644 test/integration/targets/lineinfile/meta/main.yml
create mode 100644 test/integration/targets/lineinfile/tasks/main.yml
create mode 100644 test/integration/targets/lineinfile/vars/main.yml
create mode 100644 test/integration/targets/lookup_config/aliases
create mode 100644 test/integration/targets/lookup_config/tasks/main.yml
create mode 100644 test/integration/targets/lookup_dict/aliases
create mode 100644 test/integration/targets/lookup_dict/tasks/main.yml
create mode 100644 test/integration/targets/lookup_env/aliases
create mode 100644 test/integration/targets/lookup_env/tasks/main.yml
create mode 100644 test/integration/targets/lookup_file/aliases
create mode 100644 test/integration/targets/lookup_file/tasks/main.yml
create mode 100644 test/integration/targets/lookup_fileglob/aliases
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/play.yml
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/play_adj.txt
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt
create mode 100644 test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml
create mode 100644 test/integration/targets/lookup_fileglob/non_existent/play.yml
create mode 100755 test/integration/targets/lookup_fileglob/runme.sh
create mode 100644 test/integration/targets/lookup_first_found/aliases
create mode 100644 test/integration/targets/lookup_first_found/files/bar1
create mode 100644 test/integration/targets/lookup_first_found/files/foo1
create mode 100644 test/integration/targets/lookup_first_found/tasks/main.yml
create mode 100644 test/integration/targets/lookup_indexed_items/aliases
create mode 100644 test/integration/targets/lookup_indexed_items/tasks/main.yml
create mode 100644 test/integration/targets/lookup_ini/aliases
create mode 100644 test/integration/targets/lookup_ini/lookup-8859-15.ini
create mode 100644 test/integration/targets/lookup_ini/lookup.ini create mode 100644 test/integration/targets/lookup_ini/lookup.properties create mode 100755 test/integration/targets/lookup_ini/runme.sh create mode 100644 test/integration/targets/lookup_ini/test_lookup_properties.yml create mode 100644 test/integration/targets/lookup_inventory_hostnames/aliases create mode 100644 test/integration/targets/lookup_inventory_hostnames/inventory create mode 100644 test/integration/targets/lookup_inventory_hostnames/main.yml create mode 100755 test/integration/targets/lookup_inventory_hostnames/runme.sh create mode 100644 test/integration/targets/lookup_items/aliases create mode 100644 test/integration/targets/lookup_items/tasks/main.yml create mode 100644 test/integration/targets/lookup_lines/aliases create mode 100644 test/integration/targets/lookup_lines/tasks/main.yml create mode 100644 test/integration/targets/lookup_list/aliases create mode 100644 test/integration/targets/lookup_list/tasks/main.yml create mode 100644 test/integration/targets/lookup_nested/aliases create mode 100644 test/integration/targets/lookup_nested/tasks/main.yml create mode 100644 test/integration/targets/lookup_password/aliases create mode 100755 test/integration/targets/lookup_password/runme.sh create mode 100644 test/integration/targets/lookup_password/runme.yml create mode 100644 test/integration/targets/lookup_password/tasks/main.yml create mode 100644 test/integration/targets/lookup_pipe/aliases create mode 100644 test/integration/targets/lookup_pipe/tasks/main.yml create mode 100644 test/integration/targets/lookup_random_choice/aliases create mode 100644 test/integration/targets/lookup_random_choice/tasks/main.yml create mode 100644 test/integration/targets/lookup_sequence/aliases create mode 100644 test/integration/targets/lookup_sequence/tasks/main.yml create mode 100644 test/integration/targets/lookup_subelements/aliases create mode 100644 test/integration/targets/lookup_subelements/tasks/main.yml create mode 100644 test/integration/targets/lookup_subelements/vars/main.yml create mode 100644 test/integration/targets/lookup_template/aliases create mode 100644 test/integration/targets/lookup_template/tasks/main.yml create mode 100644 test/integration/targets/lookup_template/templates/hello.txt create mode 100644 test/integration/targets/lookup_template/templates/hello_string.txt create mode 100644 test/integration/targets/lookup_template/templates/world.txt create mode 100644 test/integration/targets/lookup_together/aliases create mode 100644 test/integration/targets/lookup_together/tasks/main.yml create mode 100644 test/integration/targets/lookup_unvault/aliases create mode 100644 test/integration/targets/lookup_unvault/files/foot.txt create mode 100644 test/integration/targets/lookup_unvault/files/foot.txt.vault create mode 100755 test/integration/targets/lookup_unvault/runme.sh create mode 100644 test/integration/targets/lookup_unvault/secret create mode 100644 test/integration/targets/lookup_unvault/unvault.yml create mode 100644 test/integration/targets/lookup_url/aliases create mode 100644 test/integration/targets/lookup_url/meta/main.yml create mode 100644 test/integration/targets/lookup_url/tasks/main.yml create mode 100644 test/integration/targets/lookup_vars/aliases create mode 100644 test/integration/targets/lookup_vars/tasks/main.yml create mode 100644 test/integration/targets/loop_control/aliases create mode 100644 test/integration/targets/loop_control/extended.yml create mode 100644 
test/integration/targets/loop_control/inner.yml create mode 100644 test/integration/targets/loop_control/label.yml create mode 100755 test/integration/targets/loop_control/runme.sh create mode 100644 test/integration/targets/loops/aliases create mode 100644 test/integration/targets/loops/files/data1.txt create mode 100644 test/integration/targets/loops/files/data2.txt create mode 100644 test/integration/targets/loops/tasks/index_var_tasks.yml create mode 100644 test/integration/targets/loops/tasks/main.yml create mode 100644 test/integration/targets/loops/tasks/templated_loop_var_tasks.yml create mode 100644 test/integration/targets/loops/vars/64169.yml create mode 100644 test/integration/targets/loops/vars/main.yml create mode 100644 test/integration/targets/meta_tasks/aliases create mode 100644 test/integration/targets/meta_tasks/inventory.yml create mode 100755 test/integration/targets/meta_tasks/runme.sh create mode 100644 test/integration/targets/meta_tasks/test_end_host.yml create mode 100644 test/integration/targets/meta_tasks/test_end_host_all.yml create mode 100644 test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml create mode 100644 test/integration/targets/meta_tasks/test_end_host_fqcn.yml create mode 100644 test/integration/targets/meta_tasks/test_end_play.yml create mode 100644 test/integration/targets/meta_tasks/test_end_play_fqcn.yml create mode 100644 test/integration/targets/missing_required_lib/aliases create mode 100644 test/integration/targets/missing_required_lib/library/missing_required_lib.py create mode 100755 test/integration/targets/missing_required_lib/runme.sh create mode 100644 test/integration/targets/missing_required_lib/runme.yml create mode 100644 test/integration/targets/missing_required_lib/tasks/main.yml create mode 100644 test/integration/targets/module_defaults/aliases create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py create mode 100644 test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py create mode 100644 test/integration/targets/module_defaults/library/test_module_defaults.py create mode 100755 test/integration/targets/module_defaults/runme.sh create mode 100644 test/integration/targets/module_defaults/tasks/main.yml create mode 100644 test/integration/targets/module_defaults/test_defaults.yml create mode 100644 test/integration/targets/module_no_log/aliases create mode 100644 test/integration/targets/module_no_log/library/module_that_logs.py create mode 100644 test/integration/targets/module_no_log/tasks/main.yml create mode 100644 test/integration/targets/module_precedence/aliases create mode 100644 test/integration/targets/module_precedence/lib_no_extension/ping create mode 100644 
test/integration/targets/module_precedence/lib_with_extension/a.ini create mode 100644 test/integration/targets/module_precedence/lib_with_extension/a.py create mode 100644 test/integration/targets/module_precedence/lib_with_extension/ping.ini create mode 100644 test/integration/targets/module_precedence/lib_with_extension/ping.py create mode 100644 test/integration/targets/module_precedence/modules_test.yml create mode 100644 test/integration/targets/module_precedence/modules_test_envvar.yml create mode 100644 test/integration/targets/module_precedence/modules_test_envvar_ext.yml create mode 100644 test/integration/targets/module_precedence/modules_test_multiple_roles.yml create mode 100644 test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml create mode 100644 test/integration/targets/module_precedence/modules_test_role.yml create mode 100644 test/integration/targets/module_precedence/modules_test_role_ext.yml create mode 100644 test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py create mode 100644 test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml create mode 100644 test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py create mode 100644 test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml create mode 100644 test/integration/targets/module_precedence/roles_no_extension/foo/library/ping create mode 100644 test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml create mode 100644 test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini create mode 100644 test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py create mode 100644 test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini create mode 100644 test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py create mode 100644 test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml create mode 100755 test/integration/targets/module_precedence/runme.sh create mode 100644 test/integration/targets/module_tracebacks/aliases create mode 100644 test/integration/targets/module_tracebacks/inventory create mode 100755 test/integration/targets/module_tracebacks/runme.sh create mode 100644 test/integration/targets/module_tracebacks/traceback.yml create mode 100644 test/integration/targets/module_utils/aliases create mode 100644 test/integration/targets/module_utils/callback/pure_json.py create mode 100644 test/integration/targets/module_utils/collections/ansible_collections/testns/testcoll/plugins/module_utils/legit.py create mode 100644 test/integration/targets/module_utils/library/test.py create mode 100644 test/integration/targets/module_utils/library/test_alias_deprecation.py create mode 100644 test/integration/targets/module_utils/library/test_cwd_missing.py create mode 100644 test/integration/targets/module_utils/library/test_cwd_unreadable.py create mode 100644 test/integration/targets/module_utils/library/test_env_override.py create mode 100644 test/integration/targets/module_utils/library/test_failure.py create mode 100644 test/integration/targets/module_utils/library/test_no_log.py create mode 100644 test/integration/targets/module_utils/library/test_optional.py create mode 100644 test/integration/targets/module_utils/library/test_override.py create mode 100644 test/integration/targets/module_utils/library/test_recursive_diff.py create mode 100644 
test/integration/targets/module_utils/module_utils/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/bar0/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/bar0/foo.py create mode 100644 test/integration/targets/module_utils/module_utils/bar1/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/bar2/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/baz1/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/baz1/one.py create mode 100644 test/integration/targets/module_utils/module_utils/baz2/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/baz2/one.py create mode 100644 test/integration/targets/module_utils/module_utils/facts.py create mode 100644 test/integration/targets/module_utils/module_utils/foo.py create mode 100644 test/integration/targets/module_utils/module_utils/foo0.py create mode 100644 test/integration/targets/module_utils/module_utils/foo1.py create mode 100644 test/integration/targets/module_utils/module_utils/foo2.py create mode 100644 test/integration/targets/module_utils/module_utils/qux1/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/qux1/quux.py create mode 100644 test/integration/targets/module_utils/module_utils/qux2/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/qux2/quux.py create mode 100644 test/integration/targets/module_utils/module_utils/qux2/quuz.py create mode 100644 test/integration/targets/module_utils/module_utils/service.py create mode 100644 test/integration/targets/module_utils/module_utils/spam1/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam2/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam3/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py create mode 100644 test/integration/targets/module_utils/module_utils/spam4/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py create mode 100644 test/integration/targets/module_utils/module_utils/spam5/__init__.py create 
mode 100644 test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py create mode 100644 test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py create mode 100644 test/integration/targets/module_utils/module_utils/spam6/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam7/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py create mode 100644 test/integration/targets/module_utils/module_utils/spam8/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bam.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bam/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bam/bam.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bar/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bar/bam.py create mode 100644 test/integration/targets/module_utils/module_utils/sub/bar/bar.py create mode 100644 test/integration/targets/module_utils/module_utils/yak/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py create mode 100644 test/integration/targets/module_utils/module_utils/yak/zebra/foo.py create mode 100644 test/integration/targets/module_utils/module_utils_basic_setcwd.yml create mode 100644 test/integration/targets/module_utils/module_utils_common_dict_transformation.yml create mode 100644 test/integration/targets/module_utils/module_utils_envvar.yml create mode 100644 test/integration/targets/module_utils/module_utils_test.yml create mode 100644 test/integration/targets/module_utils/module_utils_test_no_log.yml create mode 100644 test/integration/targets/module_utils/module_utils_vvvvv.yml create mode 100644 test/integration/targets/module_utils/other_mu_dir/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/facts.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/json_utils.py create mode 100644 test/integration/targets/module_utils/other_mu_dir/mork.py create mode 100755 test/integration/targets/module_utils/runme.sh create mode 100644 
test/integration/targets/module_utils_Ansible.AccessToken/aliases create mode 100644 test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.Basic/aliases create mode 100644 test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.Become/aliases create mode 100644 test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.Become/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1 create mode 100644 
test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.Privilege/aliases create mode 100644 test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.Process/aliases create mode 100644 test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.Process/tasks/main.yml create mode 100644 test/integration/targets/module_utils_Ansible.Service/aliases create mode 100644 test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1 create mode 100644 test/integration/targets/module_utils_Ansible.Service/tasks/main.yml create mode 100644 test/integration/targets/network_cli/aliases create mode 100644 test/integration/targets/network_cli/passworded_user.yml create mode 100755 test/integration/targets/network_cli/runme.sh create mode 100644 test/integration/targets/network_cli/setup.yml create mode 100644 test/integration/targets/network_cli/teardown.yml create mode 100644 test/integration/targets/no_log/aliases create mode 100644 test/integration/targets/no_log/dynamic.yml create mode 100644 test/integration/targets/no_log/library/module.py create mode 100644 test/integration/targets/no_log/no_log_local.yml create mode 100644 test/integration/targets/no_log/no_log_suboptions.yml create mode 100644 test/integration/targets/no_log/no_log_suboptions_invalid.yml create mode 100755 test/integration/targets/no_log/runme.sh create mode 100644 test/integration/targets/noexec/aliases create mode 100644 test/integration/targets/noexec/inventory create mode 100755 test/integration/targets/noexec/runme.sh create mode 100644 test/integration/targets/noexec/test-noexec.yml create mode 100644 test/integration/targets/old_style_cache_plugins/aliases create mode 100644 test/integration/targets/old_style_cache_plugins/inventory_config create mode 100644 test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py create mode 100644 test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py create mode 100755 test/integration/targets/old_style_cache_plugins/runme.sh create mode 100644 test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml create mode 100644 
test/integration/targets/old_style_modules_posix/aliases create mode 100644 test/integration/targets/old_style_modules_posix/library/helloworld.sh create mode 100644 test/integration/targets/old_style_modules_posix/meta/main.yml create mode 100644 test/integration/targets/old_style_modules_posix/tasks/main.yml create mode 100644 test/integration/targets/omit/48673.yml create mode 100644 test/integration/targets/omit/aliases create mode 100755 test/integration/targets/omit/runme.sh create mode 100644 test/integration/targets/order/aliases create mode 100644 test/integration/targets/order/inventory create mode 100644 test/integration/targets/order/order.yml create mode 100755 test/integration/targets/order/runme.sh create mode 100644 test/integration/targets/package/aliases create mode 100644 test/integration/targets/package/meta/main.yml create mode 100644 test/integration/targets/package/tasks/main.yml create mode 100644 test/integration/targets/package_facts/aliases create mode 100644 test/integration/targets/package_facts/tasks/main.yml create mode 100644 test/integration/targets/parsing/aliases create mode 100644 test/integration/targets/parsing/bad_parsing.yml create mode 100644 test/integration/targets/parsing/good_parsing.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml create mode 100644 test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml create mode 100644 test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml create mode 100644 test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml create mode 100644 test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml create mode 100644 test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml create mode 100644 test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml create mode 100755 test/integration/targets/parsing/runme.sh create mode 100644 test/integration/targets/path_lookups/aliases create mode 100644 test/integration/targets/path_lookups/play.yml create mode 100644 test/integration/targets/path_lookups/roles/showfile/tasks/main.yml create mode 100755 test/integration/targets/path_lookups/runme.sh create mode 100644 test/integration/targets/path_lookups/testplay.yml create mode 100644 test/integration/targets/path_with_comma_in_inventory/aliases create mode 100644 test/integration/targets/path_with_comma_in_inventory/playbook.yml create mode 100755 test/integration/targets/path_with_comma_in_inventory/runme.sh create mode 100644 test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml create mode 100644 test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts create mode 100644 test/integration/targets/pause/aliases create mode 100644 test/integration/targets/pause/pause-1.yml create mode 100644 test/integration/targets/pause/pause-2.yml create mode 100644 test/integration/targets/pause/pause-3.yml create mode 100644 test/integration/targets/pause/pause-4.yml create mode 100644 test/integration/targets/pause/pause-5.yml 
create mode 100755 test/integration/targets/pause/runme.sh create mode 100644 test/integration/targets/pause/setup.yml create mode 100644 test/integration/targets/pause/test-pause-background.yml create mode 100644 test/integration/targets/pause/test-pause-no-tty.yml create mode 100755 test/integration/targets/pause/test-pause.py create mode 100644 test/integration/targets/pause/test-pause.yml create mode 100644 test/integration/targets/ping/aliases create mode 100644 test/integration/targets/ping/tasks/main.yml create mode 100644 test/integration/targets/pip/aliases create mode 100644 test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py create mode 100755 test/integration/targets/pip/files/setup.py create mode 100644 test/integration/targets/pip/meta/main.yml create mode 100644 test/integration/targets/pip/tasks/default_cleanup.yml create mode 100644 test/integration/targets/pip/tasks/freebsd_cleanup.yml create mode 100644 test/integration/targets/pip/tasks/main.yml create mode 100644 test/integration/targets/pip/tasks/pip.yml create mode 100644 test/integration/targets/pip/vars/main.yml create mode 100644 test/integration/targets/pkg_resources/aliases create mode 100644 test/integration/targets/pkg_resources/lookup_plugins/check_pkg_resources.py create mode 100644 test/integration/targets/pkg_resources/tasks/main.yml create mode 100644 test/integration/targets/play_iterator/aliases create mode 100644 test/integration/targets/play_iterator/playbook.yml create mode 100755 test/integration/targets/play_iterator/runme.sh create mode 100644 test/integration/targets/playbook/aliases create mode 100755 test/integration/targets/playbook/runme.sh create mode 100644 test/integration/targets/playbook/timeout.yml create mode 100644 test/integration/targets/playbook/types.yml create mode 100644 test/integration/targets/plugin_config_for_inventory/aliases create mode 100644 test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml create mode 100644 test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml create mode 100755 test/integration/targets/plugin_config_for_inventory/runme.sh create mode 100644 test/integration/targets/plugin_config_for_inventory/test_inventory.py create mode 100644 test/integration/targets/plugin_filtering/aliases create mode 100644 test/integration/targets/plugin_filtering/copy.yml create mode 100644 test/integration/targets/plugin_filtering/filter_lookup.ini create mode 100644 test/integration/targets/plugin_filtering/filter_lookup.yml create mode 100644 test/integration/targets/plugin_filtering/filter_modules.ini create mode 100644 test/integration/targets/plugin_filtering/filter_modules.yml create mode 100644 test/integration/targets/plugin_filtering/filter_ping.ini create mode 100644 test/integration/targets/plugin_filtering/filter_ping.yml create mode 100644 test/integration/targets/plugin_filtering/filter_stat.ini create mode 100644 test/integration/targets/plugin_filtering/filter_stat.yml create mode 100644 test/integration/targets/plugin_filtering/lookup.yml create mode 100644 test/integration/targets/plugin_filtering/no_blacklist_module.ini create mode 100644 test/integration/targets/plugin_filtering/no_blacklist_module.yml create mode 100644 test/integration/targets/plugin_filtering/no_filters.ini create mode 100644 test/integration/targets/plugin_filtering/pause.yml create mode 100644 test/integration/targets/plugin_filtering/ping.yml create mode 100755 
test/integration/targets/plugin_filtering/runme.sh create mode 100644 test/integration/targets/plugin_filtering/stat.yml create mode 100644 test/integration/targets/plugin_filtering/tempfile.yml create mode 100644 test/integration/targets/plugin_loader/aliases create mode 100644 test/integration/targets/plugin_loader/normal/filters.yml create mode 120000 test/integration/targets/plugin_loader/normal/library/_symlink.py create mode 100644 test/integration/targets/plugin_loader/normal/library/_underscore.py create mode 100644 test/integration/targets/plugin_loader/normal/underscore.yml create mode 100644 test/integration/targets/plugin_loader/override/filter_plugins/core.py create mode 100644 test/integration/targets/plugin_loader/override/filters.yml create mode 100755 test/integration/targets/plugin_loader/runme.sh create mode 100644 test/integration/targets/plugin_namespace/aliases create mode 100644 test/integration/targets/plugin_namespace/filter_plugins/test_filter.py create mode 100644 test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py create mode 100644 test/integration/targets/plugin_namespace/tasks/main.yml create mode 100644 test/integration/targets/plugin_namespace/test_plugins/test_test.py create mode 100644 test/integration/targets/prepare_http_tests/defaults/main.yml create mode 100644 test/integration/targets/prepare_http_tests/meta/main.yml create mode 100644 test/integration/targets/prepare_http_tests/tasks/default.yml create mode 100644 test/integration/targets/prepare_http_tests/tasks/main.yml create mode 100644 test/integration/targets/prepare_http_tests/tasks/windows.yml create mode 100644 test/integration/targets/prepare_http_tests/vars/httptester.yml create mode 100644 test/integration/targets/prepare_tests/tasks/main.yml create mode 100644 test/integration/targets/pull/aliases create mode 100644 test/integration/targets/pull/cleanup.yml create mode 100644 test/integration/targets/pull/pull-integration-test/ansible.cfg create mode 100644 test/integration/targets/pull/pull-integration-test/inventory create mode 100644 test/integration/targets/pull/pull-integration-test/local.yml create mode 100755 test/integration/targets/pull/runme.sh create mode 100644 test/integration/targets/pull/setup.yml create mode 100644 test/integration/targets/raw/aliases create mode 100644 test/integration/targets/raw/meta/main.yml create mode 100755 test/integration/targets/raw/runme.sh create mode 100644 test/integration/targets/raw/runme.yml create mode 100644 test/integration/targets/raw/tasks/main.yml create mode 100644 test/integration/targets/reboot/aliases create mode 100644 test/integration/targets/reboot/tasks/check_reboot.yml create mode 100644 test/integration/targets/reboot/tasks/get_boot_time.yml create mode 100644 test/integration/targets/reboot/tasks/main.yml create mode 100644 test/integration/targets/reboot/vars/main.yml create mode 100644 test/integration/targets/rel_plugin_loading/aliases create mode 100644 test/integration/targets/rel_plugin_loading/notyaml.yml create mode 100755 test/integration/targets/rel_plugin_loading/runme.sh create mode 100644 test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py create mode 100644 test/integration/targets/rel_plugin_loading/subdir/play.yml create mode 100644 test/integration/targets/remote_tmp/aliases create mode 100644 test/integration/targets/remote_tmp/playbook.yml create mode 100755 test/integration/targets/remote_tmp/runme.sh create mode 100644 
test/integration/targets/replace/aliases create mode 100644 test/integration/targets/replace/meta/main.yml create mode 100644 test/integration/targets/replace/tasks/main.yml create mode 100644 test/integration/targets/retry_task_name_in_callback/aliases create mode 100755 test/integration/targets/retry_task_name_in_callback/runme.sh create mode 100644 test/integration/targets/retry_task_name_in_callback/test.yml create mode 100644 test/integration/targets/roles/aliases create mode 100644 test/integration/targets/roles/allowed_dupes.yml create mode 100644 test/integration/targets/roles/data_integrity.yml create mode 100644 test/integration/targets/roles/no_dupes.yml create mode 100644 test/integration/targets/roles/roles/a/tasks/main.yml create mode 100644 test/integration/targets/roles/roles/b/meta/main.yml create mode 100644 test/integration/targets/roles/roles/b/tasks/main.yml create mode 100644 test/integration/targets/roles/roles/c/meta/main.yml create mode 100644 test/integration/targets/roles/roles/c/tasks/main.yml create mode 100644 test/integration/targets/roles/roles/data/defaults/main/00.yml create mode 100644 test/integration/targets/roles/roles/data/defaults/main/01.yml create mode 100644 test/integration/targets/roles/roles/data/tasks/main.yml create mode 100755 test/integration/targets/roles/runme.sh create mode 100644 test/integration/targets/rpm_key/aliases create mode 100644 test/integration/targets/rpm_key/defaults/main.yaml create mode 100644 test/integration/targets/rpm_key/tasks/main.yaml create mode 100644 test/integration/targets/rpm_key/tasks/rpm_key.yaml create mode 100644 test/integration/targets/run_modules/aliases create mode 100644 test/integration/targets/run_modules/args.json create mode 100644 test/integration/targets/run_modules/library/test.py create mode 100755 test/integration/targets/run_modules/runme.sh create mode 100644 test/integration/targets/script/aliases create mode 100755 test/integration/targets/script/files/create_afile.sh create mode 100644 test/integration/targets/script/files/no_shebang.py create mode 100755 test/integration/targets/script/files/remove_afile.sh create mode 100755 test/integration/targets/script/files/space path/test.sh create mode 100755 test/integration/targets/script/files/test.sh create mode 100755 test/integration/targets/script/files/test_with_args.sh create mode 100644 test/integration/targets/script/meta/main.yml create mode 100644 test/integration/targets/script/tasks/main.yml create mode 100644 test/integration/targets/service/aliases create mode 100644 test/integration/targets/service/files/ansible-broken.upstart create mode 100644 test/integration/targets/service/files/ansible.rc create mode 100644 test/integration/targets/service/files/ansible.systemd create mode 100755 test/integration/targets/service/files/ansible.sysv create mode 100644 test/integration/targets/service/files/ansible.upstart create mode 100644 test/integration/targets/service/files/ansible_test_service.py create mode 100644 test/integration/targets/service/meta/main.yml create mode 100644 test/integration/targets/service/tasks/main.yml create mode 100644 test/integration/targets/service/tasks/rc_cleanup.yml create mode 100644 test/integration/targets/service/tasks/rc_setup.yml create mode 100644 test/integration/targets/service/tasks/systemd_cleanup.yml create mode 100644 test/integration/targets/service/tasks/systemd_setup.yml create mode 100644 test/integration/targets/service/tasks/sysv_cleanup.yml create mode 100644 
test/integration/targets/service/tasks/sysv_setup.yml create mode 100644 test/integration/targets/service/tasks/tests.yml create mode 100644 test/integration/targets/service/tasks/upstart_cleanup.yml create mode 100644 test/integration/targets/service/tasks/upstart_setup.yml create mode 100644 test/integration/targets/service/templates/main.yml create mode 100644 test/integration/targets/service_facts/aliases create mode 100644 test/integration/targets/service_facts/files/ansible.systemd create mode 100644 test/integration/targets/service_facts/files/ansible_test_service.py create mode 100644 test/integration/targets/service_facts/tasks/main.yml create mode 100644 test/integration/targets/service_facts/tasks/systemd_cleanup.yml create mode 100644 test/integration/targets/service_facts/tasks/systemd_setup.yml create mode 100644 test/integration/targets/service_facts/tasks/tests.yml create mode 100644 test/integration/targets/set_fact/aliases create mode 100644 test/integration/targets/set_fact/incremental.yml create mode 100644 test/integration/targets/set_fact/inventory create mode 100644 test/integration/targets/set_fact/nowarn_clean_facts.yml create mode 100755 test/integration/targets/set_fact/runme.sh create mode 100644 test/integration/targets/set_fact/set_fact_bool_conv.yml create mode 100644 test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml create mode 100644 test/integration/targets/set_fact/set_fact_cached_1.yml create mode 100644 test/integration/targets/set_fact/set_fact_cached_2.yml create mode 100644 test/integration/targets/set_fact/set_fact_no_cache.yml create mode 100644 test/integration/targets/setup_cron/defaults/main.yml create mode 100644 test/integration/targets/setup_cron/tasks/main.yml create mode 100644 test/integration/targets/setup_cron/vars/debian.yml create mode 100644 test/integration/targets/setup_cron/vars/default.yml create mode 100644 test/integration/targets/setup_cron/vars/fedora.yml create mode 100644 test/integration/targets/setup_cron/vars/freebsd.yml create mode 100644 test/integration/targets/setup_cron/vars/redhat.yml create mode 100644 test/integration/targets/setup_cron/vars/suse.yml create mode 100644 test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0 create mode 100644 test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1 create mode 100644 test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0 create mode 100644 test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1 create mode 100644 test/integration/targets/setup_deb_repo/meta/main.yml create mode 100644 test/integration/targets/setup_deb_repo/tasks/main.yml create mode 100644 test/integration/targets/setup_epel/tasks/main.yml create mode 100644 test/integration/targets/setup_gnutar/handlers/main.yml create mode 100644 test/integration/targets/setup_gnutar/tasks/main.yml create mode 100644 test/integration/targets/setup_nobody/handlers/main.yml create mode 100644 test/integration/targets/setup_nobody/tasks/main.yml create mode 100644 test/integration/targets/setup_paramiko/aliases create mode 100644 test/integration/targets/setup_paramiko/constraints.txt create mode 100644 test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/install-Darwin-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml create mode 100644 
test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/install-fail.yml create mode 100644 test/integration/targets/setup_paramiko/install-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/install-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/install.yml create mode 100644 test/integration/targets/setup_paramiko/inventory create mode 100644 test/integration/targets/setup_paramiko/library/detect_paramiko.py create mode 100644 test/integration/targets/setup_paramiko/setup-remote-constraints.yml create mode 100644 test/integration/targets/setup_paramiko/setup.sh create mode 100644 test/integration/targets/setup_paramiko/uninstall-Darwin-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-dnf.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-fail.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-yum.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml create mode 100644 test/integration/targets/setup_paramiko/uninstall.yml create mode 100644 test/integration/targets/setup_passlib/tasks/main.yml create mode 100644 test/integration/targets/setup_pexpect/files/constraints.txt create mode 100644 test/integration/targets/setup_pexpect/meta/main.yml create mode 100644 test/integration/targets/setup_pexpect/tasks/main.yml create mode 100644 test/integration/targets/setup_remote_constraints/aliases create mode 100644 test/integration/targets/setup_remote_constraints/meta/main.yml create mode 100644 test/integration/targets/setup_remote_constraints/tasks/main.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/handlers/main.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/tasks/default.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/tasks/main.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml create mode 100644 test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml create mode 100644 test/integration/targets/setup_rpm_repo/aliases create mode 100644 test/integration/targets/setup_rpm_repo/defaults/main.yml create mode 100644 test/integration/targets/setup_rpm_repo/files/comps.xml create mode 100644 
test/integration/targets/setup_rpm_repo/files/create-repo.py create mode 100644 test/integration/targets/setup_rpm_repo/handlers/main.yml create mode 100644 test/integration/targets/setup_rpm_repo/library/create_repo.py create mode 100644 test/integration/targets/setup_rpm_repo/meta/main.yml create mode 100644 test/integration/targets/setup_rpm_repo/tasks/main.yml create mode 100644 test/integration/targets/setup_rpm_repo/vars/Fedora.yml create mode 100644 test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml create mode 100644 test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml create mode 100644 test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml create mode 100644 test/integration/targets/setup_rpm_repo/vars/main.yml create mode 100644 test/integration/targets/setup_win_printargv/files/PrintArgv.cs create mode 100644 test/integration/targets/setup_win_printargv/meta/main.yml create mode 100644 test/integration/targets/setup_win_printargv/tasks/main.yml create mode 100644 test/integration/targets/shell/action_plugins/test_shell.py create mode 100644 test/integration/targets/shell/aliases create mode 100644 test/integration/targets/shell/connection_plugins/test_connection_default.py create mode 100644 test/integration/targets/shell/connection_plugins/test_connection_override.py create mode 100644 test/integration/targets/shell/tasks/main.yml create mode 100644 test/integration/targets/slurp/aliases create mode 100644 test/integration/targets/slurp/files/bar.bin create mode 100644 test/integration/targets/slurp/tasks/main.yml create mode 100644 test/integration/targets/special_vars/aliases create mode 100644 test/integration/targets/special_vars/meta/main.yml create mode 100644 test/integration/targets/special_vars/tasks/main.yml create mode 100644 test/integration/targets/special_vars/templates/foo.j2 create mode 100644 test/integration/targets/special_vars/vars/main.yml create mode 100644 test/integration/targets/stat/aliases create mode 100644 test/integration/targets/stat/files/foo.txt create mode 100644 test/integration/targets/stat/meta/main.yml create mode 100644 test/integration/targets/stat/tasks/main.yml create mode 100644 test/integration/targets/strategy_linear/aliases create mode 100644 test/integration/targets/strategy_linear/inventory create mode 100644 test/integration/targets/strategy_linear/roles/role1/tasks/main.yml create mode 100644 test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml create mode 100644 test/integration/targets/strategy_linear/roles/role2/tasks/main.yml create mode 100755 test/integration/targets/strategy_linear/runme.sh create mode 100644 test/integration/targets/strategy_linear/test_include_file_noop.yml create mode 100644 test/integration/targets/subversion/aliases create mode 100644 test/integration/targets/subversion/roles/subversion/defaults/main.yml create mode 100644 test/integration/targets/subversion/roles/subversion/files/create_repo.sh create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/main.yml create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/setup.yml create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/tests.yml create mode 100644 test/integration/targets/subversion/roles/subversion/tasks/warnings.yml create mode 100644 
test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2
 create mode 100755 test/integration/targets/subversion/runme.sh
 create mode 100644 test/integration/targets/subversion/runme.yml
 create mode 100644 test/integration/targets/subversion/vars/Debian.yml
 create mode 100644 test/integration/targets/subversion/vars/FreeBSD.yml
 create mode 100644 test/integration/targets/subversion/vars/RedHat.yml
 create mode 100644 test/integration/targets/subversion/vars/Suse.yml
 create mode 100644 test/integration/targets/subversion/vars/Ubuntu-18.yml
 create mode 100644 test/integration/targets/subversion/vars/Ubuntu-20.yml
 create mode 100644 test/integration/targets/systemd/aliases
 create mode 100644 test/integration/targets/systemd/defaults/main.yml
 create mode 100644 test/integration/targets/systemd/handlers/main.yml
 create mode 100644 test/integration/targets/systemd/meta/main.yml
 create mode 100644 test/integration/targets/systemd/tasks/main.yml
 create mode 100644 test/integration/targets/systemd/tasks/test_unit_template.yml
 create mode 100644 test/integration/targets/systemd/templates/sleeper@.service
 create mode 100644 test/integration/targets/systemd/vars/Debian.yml
 create mode 100644 test/integration/targets/systemd/vars/default.yml
 create mode 100644 test/integration/targets/tags/aliases
 create mode 100755 test/integration/targets/tags/runme.sh
 create mode 100644 test/integration/targets/tags/test_tags.yml
 create mode 100644 test/integration/targets/task_ordering/aliases
 create mode 100644 test/integration/targets/task_ordering/meta/main.yml
 create mode 100644 test/integration/targets/task_ordering/tasks/main.yml
 create mode 100644 test/integration/targets/task_ordering/tasks/taskorder-include.yml
 create mode 100644 test/integration/targets/tasks/aliases
 create mode 100644 test/integration/targets/tasks/tasks/main.yml
 create mode 100644 test/integration/targets/template/6653.yml
 create mode 100644 test/integration/targets/template/72262.yml
 create mode 100644 test/integration/targets/template/72615.yml
 create mode 100644 test/integration/targets/template/aliases
 create mode 100644 test/integration/targets/template/ansible_managed.cfg
 create mode 100644 test/integration/targets/template/ansible_managed.yml
 create mode 100644 test/integration/targets/template/corner_cases.yml
 create mode 100644 test/integration/targets/template/custom_tasks/tasks/main.yml
 create mode 100644 test/integration/targets/template/custom_tasks/templates/test
 create mode 100644 test/integration/targets/template/custom_template.yml
 create mode 100644 test/integration/targets/template/files/encoding_1252_utf-8.expected
 create mode 100644 test/integration/targets/template/files/encoding_1252_windows-1252.expected
 create mode 100644 test/integration/targets/template/files/foo-py26.txt
 create mode 100644 test/integration/targets/template/files/foo.dos.txt
 create mode 100644 test/integration/targets/template/files/foo.txt
 create mode 100644 test/integration/targets/template/files/foo.unix.txt
 create mode 100644 test/integration/targets/template/files/import_as.expected
 create mode 100644 test/integration/targets/template/files/import_as_with_context.expected
 create mode 100644 test/integration/targets/template/files/import_with_context.expected
 create mode 100644 test/integration/targets/template/files/lstrip_blocks_false.expected
 create mode 100644 test/integration/targets/template/files/lstrip_blocks_true.expected
 create mode 100644 test/integration/targets/template/files/trim_blocks_false.expected
 create mode 100644 test/integration/targets/template/files/trim_blocks_true.expected
 create mode 100644 test/integration/targets/template/filter_plugins.yml
 create mode 100644 test/integration/targets/template/meta/main.yml
 create mode 100644 test/integration/targets/template/role_filter/filter_plugins/myplugin.py
 create mode 100644 test/integration/targets/template/role_filter/tasks/main.yml
 create mode 100755 test/integration/targets/template/runme.sh
 create mode 100644 test/integration/targets/template/tasks/backup_test.yml
 create mode 100644 test/integration/targets/template/tasks/main.yml
 create mode 100644 test/integration/targets/template/template.yml
 create mode 100644 test/integration/targets/template/templates/6653-include.j2
 create mode 100644 test/integration/targets/template/templates/6653.j2
 create mode 100644 test/integration/targets/template/templates/72262-included.j2
 create mode 100644 test/integration/targets/template/templates/72262-vars.j2
 create mode 100644 test/integration/targets/template/templates/72262.j2
 create mode 100644 test/integration/targets/template/templates/72615-macro-nested.j2
 create mode 100644 test/integration/targets/template/templates/72615-macro.j2
 create mode 100644 test/integration/targets/template/templates/72615.j2
 create mode 100644 test/integration/targets/template/templates/bar
 create mode 100644 "test/integration/targets/template/templates/caf\303\251.j2"
 create mode 100644 test/integration/targets/template/templates/encoding_1252.j2
 create mode 100644 test/integration/targets/template/templates/foo.j2
 create mode 100644 test/integration/targets/template/templates/foo2.j2
 create mode 100644 test/integration/targets/template/templates/foo3.j2
 create mode 100644 test/integration/targets/template/templates/for_loop.j2
 create mode 100644 test/integration/targets/template/templates/for_loop_include.j2
 create mode 100644 test/integration/targets/template/templates/for_loop_include_nested.j2
 create mode 100644 test/integration/targets/template/templates/import_as.j2
 create mode 100644 test/integration/targets/template/templates/import_as_with_context.j2
 create mode 100644 test/integration/targets/template/templates/import_with_context.j2
 create mode 100644 test/integration/targets/template/templates/lstrip_blocks.j2
 create mode 100644 test/integration/targets/template/templates/parent.j2
 create mode 100644 test/integration/targets/template/templates/qux
 create mode 100644 test/integration/targets/template/templates/short.j2
 create mode 100644 test/integration/targets/template/templates/subtemplate.j2
 create mode 100644 test/integration/targets/template/templates/template_destpath_test.j2
 create mode 100644 test/integration/targets/template/templates/trim_blocks.j2
 create mode 100644 test/integration/targets/template/templates/unused_vars_include.j2
 create mode 100644 test/integration/targets/template/templates/unused_vars_template.j2
 create mode 100644 test/integration/targets/template/undefined_var_info.yml
 create mode 100644 test/integration/targets/template/unused_vars_include.yml
 create mode 100644 test/integration/targets/template/vars/main.yml
 create mode 100644 test/integration/targets/template_jinja2_latest/aliases
 create mode 100644 test/integration/targets/template_jinja2_latest/main.yml
 create mode 100644 test/integration/targets/template_jinja2_latest/requirements.txt
 create mode 100755 test/integration/targets/template_jinja2_latest/runme.sh
 create mode 100644 test/integration/targets/templating_lookups/aliases
 create mode 100755 test/integration/targets/templating_lookups/runme.sh
 create mode 100644 test/integration/targets/templating_lookups/runme.yml
 create mode 100644 test/integration/targets/templating_lookups/template_deepcopy/hosts
 create mode 100644 test/integration/targets/templating_lookups/template_deepcopy/playbook.yml
 create mode 100644 test/integration/targets/templating_lookups/template_deepcopy/template.in
 create mode 100644 test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml
 create mode 100644 test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2
 create mode 100644 test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml
 create mode 100644 test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2
 create mode 100644 test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass
 create mode 100644 test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml
 create mode 100644 test/integration/targets/templating_lookups/template_lookups/tasks/main.yml
 create mode 100644 test/integration/targets/templating_lookups/template_lookups/vars/main.yml
 create mode 100644 test/integration/targets/templating_settings/aliases
 create mode 100644 test/integration/targets/templating_settings/dont_warn_register.yml
 create mode 100755 test/integration/targets/templating_settings/runme.sh
 create mode 100644 test/integration/targets/templating_settings/test_templating_settings.yml
 create mode 100644 test/integration/targets/test_core/aliases
 create mode 100644 test/integration/targets/test_core/inventory
 create mode 100755 test/integration/targets/test_core/runme.sh
 create mode 100644 test/integration/targets/test_core/runme.yml
 create mode 100644 test/integration/targets/test_core/tasks/main.yml
 create mode 100644 test/integration/targets/test_core/vault-password
 create mode 100644 test/integration/targets/test_files/aliases
 create mode 100644 test/integration/targets/test_files/tasks/main.yml
 create mode 100644 test/integration/targets/test_mathstuff/aliases
 create mode 100644 test/integration/targets/test_mathstuff/tasks/main.yml
 create mode 100644 test/integration/targets/throttle/aliases
 create mode 100644 test/integration/targets/throttle/group_vars/all.yml
 create mode 100644 test/integration/targets/throttle/inventory
 create mode 100755 test/integration/targets/throttle/runme.sh
 create mode 100755 test/integration/targets/throttle/test_throttle.py
 create mode 100644 test/integration/targets/throttle/test_throttle.yml
 create mode 100644 test/integration/targets/unarchive/aliases
 create mode 100644 test/integration/targets/unarchive/files/foo.txt
 create mode 100644 "test/integration/targets/unarchive/files/test-unarchive-nonascii-\343\201\217\343\202\211\343\201\250\343\201\277.tar.gz"
 create mode 100644 test/integration/targets/unarchive/meta/main.yml
 create mode 100644 test/integration/targets/unarchive/tasks/main.yml
 create mode 100644 test/integration/targets/unarchive/tasks/prepare_tests.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_download.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_exclude.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_missing_files.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_mode.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_quotable_characters.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_symlink.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_tar.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_tar_gz.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_unprivileged_user.yml
 create mode 100644 test/integration/targets/unarchive/tasks/test_zip.yml
 create mode 100644 test/integration/targets/undefined/aliases
 create mode 100644 test/integration/targets/undefined/tasks/main.yml
 create mode 100644 test/integration/targets/unicode/aliases
 create mode 100644 test/integration/targets/unicode/inventory
 create mode 100644 "test/integration/targets/unicode/k\305\231\303\255\305\276ek-ansible-project/ansible.cfg"
 create mode 100755 test/integration/targets/unicode/runme.sh
 create mode 100755 test/integration/targets/unicode/unicode-test-script
 create mode 100644 test/integration/targets/unicode/unicode.yml
 create mode 100644 test/integration/targets/unsafe_writes/aliases
 create mode 100644 test/integration/targets/unsafe_writes/basic.yml
 create mode 100755 test/integration/targets/unsafe_writes/runme.sh
 create mode 100644 test/integration/targets/until/aliases
 create mode 100644 test/integration/targets/until/tasks/main.yml
 create mode 100644 test/integration/targets/unvault/aliases
 create mode 100644 test/integration/targets/unvault/main.yml
 create mode 100644 test/integration/targets/unvault/password
 create mode 100755 test/integration/targets/unvault/runme.sh
 create mode 100644 test/integration/targets/unvault/vault
 create mode 100644 test/integration/targets/uri/aliases
 create mode 100644 test/integration/targets/uri/files/README
 create mode 100644 test/integration/targets/uri/files/fail0.json
 create mode 100644 test/integration/targets/uri/files/fail1.json
 create mode 100644 test/integration/targets/uri/files/fail10.json
 create mode 100644 test/integration/targets/uri/files/fail11.json
 create mode 100644 test/integration/targets/uri/files/fail12.json
 create mode 100644 test/integration/targets/uri/files/fail13.json
 create mode 100644 test/integration/targets/uri/files/fail14.json
 create mode 100644 test/integration/targets/uri/files/fail15.json
 create mode 100644 test/integration/targets/uri/files/fail16.json
 create mode 100644 test/integration/targets/uri/files/fail17.json
 create mode 100644 test/integration/targets/uri/files/fail18.json
 create mode 100644 test/integration/targets/uri/files/fail19.json
 create mode 100644 test/integration/targets/uri/files/fail2.json
 create mode 100644 test/integration/targets/uri/files/fail20.json
 create mode 100644 test/integration/targets/uri/files/fail21.json
 create mode 100644 test/integration/targets/uri/files/fail22.json
 create mode 100644 test/integration/targets/uri/files/fail23.json
 create mode 100644 test/integration/targets/uri/files/fail24.json
 create mode 100644 test/integration/targets/uri/files/fail25.json
 create mode 100644 test/integration/targets/uri/files/fail26.json
 create mode 100644 test/integration/targets/uri/files/fail27.json
 create mode 100644 test/integration/targets/uri/files/fail28.json
 create mode 100644 test/integration/targets/uri/files/fail29.json
 create mode 100644 test/integration/targets/uri/files/fail3.json
 create mode 100644 test/integration/targets/uri/files/fail30.json
 create mode 100644 test/integration/targets/uri/files/fail4.json
 create mode 100644 test/integration/targets/uri/files/fail5.json
 create mode 100644 test/integration/targets/uri/files/fail6.json
 create mode 100644 test/integration/targets/uri/files/fail7.json
 create mode 100644 test/integration/targets/uri/files/fail8.json
 create mode 100644 test/integration/targets/uri/files/fail9.json
 create mode 100644 test/integration/targets/uri/files/formdata.txt
 create mode 100644 test/integration/targets/uri/files/pass0.json
 create mode 100644 test/integration/targets/uri/files/pass1.json
 create mode 100644 test/integration/targets/uri/files/pass2.json
 create mode 100644 test/integration/targets/uri/files/pass3.json
 create mode 100644 test/integration/targets/uri/files/pass4.json
 create mode 100644 test/integration/targets/uri/files/testserver.py
 create mode 100644 test/integration/targets/uri/meta/main.yml
 create mode 100644 test/integration/targets/uri/tasks/main.yml
 create mode 100644 test/integration/targets/uri/tasks/redirect-all.yml
 create mode 100644 test/integration/targets/uri/tasks/redirect-none.yml
 create mode 100644 test/integration/targets/uri/tasks/redirect-safe.yml
 create mode 100644 test/integration/targets/uri/tasks/redirect-urllib2.yml
 create mode 100644 test/integration/targets/uri/tasks/return-content.yml
 create mode 100644 test/integration/targets/uri/tasks/unexpected-failures.yml
 create mode 100644 test/integration/targets/uri/templates/netrc.j2
 create mode 100644 test/integration/targets/uri/vars/main.yml
 create mode 100644 test/integration/targets/user/aliases
 create mode 100644 test/integration/targets/user/files/userlist.sh
 create mode 100644 test/integration/targets/user/meta/main.yml
 create mode 100644 test/integration/targets/user/tasks/main.yml
 create mode 100644 test/integration/targets/user/tasks/test_create_system_user.yml
 create mode 100644 test/integration/targets/user/tasks/test_create_user.yml
 create mode 100644 test/integration/targets/user/tasks/test_create_user_home.yml
 create mode 100644 test/integration/targets/user/tasks/test_create_user_password.yml
 create mode 100644 test/integration/targets/user/tasks/test_create_user_uid.yml
 create mode 100644 test/integration/targets/user/tasks/test_expires.yml
 create mode 100644 test/integration/targets/user/tasks/test_expires_new_account.yml
 create mode 100644 test/integration/targets/user/tasks/test_expires_new_account_epoch_negative.yml
 create mode 100644 test/integration/targets/user/tasks/test_local.yml
 create mode 100644 test/integration/targets/user/tasks/test_local_expires.yml
 create mode 100644 test/integration/targets/user/tasks/test_no_home_fallback.yml
 create mode 100644 test/integration/targets/user/tasks/test_password_lock.yml
 create mode 100644 test/integration/targets/user/tasks/test_password_lock_new_user.yml
 create mode 100644 test/integration/targets/user/tasks/test_remove_user.yml
 create mode 100644 test/integration/targets/user/tasks/test_shadow_backup.yml
 create mode 100644 test/integration/targets/user/tasks/test_ssh_key_passphrase.yml
 create mode 100644 test/integration/targets/user/vars/main.yml
 create mode 100644 test/integration/targets/var_blending/aliases
 create mode 100644 test/integration/targets/var_blending/group_vars/all
 create mode 100644 test/integration/targets/var_blending/group_vars/local
 create mode 100644 test/integration/targets/var_blending/host_vars/testhost
 create mode 100644 test/integration/targets/var_blending/inventory
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml
 create mode 100644 test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml
 create mode 100755 test/integration/targets/var_blending/runme.sh
 create mode 100644 test/integration/targets/var_blending/test_var_blending.yml
 create mode 100644 test/integration/targets/var_blending/test_vars.yml
 create mode 100644 test/integration/targets/var_blending/vars_file.yml
 create mode 100644 test/integration/targets/var_precedence/aliases
 create mode 100755 test/integration/targets/var_precedence/ansible-var-precedence-check.py
 create mode 100644 test/integration/targets/var_precedence/host_vars/testhost
 create mode 100644 test/integration/targets/var_precedence/inventory
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml
 create mode 100644 test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml
 create mode 100755 test/integration/targets/var_precedence/runme.sh
 create mode 100644 test/integration/targets/var_precedence/test_var_precedence.yml
 create mode 100644 test/integration/targets/var_precedence/vars/test_var_precedence.yml
 create mode 100644 test/integration/targets/var_templating/aliases
 create mode 100644 test/integration/targets/var_templating/group_vars/all.yml
 create mode 100755 test/integration/targets/var_templating/runme.sh
 create mode 100644 test/integration/targets/var_templating/task_vars_templating.yml
 create mode 100644 test/integration/targets/var_templating/test_connection_vars.yml
 create mode 100644 test/integration/targets/var_templating/undall.yml
 create mode 100644 test/integration/targets/var_templating/undefined.yml
 create mode 100644 test/integration/targets/var_templating/vars/connection.yml
 create mode 100644 test/integration/targets/vault/aliases
 create mode 100644 test/integration/targets/vault/empty-password
 create mode 100644 test/integration/targets/vault/encrypted-vault-password
 create mode 100644 test/integration/targets/vault/encrypted_file_encrypted_var_password
 create mode 100644 test/integration/targets/vault/example1_password
 create mode 100644 test/integration/targets/vault/example2_password
 create mode 100644 test/integration/targets/vault/example3_password
 create mode 100755 test/integration/targets/vault/faux-editor.py
 create mode 100644 test/integration/targets/vault/files/test_assemble/nonsecret.txt
 create mode 100644 test/integration/targets/vault/files/test_assemble/secret.vault
 create mode 100644 test/integration/targets/vault/format_1_1_AES256.yml
 create mode 100644 test/integration/targets/vault/format_1_2_AES256.yml
 create mode 100644 test/integration/targets/vault/host_vars/myhost.yml
 create mode 100644 test/integration/targets/vault/host_vars/testhost.yml
 create mode 100644 test/integration/targets/vault/invalid_format/README.md
 create mode 100644 test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml
 create mode 100644 test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml
 create mode 100644 test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml
 create mode 100644 test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars
 create mode 100644 test/integration/targets/vault/invalid_format/inventory
 create mode 100644 test/integration/targets/vault/invalid_format/original-broken-host-vars
 create mode 100644 test/integration/targets/vault/invalid_format/original-group-vars.yml
 create mode 100644 test/integration/targets/vault/invalid_format/some-vars
 create mode 100644 test/integration/targets/vault/invalid_format/vault-secret
 create mode 100644 test/integration/targets/vault/inventory.toml
 create mode 100755 test/integration/targets/vault/password-script.py
 create mode 100644 test/integration/targets/vault/roles/test_vault/tasks/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault/vars/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md
 create mode 100644 test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml
 create mode 100644 test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2
 create mode 100755 test/integration/targets/vault/runme.sh
 create mode 100755 test/integration/targets/vault/runme_change_pip_installed.sh
 create mode 100644 test/integration/targets/vault/single_vault_as_string.yml
 create mode 100755 test/integration/targets/vault/test-vault-client.py
 create mode 100644 test/integration/targets/vault/test_dangling_temp.yml
 create mode 100644 test/integration/targets/vault/test_utf8_value_in_filename.yml
 create mode 100644 test/integration/targets/vault/test_vault.yml
 create mode 100644 test/integration/targets/vault/test_vault_embedded.yml
 create mode 100644 test/integration/targets/vault/test_vault_embedded_ids.yml
 create mode 100644 test/integration/targets/vault/test_vault_file_encrypted_embedded.yml
 create mode 100644 test/integration/targets/vault/test_vaulted_inventory.yml
 create mode 100644 test/integration/targets/vault/test_vaulted_inventory_toml.yml
 create mode 100644 test/integration/targets/vault/test_vaulted_template.yml
 create mode 100644 test/integration/targets/vault/test_vaulted_utf8_value.yml
 create mode 100644 "test/integration/targets/vault/vault-caf\303\251.yml"
 create mode 100644 test/integration/targets/vault/vault-password
 create mode 100644 test/integration/targets/vault/vault-password-ansible
 create mode 100644 test/integration/targets/vault/vault-password-wrong
 create mode 100644 test/integration/targets/vault/vault-secret.txt
 create mode 100644 test/integration/targets/vault/vaulted.inventory
 create mode 100644 test/integration/targets/wait_for/aliases
 create mode 100644 test/integration/targets/wait_for/files/testserver.py
 create mode 100644 test/integration/targets/wait_for/files/zombie.py
 create mode 100644 test/integration/targets/wait_for/meta/main.yml
 create mode 100644 test/integration/targets/wait_for/tasks/main.yml
 create mode 100644 test/integration/targets/wait_for/vars/main.yml
 create mode 100644 test/integration/targets/wait_for_connection/aliases
 create mode 100644 test/integration/targets/wait_for_connection/tasks/main.yml
 create mode 100644 test/integration/targets/want_json_modules_posix/aliases
 create mode 100644 test/integration/targets/want_json_modules_posix/library/helloworld.py
 create mode 100644 test/integration/targets/want_json_modules_posix/meta/main.yml
 create mode 100644 test/integration/targets/want_json_modules_posix/tasks/main.yml
 create mode 100644 test/integration/targets/win_async_wrapper/aliases
 create mode 100644 test/integration/targets/win_async_wrapper/library/async_test.ps1
 create mode 100644 test/integration/targets/win_async_wrapper/tasks/main.yml
 create mode 100644 test/integration/targets/win_become/aliases
 create mode 100644 test/integration/targets/win_become/tasks/main.yml
 create mode 100644 test/integration/targets/win_exec_wrapper/aliases
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_all_options.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_fail.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1
 create mode 100644 test/integration/targets/win_exec_wrapper/tasks/main.yml
 create mode 100644 test/integration/targets/win_fetch/aliases
 create mode 100644 test/integration/targets/win_fetch/meta/main.yml
 create mode 100644 test/integration/targets/win_fetch/tasks/main.yml
 create mode 100644 test/integration/targets/win_module_utils/aliases
 create mode 100644 test/integration/targets/win_module_utils/library/csharp_util.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/recursive_requires.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1
 create mode 100644 test/integration/targets/win_module_utils/library/uses_local_utils.ps1
 create mode 100644 test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1
 create mode 100644 test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1
 create mode 100644 test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1
 create mode 100644 test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1
 create mode 100644 test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs
 create mode 100644 test/integration/targets/win_module_utils/tasks/main.yml
 create mode 100644 test/integration/targets/win_raw/aliases
 create mode 100644 test/integration/targets/win_raw/tasks/main.yml
 create mode 100644 test/integration/targets/win_script/aliases
 create mode 100644 test/integration/targets/win_script/defaults/main.yml
 create mode 100644 test/integration/targets/win_script/files/fail.bat
 create mode 100644 test/integration/targets/win_script/files/space path/test_script.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script.bat
 create mode 100644 test/integration/targets/win_script/files/test_script.cmd
 create mode 100644 test/integration/targets/win_script/files/test_script.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_bool.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_creates_file.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_removes_file.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_whoami.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_with_args.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_with_env.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_with_errors.ps1
 create mode 100644 test/integration/targets/win_script/files/test_script_with_splatting.ps1
 create mode 100644 test/integration/targets/win_script/tasks/main.yml
 create mode 100644 test/integration/targets/windows-minimal/aliases
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping.ps1
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping.py
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping_throw.ps1
 create mode 100644 test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1
 create mode 100644 test/integration/targets/windows-minimal/tasks/main.yml
 create mode 100644 test/integration/targets/windows-paths/aliases
 create mode 100644 test/integration/targets/windows-paths/tasks/main.yml
 create mode 100644 test/integration/targets/yum/aliases
 create mode 100644 test/integration/targets/yum/files/yum.conf
 create mode 100644 test/integration/targets/yum/meta/main.yml
 create mode 100644 test/integration/targets/yum/tasks/check_mode_consistency.yml
 create mode 100644 test/integration/targets/yum/tasks/lock.yml
 create mode 100644 test/integration/targets/yum/tasks/main.yml
 create mode 100644 test/integration/targets/yum/tasks/proxy.yml
 create mode 100644 test/integration/targets/yum/tasks/repo.yml
 create mode 100644 test/integration/targets/yum/tasks/yum.yml
 create mode 100644 test/integration/targets/yum/tasks/yum_group_remove.yml
 create mode 100644 test/integration/targets/yum/tasks/yuminstallroot.yml
 create mode 100644 test/integration/targets/yum_repository/aliases
 create mode 100644 test/integration/targets/yum_repository/defaults/main.yml
 create mode 100644 test/integration/targets/yum_repository/handlers/main.yml
 create mode 100644 test/integration/targets/yum_repository/meta/main.yml
 create mode 100644 test/integration/targets/yum_repository/tasks/main.yml
 create mode 100644 test/lib/ansible_test/__init__.py
 create mode 100644 test/lib/ansible_test/_data/ansible.cfg
 create mode 100755 test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py
 create mode 100644 test/lib/ansible_test/_data/collection_detail.py
 create mode 100644 test/lib/ansible_test/_data/completion/docker.txt
 create mode 100644 test/lib/ansible_test/_data/completion/network.txt
 create mode 100644 test/lib/ansible_test/_data/completion/remote.txt
 create mode 100644 test/lib/ansible_test/_data/completion/windows.txt
 create mode 100644 test/lib/ansible_test/_data/coveragerc
 create mode 100644 test/lib/ansible_test/_data/cryptography-constraints.txt
 create mode 120000 test/lib/ansible_test/_data/injector/ansible
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-config
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-connection
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-console
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-doc
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-galaxy
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-inventory
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-playbook
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-pull
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-test
 create mode 120000 test/lib/ansible_test/_data/injector/ansible-vault
 create mode 120000 test/lib/ansible_test/_data/injector/importer.py
 create mode 120000 test/lib/ansible_test/_data/injector/pytest
 create mode 100755 test/lib/ansible_test/_data/injector/python.py
 create mode 100644 test/lib/ansible_test/_data/injector/virtualenv-isolated.sh
 create mode 100644 test/lib/ansible_test/_data/injector/virtualenv.sh
 create mode 100644 test/lib/ansible_test/_data/inventory
 create mode 100644 test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
 create mode 100644 test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
 create mode 100644 test/lib/ansible_test/_data/pytest.ini
 create mode 100644 test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
 create mode 100644 test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py
 create mode 100644 test/lib/ansible_test/_data/quiet_pip.py
 create mode 100644 test/lib/ansible_test/_data/requirements/ansible-test.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/constraints.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/coverage.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/integration.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/network-integration.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.changelog.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.import.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.pep8.txt
 create mode 100755 test/lib/ansible_test/_data/requirements/sanity.ps1
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.pylint.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/units.txt
 create mode 100644 test/lib/ansible_test/_data/requirements/windows-integration.txt
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/changelog.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/changelog.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/empty-init.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/empty-init.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/line-endings.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/line-endings.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-assert.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-assert.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/shebang.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/shebang.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/symlinks.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/symlinks.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py
 create mode 100644 test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json
 create mode 100755 test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
 create mode 100755 test/lib/ansible_test/_data/sanity/compile/compile.py
 create mode 100755 test/lib/ansible_test/_data/sanity/import/importer.py
 create mode 100644 test/lib/ansible_test/_data/sanity/import/yaml_to_json.py
 create mode 100644 test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py
 create mode 100644 test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt
 create mode 100755 test/lib/ansible_test/_data/sanity/pslint/pslint.ps1
 create mode 100644 test/lib/ansible_test/_data/sanity/pslint/settings.psd1
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/config/default.cfg
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py
 create mode 100644 test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py
 create mode 100644 test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
 create mode 100644 test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
 create mode 100755 test/lib/ansible_test/_data/sanity/validate-modules/main.py
 create mode 120000 test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
 create mode 100644 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
 create mode 100644 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
 create mode 100644 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
 create mode 100755 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1
 create mode 100644 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py
 create mode 100644 test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py
 create mode 100644 test/lib/ansible_test/_data/sanity/yamllint/config/default.yml
 create mode 100644 test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml
 create mode 100644 test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml
 create mode 100644 test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py
 create mode 100644 test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1
 create mode 100644 test/lib/ansible_test/_data/setup/docker.sh
 create mode 100644 test/lib/ansible_test/_data/setup/remote.sh
 create mode 100644 test/lib/ansible_test/_data/setup/windows-httptester.ps1
 create mode 100755 test/lib/ansible_test/_data/sslcheck.py
 create mode 100755 test/lib/ansible_test/_data/versions.py
 create mode 100755 test/lib/ansible_test/_data/virtualenvcheck.py
 create mode 100755 test/lib/ansible_test/_data/yamlcheck.py
 create mode 100644 test/lib/ansible_test/_internal/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/ansible_util.py
 create mode 100644 test/lib/ansible_test/_internal/cache.py
 create mode 100644 test/lib/ansible_test/_internal/ci/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/ci/azp.py
 create mode 100644 test/lib/ansible_test/_internal/ci/local.py
 create mode 100644 test/lib/ansible_test/_internal/ci/shippable.py
 create mode 100644 test/lib/ansible_test/_internal/classification.py
 create mode 100644 test/lib/ansible_test/_internal/cli.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/acme.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/aws.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/azure.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/cloudscale.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/cs.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/fallaxy.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/foreman.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/gcp.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/hcloud.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/nios.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/opennebula.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/openshift.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/scaleway.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/tower.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/vcenter.py
 create mode 100644 test/lib/ansible_test/_internal/cloud/vultr.py
 create mode 100644 test/lib/ansible_test/_internal/config.py
 create mode 100644 test/lib/ansible_test/_internal/constants.py
 create mode 100644 test/lib/ansible_test/_internal/core_ci.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/combine.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/erase.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/html.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/report.py
 create mode 100644 test/lib/ansible_test/_internal/coverage/xml.py
 create mode 100644 test/lib/ansible_test/_internal/coverage_util.py
 create mode 100644 test/lib/ansible_test/_internal/csharp_import_analysis.py
 create mode 100644 test/lib/ansible_test/_internal/data.py
 create mode 100644 test/lib/ansible_test/_internal/delegation.py
 create mode 100644 test/lib/ansible_test/_internal/diff.py
 create mode 100644 test/lib/ansible_test/_internal/docker_util.py
 create mode 100644 test/lib/ansible_test/_internal/encoding.py
 create mode 100644 test/lib/ansible_test/_internal/env.py
 create mode 100644 test/lib/ansible_test/_internal/executor.py
 create mode 100644 test/lib/ansible_test/_internal/git.py
 create mode 100644 test/lib/ansible_test/_internal/http.py
 create mode 100644 test/lib/ansible_test/_internal/import_analysis.py
 create mode 100644 test/lib/ansible_test/_internal/init.py
 create mode 100644 test/lib/ansible_test/_internal/integration/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/io.py
 create mode 100644 test/lib/ansible_test/_internal/manage_ci.py
 create mode 100644 test/lib/ansible_test/_internal/metadata.py
 create mode 100644 test/lib/ansible_test/_internal/payload.py
 create mode 100644 test/lib/ansible_test/_internal/powershell_import_analysis.py
 create mode 100644 test/lib/ansible_test/_internal/provider/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/provider/layout/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/provider/layout/ansible.py
 create mode 100644 test/lib/ansible_test/_internal/provider/layout/collection.py
 create mode 100644 test/lib/ansible_test/_internal/provider/source/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/provider/source/git.py
 create mode 100644 test/lib/ansible_test/_internal/provider/source/installed.py
 create mode 100644 test/lib/ansible_test/_internal/provider/source/unversioned.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/ansible_doc.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/bin_symlinks.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/compile.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/ignores.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/import.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/integration_aliases.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/pep8.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/pslint.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/pylint.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/rstcheck.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/sanity_docs.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/shellcheck.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/validate_modules.py
 create mode 100644 test/lib/ansible_test/_internal/sanity/yamllint.py
 create mode 100644 test/lib/ansible_test/_internal/target.py
 create mode 100644 test/lib/ansible_test/_internal/test.py
 create mode 100644 test/lib/ansible_test/_internal/thread.py
 create mode 100644 test/lib/ansible_test/_internal/types.py
 create mode 100644 test/lib/ansible_test/_internal/units/__init__.py
 create mode 100644 test/lib/ansible_test/_internal/util.py
 create mode 100644 test/lib/ansible_test/_internal/util_common.py
 create mode 100644 test/lib/ansible_test/_internal/venv.py
 create mode 100644 test/lib/ansible_test/config/cloud-config-aws.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-azure.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-cs.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-gcp.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-hcloud.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-opennebula.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-scaleway.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-tower.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-vcenter.ini.template
 create mode 100644 test/lib/ansible_test/config/cloud-config-vultr.ini.template
 create mode 100644 test/lib/ansible_test/config/inventory.networking.template
 create mode 100644 test/lib/ansible_test/config/inventory.winrm.template
 create mode 100644 test/sanity/code-smell/configure-remoting-ps1.json
 create mode 100755 test/sanity/code-smell/configure-remoting-ps1.py
 create mode 100644 test/sanity/code-smell/deprecated-config.json
 create mode 100755 test/sanity/code-smell/deprecated-config.py
 create mode 100644 test/sanity/code-smell/deprecated-config.requirements.txt
 create mode 100644 test/sanity/code-smell/docs-build.json
 create mode 100755 test/sanity/code-smell/docs-build.py
 create mode 100644 test/sanity/code-smell/docs-build.requirements.txt
 create mode 100644 test/sanity/code-smell/no-unwanted-files.json
 create mode 100755 test/sanity/code-smell/no-unwanted-files.py
 create mode 100644 test/sanity/code-smell/obsolete-files.json
 create mode 100755 test/sanity/code-smell/obsolete-files.py
 create mode 100644 test/sanity/code-smell/package-data.json
 create mode 100755 test/sanity/code-smell/package-data.py
 create mode 100644 test/sanity/code-smell/package-data.requirements.txt
 create mode 100644 test/sanity/code-smell/release-names.json
 create mode 100755 test/sanity/code-smell/release-names.py
 create mode 100644 test/sanity/code-smell/release-names.requirements.txt
 create mode 100644 test/sanity/code-smell/required-and-default-attributes.json
 create mode 100755 test/sanity/code-smell/required-and-default-attributes.py
 create mode 100644 test/sanity/code-smell/skip.txt
 create mode 100644 test/sanity/code-smell/test-constraints.json
 create mode 100755 test/sanity/code-smell/test-constraints.py
 create mode 100644 test/sanity/code-smell/update-bundled.json
 create mode 100755 test/sanity/code-smell/update-bundled.py
 create mode 100644 test/sanity/code-smell/update-bundled.requirements.txt
 create mode 100644 test/sanity/ignore.txt
 create mode 100644 test/support/integration/plugins/cache/jsonfile.py
 create mode 100644 test/support/integration/plugins/filter/json_query.py
 create mode 100644 test/support/integration/plugins/inventory/aws_ec2.py
 create mode 100644 test/support/integration/plugins/inventory/docker_swarm.py
 create mode 100644 test/support/integration/plugins/inventory/foreman.py
 create mode 100644 test/support/integration/plugins/lookup/rabbitmq.py
 create mode 100644 test/support/integration/plugins/module_utils/aws/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/aws/core.py
 create mode 100644 test/support/integration/plugins/module_utils/aws/iam.py
 create mode 100644 test/support/integration/plugins/module_utils/aws/s3.py
 create mode 100644 test/support/integration/plugins/module_utils/aws/waiters.py
 create mode 100644 test/support/integration/plugins/module_utils/azure_rm_common.py
 create mode 100644 test/support/integration/plugins/module_utils/azure_rm_common_rest.py
 create mode 100644 test/support/integration/plugins/module_utils/cloud.py
 create mode 100644 test/support/integration/plugins/module_utils/compat/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/compat/ipaddress.py
 create mode 100644 test/support/integration/plugins/module_utils/crypto.py
 create mode 100644 test/support/integration/plugins/module_utils/database.py
 create mode 100644 test/support/integration/plugins/module_utils/docker/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/docker/common.py
 create mode 100644 test/support/integration/plugins/module_utils/docker/swarm.py
 create mode 100644 test/support/integration/plugins/module_utils/ec2.py
 create mode 100644 test/support/integration/plugins/module_utils/ecs/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/ecs/api.py
 create mode 100644 test/support/integration/plugins/module_utils/mysql.py
 create mode 100644 test/support/integration/plugins/module_utils/net_tools/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/network/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/network/common/__init__.py
 create mode 100644 test/support/integration/plugins/module_utils/network/common/utils.py
 create mode 100644 test/support/integration/plugins/module_utils/postgres.py
 create mode 100644 test/support/integration/plugins/module_utils/rabbitmq.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_resource_facts.py
 create mode 120000 test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
 create mode 100644 test/support/integration/plugins/modules/aws_az_info.py
 create mode 100644 test/support/integration/plugins/modules/aws_s3.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_appserviceplan.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_functionapp.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_functionapp_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbserver.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_resource.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_resource_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_storageaccount.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_webapp.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_webapp_info.py
 create mode 100644 test/support/integration/plugins/modules/azure_rm_webappslot.py
 create mode 100644 test/support/integration/plugins/modules/cloud_init_data_facts.py
 create mode 100644 test/support/integration/plugins/modules/cloudformation.py
 create mode 100644 test/support/integration/plugins/modules/cloudformation_info.py
 create mode 100644 test/support/integration/plugins/modules/deploy_helper.py
 create mode 100644 test/support/integration/plugins/modules/docker_swarm.py
 create mode 100644 test/support/integration/plugins/modules/ec2.py
 create mode 100644 test/support/integration/plugins/modules/ec2_ami_info.py
 create mode 100644 test/support/integration/plugins/modules/ec2_group.py
 create mode 100644 test/support/integration/plugins/modules/ec2_vpc_net.py
 create mode 100644 test/support/integration/plugins/modules/ec2_vpc_subnet.py
 create mode 100644 test/support/integration/plugins/modules/flatpak_remote.py
 create mode 100644 test/support/integration/plugins/modules/htpasswd.py
 create mode 100644 test/support/integration/plugins/modules/locale_gen.py
 create mode 100644 test/support/integration/plugins/modules/lvg.py
 create mode 100644 test/support/integration/plugins/modules/mongodb_parameter.py
 create mode 100644 test/support/integration/plugins/modules/mongodb_user.py
 create mode 100644 test/support/integration/plugins/modules/pids.py
 create mode 100644 test/support/integration/plugins/modules/pkgng.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_db.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_privs.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_query.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_set.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_table.py
 create mode 100644 test/support/integration/plugins/modules/postgresql_user.py
 create mode 100644 test/support/integration/plugins/modules/rabbitmq_plugin.py
 create mode 100644 test/support/integration/plugins/modules/rabbitmq_queue.py
 create mode 100644 test/support/integration/plugins/modules/s3_bucket.py
 create mode 100644 test/support/integration/plugins/modules/sefcontext.py
 create mode 100644 test/support/integration/plugins/modules/selogin.py
 create mode 100644 test/support/integration/plugins/modules/synchronize.py
 create mode 100644 test/support/integration/plugins/modules/timezone.py
 create mode 100644 test/support/integration/plugins/modules/x509_crl.py
 create mode 100644 test/support/integration/plugins/modules/x509_crl_info.py
 create mode 100644 test/support/integration/plugins/modules/xml.py
 create mode 100644 test/support/integration/plugins/modules/zypper.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py
 create mode 100644 test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
 create mode 120000 test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
 create mode 100644 test/support/windows-integration/plugins/action/win_copy.py
 create mode 100644 test/support/windows-integration/plugins/action/win_reboot.py
 create mode 100644 test/support/windows-integration/plugins/action/win_template.py
 create mode 100644 test/support/windows-integration/plugins/become/runas.py
 create mode 100644 test/support/windows-integration/plugins/module_utils/Ansible.Service.cs
 create mode 100644 test/support/windows-integration/plugins/modules/async_status.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/setup.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/slurp.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_acl.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_acl.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_certificate_store.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_certificate_store.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_command.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_command.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_copy.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_copy.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_data_deduplication.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_data_deduplication.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_dsc.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_dsc.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_feature.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_feature.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_file.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_file.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_find.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_find.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_format.ps1
 create mode 100644 test/support/windows-integration/plugins/modules/win_format.py
 create mode 100644 test/support/windows-integration/plugins/modules/win_get_url.ps1
create mode 100644 test/support/windows-integration/plugins/modules/win_get_url.py create mode 100644 test/support/windows-integration/plugins/modules/win_lineinfile.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_lineinfile.py create mode 100644 test/support/windows-integration/plugins/modules/win_path.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_path.py create mode 100644 test/support/windows-integration/plugins/modules/win_ping.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_ping.py create mode 100644 test/support/windows-integration/plugins/modules/win_psexec.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_psexec.py create mode 100644 test/support/windows-integration/plugins/modules/win_reboot.py create mode 100644 test/support/windows-integration/plugins/modules/win_regedit.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_regedit.py create mode 100644 test/support/windows-integration/plugins/modules/win_security_policy.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_security_policy.py create mode 100644 test/support/windows-integration/plugins/modules/win_shell.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_shell.py create mode 100644 test/support/windows-integration/plugins/modules/win_stat.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_stat.py create mode 100644 test/support/windows-integration/plugins/modules/win_tempfile.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_tempfile.py create mode 100644 test/support/windows-integration/plugins/modules/win_template.py create mode 100644 test/support/windows-integration/plugins/modules/win_user.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_user.py create mode 100644 test/support/windows-integration/plugins/modules/win_user_right.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_user_right.py create mode 100644 test/support/windows-integration/plugins/modules/win_wait_for.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_wait_for.py create mode 100644 test/support/windows-integration/plugins/modules/win_whoami.ps1 create mode 100644 test/support/windows-integration/plugins/modules/win_whoami.py create mode 100644 test/units/__init__.py create mode 100644 test/units/_vendor/test_vendor.py create mode 100644 test/units/ansible_test/__init__.py create mode 100644 test/units/ansible_test/ci/__init__.py create mode 100644 test/units/ansible_test/ci/test_azp.py create mode 100644 test/units/ansible_test/ci/test_shippable.py create mode 100644 test/units/ansible_test/ci/util.py create mode 100644 test/units/ansible_test/conftest.py create mode 100644 test/units/ansible_test/test_docker_util.py create mode 100644 test/units/cli/__init__.py create mode 100644 test/units/cli/arguments/test_optparse_helpers.py create mode 100644 test/units/cli/galaxy/test_collection_extract_tar.py create mode 100644 test/units/cli/galaxy/test_display_collection.py create mode 100644 test/units/cli/galaxy/test_display_header.py create mode 100644 test/units/cli/galaxy/test_display_role.py create mode 100644 test/units/cli/galaxy/test_execute_list.py create mode 100644 test/units/cli/galaxy/test_execute_list_collection.py create mode 100644 test/units/cli/galaxy/test_get_collection_widths.py create mode 100644 
test/units/cli/test_adhoc.py create mode 100644 test/units/cli/test_cli.py create mode 100644 test/units/cli/test_console.py create mode 100644 test/units/cli/test_data/collection_skeleton/README.md create mode 100644 test/units/cli/test_data/collection_skeleton/docs/My Collection.md create mode 100644 test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 create mode 100644 test/units/cli/test_data/collection_skeleton/playbooks/main.yml create mode 100644 test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 create mode 100644 test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep create mode 100644 test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 create mode 100644 test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 create mode 100644 test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 create mode 100644 test/units/cli/test_data/role_skeleton/.travis.yml create mode 100644 test/units/cli/test_data/role_skeleton/README.md create mode 100644 test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 create mode 100644 test/units/cli/test_data/role_skeleton/files/.git_keep create mode 100644 test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 create mode 100644 test/units/cli/test_data/role_skeleton/inventory create mode 100644 test/units/cli/test_data/role_skeleton/meta/main.yml.j2 create mode 100644 test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 create mode 100644 test/units/cli/test_data/role_skeleton/templates/.git_keep create mode 100644 test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 create mode 100644 test/units/cli/test_data/role_skeleton/templates/test.conf.j2 create mode 100644 test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 create mode 100644 test/units/cli/test_data/role_skeleton/tests/test.yml.j2 create mode 100644 test/units/cli/test_data/role_skeleton/vars/main.yml.j2 create mode 100644 test/units/cli/test_doc.py create mode 100644 test/units/cli/test_galaxy.py create mode 100644 test/units/cli/test_playbook.py create mode 100644 test/units/cli/test_vault.py create mode 100644 test/units/compat/__init__.py create mode 100644 test/units/compat/builtins.py create mode 100644 test/units/compat/mock.py create mode 100644 test/units/compat/unittest.py create mode 100644 test/units/config/manager/__init__.py create mode 100644 test/units/config/manager/test_find_ini_config_file.py create mode 100644 test/units/config/test.cfg create mode 100644 test/units/config/test.yml create mode 100644 test/units/config/test2.cfg create mode 100644 test/units/config/test_data.py create mode 100644 test/units/config/test_manager.py create mode 100644 test/units/errors/__init__.py create mode 100644 test/units/errors/test_errors.py create mode 100644 test/units/executor/__init__.py create mode 100644 
test/units/executor/module_common/test_modify_module.py create mode 100644 test/units/executor/module_common/test_module_common.py create mode 100644 test/units/executor/module_common/test_recursive_finder.py create mode 100644 test/units/executor/test_interpreter_discovery.py create mode 100644 test/units/executor/test_play_iterator.py create mode 100644 test/units/executor/test_playbook_executor.py create mode 100644 test/units/executor/test_task_executor.py create mode 100644 test/units/executor/test_task_queue_manager_callbacks.py create mode 100644 test/units/executor/test_task_result.py create mode 100644 test/units/galaxy/__init__.py create mode 100644 test/units/galaxy/test_api.py create mode 100644 test/units/galaxy/test_collection.py create mode 100644 test/units/galaxy/test_collection_install.py create mode 100644 test/units/galaxy/test_token.py create mode 100644 test/units/galaxy/test_user_agent.py create mode 100644 test/units/inventory/test_group.py create mode 100644 test/units/inventory/test_host.py create mode 100644 test/units/inventory_test_data/group_vars/noparse/all.yml~ create mode 100644 test/units/inventory_test_data/group_vars/noparse/file.txt create mode 100644 test/units/inventory_test_data/group_vars/parse/all.yml create mode 100644 test/units/mock/__init__.py create mode 100644 test/units/mock/loader.py create mode 100644 test/units/mock/path.py create mode 100644 test/units/mock/procenv.py create mode 100644 test/units/mock/vault_helper.py create mode 100644 test/units/mock/yaml_helper.py create mode 100644 test/units/module_utils/__init__.py create mode 100644 test/units/module_utils/basic/__init__.py create mode 100644 test/units/module_utils/basic/test__log_invocation.py create mode 100644 test/units/module_utils/basic/test__symbolic_mode_to_octal.py create mode 100644 test/units/module_utils/basic/test_argument_spec.py create mode 100644 test/units/module_utils/basic/test_atomic_move.py create mode 100644 test/units/module_utils/basic/test_deprecate_warn.py create mode 100644 test/units/module_utils/basic/test_dict_converters.py create mode 100644 test/units/module_utils/basic/test_exit_json.py create mode 100644 test/units/module_utils/basic/test_filesystem.py create mode 100644 test/units/module_utils/basic/test_get_file_attributes.py create mode 100644 test/units/module_utils/basic/test_get_module_path.py create mode 100644 test/units/module_utils/basic/test_heuristic_log_sanitize.py create mode 100644 test/units/module_utils/basic/test_imports.py create mode 100644 test/units/module_utils/basic/test_log.py create mode 100644 test/units/module_utils/basic/test_no_log.py create mode 100644 test/units/module_utils/basic/test_platform_distribution.py create mode 100644 test/units/module_utils/basic/test_run_command.py create mode 100644 test/units/module_utils/basic/test_safe_eval.py create mode 100644 test/units/module_utils/basic/test_sanitize_keys.py create mode 100644 test/units/module_utils/basic/test_selinux.py create mode 100644 test/units/module_utils/basic/test_set_cwd.py create mode 100644 test/units/module_utils/basic/test_set_mode_if_different.py create mode 100644 test/units/module_utils/basic/test_tmpdir.py create mode 100644 test/units/module_utils/common/__init__.py create mode 100644 test/units/module_utils/common/parameters/test_handle_aliases.py create mode 100644 test/units/module_utils/common/parameters/test_list_deprecations.py create mode 100644 test/units/module_utils/common/parameters/test_list_no_log_values.py create mode 100644 
test/units/module_utils/common/process/test_get_bin_path.py create mode 100644 test/units/module_utils/common/test_collections.py create mode 100644 test/units/module_utils/common/test_dict_transformations.py create mode 100644 test/units/module_utils/common/test_network.py create mode 100644 test/units/module_utils/common/test_removed.py create mode 100644 test/units/module_utils/common/test_sys_info.py create mode 100644 test/units/module_utils/common/test_utils.py create mode 100644 test/units/module_utils/common/text/converters/test_container_to_bytes.py create mode 100644 test/units/module_utils/common/text/converters/test_container_to_text.py create mode 100644 test/units/module_utils/common/text/converters/test_json_encode_fallback.py create mode 100644 test/units/module_utils/common/text/converters/test_jsonify.py create mode 100644 test/units/module_utils/common/text/converters/test_to_str.py create mode 100644 test/units/module_utils/common/text/formatters/test_bytes_to_human.py create mode 100644 test/units/module_utils/common/text/formatters/test_human_to_bytes.py create mode 100644 test/units/module_utils/common/text/formatters/test_lenient_lowercase.py create mode 100644 test/units/module_utils/common/validation/test_check_mutually_exclusive.py create mode 100644 test/units/module_utils/common/validation/test_check_required_arguments.py create mode 100644 test/units/module_utils/common/validation/test_check_required_together.py create mode 100644 test/units/module_utils/common/validation/test_check_type_bits.py create mode 100644 test/units/module_utils/common/validation/test_check_type_bool.py create mode 100644 test/units/module_utils/common/validation/test_check_type_bytes.py create mode 100644 test/units/module_utils/common/validation/test_check_type_dict.py create mode 100644 test/units/module_utils/common/validation/test_check_type_float.py create mode 100644 test/units/module_utils/common/validation/test_check_type_int.py create mode 100644 test/units/module_utils/common/validation/test_check_type_jsonarg.py create mode 100644 test/units/module_utils/common/validation/test_check_type_list.py create mode 100644 test/units/module_utils/common/validation/test_check_type_path.py create mode 100644 test/units/module_utils/common/validation/test_check_type_raw.py create mode 100644 test/units/module_utils/common/validation/test_check_type_str.py create mode 100644 test/units/module_utils/common/validation/test_count_terms.py create mode 100644 test/units/module_utils/common/warnings/test_deprecate.py create mode 100644 test/units/module_utils/common/warnings/test_warn.py create mode 100644 test/units/module_utils/conftest.py create mode 100644 test/units/module_utils/facts/__init__.py create mode 100644 test/units/module_utils/facts/base.py create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu create mode 100644 
test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo create mode 100644 test/units/module_utils/facts/fixtures/distribution_files/ClearLinux create mode 100644 test/units/module_utils/facts/fixtures/distribution_files/CoreOS create mode 100644 test/units/module_utils/facts/fixtures/distribution_files/LinuxMint create mode 100644 test/units/module_utils/facts/fixtures/distribution_files/Slackware create mode 100644 test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent create mode 100644 test/units/module_utils/facts/fixtures/findmount_output.txt create mode 100644 test/units/module_utils/facts/hardware/__init__.py create mode 100644 test/units/module_utils/facts/hardware/linux_data.py create mode 100644 test/units/module_utils/facts/hardware/test_linux.py create mode 100644 test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py create mode 100644 test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py create mode 100644 test/units/module_utils/facts/network/__init__.py create mode 100644 test/units/module_utils/facts/network/test_fc_wwn.py create mode 100644 test/units/module_utils/facts/network/test_generic_bsd.py create mode 100644 test/units/module_utils/facts/network/test_iscsi_get_initiator.py create mode 100644 test/units/module_utils/facts/other/__init__.py create mode 100644 test/units/module_utils/facts/other/test_facter.py create mode 100644 test/units/module_utils/facts/other/test_ohai.py create mode 100644 test/units/module_utils/facts/system/__init__.py create mode 100644 test/units/module_utils/facts/system/distribution/__init__.py create mode 100644 test/units/module_utils/facts/system/distribution/conftest.py create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/almalinux_8_3_beta.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/debian_10.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/devuan.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json create mode 100644 
test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/omnios.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/openindiana.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json 
create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json create mode 100644 test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json create mode 100644 test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py create mode 100644 test/units/module_utils/facts/system/distribution/test_distribution_version.py create mode 100644 test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py create mode 100644 test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py create mode 100644 test/units/module_utils/facts/system/test_cmdline.py create mode 100644 test/units/module_utils/facts/system/test_lsb.py create mode 100644 test/units/module_utils/facts/test_ansible_collector.py create mode 100644 test/units/module_utils/facts/test_collector.py create mode 100644 test/units/module_utils/facts/test_collectors.py create mode 100644 test/units/module_utils/facts/test_date_time.py create mode 100644 test/units/module_utils/facts/test_facts.py create mode 100644 test/units/module_utils/facts/test_timeout.py create mode 100644 test/units/module_utils/facts/test_utils.py create mode 100644 test/units/module_utils/facts/virtual/__init__.py create mode 100644 test/units/module_utils/facts/virtual/test_linux.py create mode 100644 test/units/module_utils/json_utils/__init__.py create mode 100644 test/units/module_utils/json_utils/test_filter_non_json_lines.py create mode 100644 test/units/module_utils/parsing/test_convert_bool.py create mode 100644 test/units/module_utils/test_api.py create mode 100644 test/units/module_utils/test_distro.py create mode 100644 test/units/module_utils/urls/__init__.py create mode 100644 test/units/module_utils/urls/fixtures/client.key create mode 100644 test/units/module_utils/urls/fixtures/client.pem create mode 100644 test/units/module_utils/urls/fixtures/client.txt create mode 100644 test/units/module_utils/urls/fixtures/multipart.txt create mode 100644 test/units/module_utils/urls/fixtures/netrc create mode 100644 test/units/module_utils/urls/test_RedirectHandlerFactory.py create mode 100644 test/units/module_utils/urls/test_Request.py create mode 100644 test/units/module_utils/urls/test_RequestWithMethod.py create mode 100644 test/units/module_utils/urls/test_fetch_url.py create mode 100644 test/units/module_utils/urls/test_generic_urlparse.py create mode 100644 test/units/module_utils/urls/test_prepare_multipart.py create mode 100644 test/units/module_utils/urls/test_urls.py create mode 100644 test/units/modules/__init__.py create mode 100644 test/units/modules/conftest.py create mode 100644 test/units/modules/test_apt.py create mode 100644 test/units/modules/test_async_wrapper.py create mode 100644 test/units/modules/test_copy.py create mode 100644 test/units/modules/test_iptables.py create mode 100644 test/units/modules/test_known_hosts.py create mode 100644 test/units/modules/test_pip.py create mode 100644 test/units/modules/test_systemd.py create mode 100644 test/units/modules/test_yum.py create mode 100644 test/units/modules/utils.py create mode 100644 test/units/parsing/__init__.py create mode 100644 test/units/parsing/fixtures/ajson.json create mode 100644 test/units/parsing/fixtures/vault.yml create mode 100644 test/units/parsing/test_ajson.py create mode 100644 test/units/parsing/test_dataloader.py create mode 100644 test/units/parsing/test_mod_args.py create mode 100644 test/units/parsing/test_splitter.py create 
mode 100644 test/units/parsing/test_unquote.py create mode 100644 test/units/parsing/utils/__init__.py create mode 100644 test/units/parsing/utils/test_addresses.py create mode 100644 test/units/parsing/utils/test_jsonify.py create mode 100644 test/units/parsing/utils/test_yaml.py create mode 100644 test/units/parsing/vault/__init__.py create mode 100644 test/units/parsing/vault/test_vault.py create mode 100644 test/units/parsing/vault/test_vault_editor.py create mode 100644 test/units/parsing/yaml/__init__.py create mode 100644 test/units/parsing/yaml/test_dumper.py create mode 100644 test/units/parsing/yaml/test_loader.py create mode 100644 test/units/parsing/yaml/test_objects.py create mode 100644 test/units/playbook/__init__.py create mode 100644 test/units/playbook/role/__init__.py create mode 100644 test/units/playbook/role/test_include_role.py create mode 100644 test/units/playbook/role/test_role.py create mode 100644 test/units/playbook/test_attribute.py create mode 100644 test/units/playbook/test_base.py create mode 100644 test/units/playbook/test_block.py create mode 100644 test/units/playbook/test_collectionsearch.py create mode 100644 test/units/playbook/test_conditional.py create mode 100644 test/units/playbook/test_helpers.py create mode 100644 test/units/playbook/test_included_file.py create mode 100644 test/units/playbook/test_play.py create mode 100644 test/units/playbook/test_play_context.py create mode 100644 test/units/playbook/test_playbook.py create mode 100644 test/units/playbook/test_taggable.py create mode 100644 test/units/playbook/test_task.py create mode 100644 test/units/plugins/__init__.py create mode 100644 test/units/plugins/action/__init__.py create mode 100644 test/units/plugins/action/test_action.py create mode 100644 test/units/plugins/action/test_gather_facts.py create mode 100644 test/units/plugins/action/test_raw.py create mode 100644 test/units/plugins/become/__init__.py create mode 100644 test/units/plugins/become/conftest.py create mode 100644 test/units/plugins/become/test_su.py create mode 100644 test/units/plugins/become/test_sudo.py create mode 100644 test/units/plugins/cache/__init__.py create mode 100644 test/units/plugins/cache/test_cache.py create mode 100644 test/units/plugins/callback/__init__.py create mode 100644 test/units/plugins/callback/test_callback.py create mode 100644 test/units/plugins/connection/__init__.py create mode 100644 test/units/plugins/connection/test_connection.py create mode 100644 test/units/plugins/connection/test_local.py create mode 100644 test/units/plugins/connection/test_paramiko.py create mode 100644 test/units/plugins/connection/test_psrp.py create mode 100644 test/units/plugins/connection/test_ssh.py create mode 100644 test/units/plugins/connection/test_winrm.py create mode 100644 test/units/plugins/filter/__init__.py create mode 100644 test/units/plugins/filter/test_core.py create mode 100644 test/units/plugins/filter/test_mathstuff.py create mode 100644 test/units/plugins/inventory/__init__.py create mode 100644 test/units/plugins/inventory/test_constructed.py create mode 100644 test/units/plugins/inventory/test_inventory.py create mode 100644 test/units/plugins/inventory/test_script.py create mode 100644 test/units/plugins/loader_fixtures/__init__.py create mode 100644 test/units/plugins/loader_fixtures/import_fixture.py create mode 100644 test/units/plugins/lookup/__init__.py create mode 100644 test/units/plugins/lookup/test_env.py create mode 100644 test/units/plugins/lookup/test_ini.py create mode 
100644 test/units/plugins/lookup/test_password.py create mode 100644 test/units/plugins/shell/__init__.py create mode 100644 test/units/plugins/shell/test_cmd.py create mode 100644 test/units/plugins/shell/test_powershell.py create mode 100644 test/units/plugins/strategy/__init__.py create mode 100644 test/units/plugins/strategy/test_linear.py create mode 100644 test/units/plugins/strategy/test_strategy.py create mode 100644 test/units/plugins/test_plugins.py create mode 100644 test/units/regex/test_invalid_var_names.py create mode 100644 test/units/requirements.txt create mode 100644 test/units/template/__init__.py create mode 100644 test/units/template/test_native_concat.py create mode 100644 test/units/template/test_safe_eval.py create mode 100644 test/units/template/test_templar.py create mode 100644 test/units/template/test_template_utilities.py create mode 100644 test/units/template/test_vars.py create mode 100644 test/units/test_constants.py create mode 100644 test/units/test_context.py create mode 100644 test/units/utils/__init__.py create mode 100644 test/units/utils/collection_loader/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py create mode 100644 test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep create mode 100644 test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py create mode 100644 test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep create mode 100644 test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep create mode 100644 test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep create mode 100644 test/units/utils/collection_loader/test_collection_loader.py create mode 100644 test/units/utils/display/test_display.py create mode 100644 test/units/utils/display/test_logger.py create mode 100644 
test/units/utils/display/test_warning.py create mode 100644 test/units/utils/test_cleanup_tmp_file.py create mode 100644 test/units/utils/test_context_objects.py create mode 100644 test/units/utils/test_encrypt.py create mode 100644 test/units/utils/test_helpers.py create mode 100644 test/units/utils/test_isidentifier.py create mode 100644 test/units/utils/test_plugin_docs.py create mode 100644 test/units/utils/test_shlex.py create mode 100644 test/units/utils/test_unsafe_proxy.py create mode 100644 test/units/utils/test_vars.py create mode 100644 test/units/utils/test_version.py create mode 100644 test/units/vars/__init__.py create mode 100644 test/units/vars/test_module_response_deepcopy.py create mode 100644 test/units/vars/test_variable_manager.py diff --git a/test/ansible_test/Makefile b/test/ansible_test/Makefile new file mode 100644 index 00000000..7fb2a007 --- /dev/null +++ b/test/ansible_test/Makefile @@ -0,0 +1,13 @@ +all: sanity unit validate-modules-unit + +.PHONY: sanity +sanity: + $(abspath ${CURDIR}/../../bin/ansible-test) sanity test/lib/ ${FLAGS} + +.PHONY: unit +unit: + PYTHONPATH=$(abspath ${CURDIR}/../lib) pytest unit ${FLAGS} + +.PHONY: validate-modules-unit +validate-modules-unit: + PYTHONPATH=$(abspath ${CURDIR}/../lib/ansible_test/_data/sanity/validate-modules):$(abspath ${CURDIR}/../../lib) pytest validate-modules-unit ${FLAGS} diff --git a/test/ansible_test/unit/test_diff.py b/test/ansible_test/unit/test_diff.py new file mode 100644 index 00000000..1f2559d2 --- /dev/null +++ b/test/ansible_test/unit/test_diff.py @@ -0,0 +1,105 @@ +"""Tests for diff module.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import subprocess +import pytest + +from ansible_test._internal.util import ( + to_text, + to_bytes, +) + +from ansible_test._internal.diff import ( + parse_diff, + FileDiff, +) + + +def get_diff(base, head=None): + """Return a git diff between the base and head revision. + :type base: str + :type head: str | None + :rtype: list[str] + """ + if not head or head == 'HEAD': + head = to_text(subprocess.check_output(['git', 'rev-parse', 'HEAD'])).strip() + + cache = '/tmp/git-diff-cache-%s-%s.log' % (base, head) + + if os.path.exists(cache): + with open(cache, 'rb') as cache_fd: + lines = to_text(cache_fd.read()).splitlines() + else: + lines = to_text(subprocess.check_output(['git', 'diff', base, head]), errors='replace').splitlines() + + with open(cache, 'wb') as cache_fd: + cache_fd.write(to_bytes('\n'.join(lines))) + + assert lines + + return lines + + +def get_parsed_diff(base, head=None): + """Return a parsed git diff between the base and head revision.
+ :type base: str + :type head: str | None + :rtype: list[FileDiff] + """ + lines = get_diff(base, head) + items = parse_diff(lines) + + assert items + + for item in items: + assert item.headers + assert item.is_complete + + item.old.format_lines() + item.new.format_lines() + + for line_range in item.old.ranges: + assert line_range[1] >= line_range[0] > 0 + + for line_range in item.new.ranges: + assert line_range[1] >= line_range[0] > 0 + + return items + + +RANGES_TO_TEST = ( + ('f31421576b00f0b167cdbe61217c31c21a41ac02', 'HEAD'), + ('b8125ac1a61f2c7d1de821c78c884560071895f1', '32146acf4e43e6f95f54d9179bf01f0df9814217') +) + + +@pytest.mark.parametrize("base, head", RANGES_TO_TEST) +def test_parse_diff(base, head): + """Integration test to verify parsing of ansible/ansible history.""" + get_parsed_diff(base, head) + + +def test_parse_delete(): + """Integration test to verify parsing of a deleted file.""" + commit = 'ee17b914554861470b382e9e80a8e934063e0860' + items = get_parsed_diff(commit + '~', commit) + deletes = [item for item in items if not item.new.exists] + + assert len(deletes) == 1 + assert deletes[0].old.path == 'lib/ansible/plugins/connection/nspawn.py' + assert deletes[0].new.path == 'lib/ansible/plugins/connection/nspawn.py' + + +def test_parse_rename(): + """Integration test to verify parsing of renamed files.""" + commit = '16a39639f568f4dd5cb233df2d0631bdab3a05e9' + items = get_parsed_diff(commit + '~', commit) + renames = [item for item in items if item.old.path != item.new.path and item.old.exists and item.new.exists] + + assert len(renames) == 2 + assert renames[0].old.path == 'test/integration/targets/eos_eapi/tests/cli/badtransport.yaml' + assert renames[0].new.path == 'test/integration/targets/eos_eapi/tests/cli/badtransport.1' + assert renames[1].old.path == 'test/integration/targets/eos_eapi/tests/cli/zzz_reset.yaml' + assert renames[1].new.path == 'test/integration/targets/eos_eapi/tests/cli/zzz_reset.1' diff --git a/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py b/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py new file mode 100644 index 00000000..8c0b45ca --- /dev/null +++ b/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py @@ -0,0 +1,43 @@ +"""Tests for validate-modules regexes.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from validate_modules.main import TYPE_REGEX + + +@pytest.mark.parametrize('cstring,cexpected', [ + ['if type(foo) is Bar', True], + ['if Bar is type(foo)', True], + ['if type(foo) is not Bar', True], + ['if Bar is not type(foo)', True], + ['if type(foo) == Bar', True], + ['if Bar == type(foo)', True], + ['if type(foo)==Bar', True], + ['if Bar==type(foo)', True], + ['if type(foo) != Bar', True], + ['if Bar != type(foo)', True], + ['if type(foo)!=Bar', True], + ['if Bar!=type(foo)', True], + ['if foo or type(bar) != Bar', True], + ['x = type(foo)', False], + ["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False], + # cloud/amazon/ec2_group.py + ["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False], + # files/patch.py + ["p = type('Params', (), module.params)", False], # files/patch.py + # system/osx_defaults.py + ["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True], + # system/osx_defaults.py + ['raise OSXDefaultsException("Type mismatch. 
Type in defaults: " + type(self.current_value).__name__)', False], + # network/nxos/nxos_interface.py + ["if get_interface_type(interface) == 'svi':", False], +]) +def test_type_regex(cstring, cexpected): # type: (str, bool) -> None + """Check TYPE_REGEX against various examples to verify it correctly matches or does not match.""" + match = TYPE_REGEX.match(cstring) + if cexpected and not match: + assert False, "%s should have matched" % cstring + elif not cexpected and match: + assert False, "%s should not have matched" % cstring diff --git a/test/integration/network-integration.cfg b/test/integration/network-integration.cfg new file mode 100644 index 00000000..00764bcd --- /dev/null +++ b/test/integration/network-integration.cfg @@ -0,0 +1,14 @@ +# NOTE: This file is used by ansible-test to override specific Ansible constants +# This file is used by `ansible-test network-integration` + +[defaults] +host_key_checking = False +timeout = 90 + +[ssh_connection] +ssh_args = '-o UserKnownHostsFile=/dev/null' + +[persistent_connection] +command_timeout = 100 +connect_timeout = 100 +connect_retry_timeout = 100 diff --git a/test/integration/network-integration.requirements.txt b/test/integration/network-integration.requirements.txt new file mode 100644 index 00000000..9c4d78d6 --- /dev/null +++ b/test/integration/network-integration.requirements.txt @@ -0,0 +1 @@ +scp # needed by incidental_ios_file diff --git a/test/integration/targets/add_host/aliases b/test/integration/targets/add_host/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/add_host/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/add_host/tasks/main.yml b/test/integration/targets/add_host/tasks/main.yml new file mode 100644 index 00000000..399b0b6b --- /dev/null +++ b/test/integration/targets/add_host/tasks/main.yml @@ -0,0 +1,159 @@ +# test code for the add_host action +# (c) 2015, Matt Davis + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
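(A note on the TYPE_REGEX cases a few hunks up: that sanity check is meant to flag conditionals that compare against type(...) where isinstance() should be used, while leaving other uses of type() alone. The real pattern lives in validate_modules.main and is not shown in this patch; the sketch below is only a rough, hypothetical approximation of the idea and is not guaranteed to reproduce every edge case in the parametrized list.)

    import re

    # Hypothetical simplification -- NOT the actual TYPE_REGEX from validate_modules.main.
    # Flags an 'if' containing a bare type(...) call, but ignores names that merely
    # end in 'type' and plain assignments outside a conditional.
    SKETCH = re.compile(r'.*\bif\b.*(?<![\w.])type\(')

    assert SKETCH.match('if type(foo) is Bar')
    assert SKETCH.match('if Bar != type(foo)')
    assert not SKETCH.match('x = type(foo)')
    assert not SKETCH.match("if get_interface_type(interface) == 'svi':")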
+ +# See https://github.com/ansible/ansible/issues/36045 +- set_fact: + inventory_data: + ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" + # ansible_ssh_host: "127.0.0.3" + ansible_host: "127.0.0.3" + ansible_ssh_pass: "foobar" + # ansible_ssh_port: "2222" + ansible_port: "2222" + ansible_ssh_private_key_file: "/tmp/inventory-cloudj9cGz5/identity" + ansible_ssh_user: "root" + hostname: "newdynamichost2" + +- name: Show inventory_data for 36045 + debug: + msg: "{{ inventory_data }}" + +- name: Add host from dict 36045 + add_host: "{{ inventory_data }}" + +- name: show newly added host + debug: + msg: "{{hostvars['newdynamichost2'].group_names}}" + +- name: ensure that dynamically-added newdynamichost2 is visible via hostvars, groups 36045 + assert: + that: + - hostvars['newdynamichost2'] is defined + - hostvars['newdynamichost2'].group_names is defined + +# end of https://github.com/ansible/ansible/issues/36045 related tests + +- name: add a host to the runtime inventory + add_host: + name: newdynamichost + groups: newdynamicgroup + a_var: from add_host + +- debug: msg={{hostvars['newdynamichost'].group_names}} + +- name: ensure that dynamically-added host is visible via hostvars, groups, etc (there are several caches that could break this) + assert: + that: + - hostvars['bogushost'] is not defined # there was a bug where an undefined host was a "type" instead of an instance- ensure this works before we rely on it + - hostvars['newdynamichost'] is defined + - hostvars['newdynamichost'].group_names is defined + - "'newdynamicgroup' in hostvars['newdynamichost'].group_names" + - hostvars['newdynamichost']['bogusvar'] is not defined + - hostvars['newdynamichost']['a_var'] is defined + - hostvars['newdynamichost']['a_var'] == 'from add_host' + - groups['bogusgroup'] is not defined # same check as above to ensure that bogus groups are undefined... 
+ - groups['newdynamicgroup'] is defined + - "'newdynamichost' in groups['newdynamicgroup']" + +# Tests for idempotency +- name: Add testhost01 dynamic host + add_host: + name: testhost01 + register: add_testhost01 + +- name: Try adding testhost01 again, with no changes + add_host: + name: testhost01 + register: add_testhost01_idem + +- name: Add a host variable to testhost01 + add_host: + name: testhost01 + foo: bar + register: hostvar_testhost01 + +- name: Add the same host variable to testhost01, with no changes + add_host: + name: testhost01 + foo: bar + register: hostvar_testhost01_idem + +- name: Add another host, testhost02 + add_host: + name: testhost02 + register: add_testhost02 + +- name: Add it again for good measure + add_host: + name: testhost02 + register: add_testhost02_idem + +- name: Add testhost02 to a group + add_host: + name: testhost02 + groups: + - testhostgroup + register: add_group_testhost02 + +- name: Add testhost01 to the same group + add_host: + name: testhost01 + groups: + - testhostgroup + register: add_group_testhost01 + +- name: Add testhost02 to the group again + add_host: + name: testhost02 + groups: + - testhostgroup + register: add_group_testhost02_idem + +- name: Add testhost01 to the group again + add_host: + name: testhost01 + groups: + - testhostgroup + register: add_group_testhost01_idem + +- assert: + that: + - add_testhost01 is changed + - add_testhost01_idem is not changed + - hostvar_testhost01 is changed + - hostvar_testhost01_idem is not changed + - add_testhost02 is changed + - add_testhost02_idem is not changed + - add_group_testhost02 is changed + - add_group_testhost01 is changed + - add_group_testhost02_idem is not changed + - add_group_testhost01_idem is not changed + - groups['testhostgroup']|length == 2 + - "'testhost01' in groups['testhostgroup']" + - "'testhost02' in groups['testhostgroup']" + - hostvars['testhost01']['foo'] == 'bar' + +- name: Give invalid input + add_host: namenewdynamichost groupsnewdynamicgroup a_varfromadd_host + ignore_errors: true + register: badinput + +- name: verify we detected bad input + assert: + that: + - badinput is failed diff --git a/test/integration/targets/ansiballz_python/aliases b/test/integration/targets/ansiballz_python/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/ansiballz_python/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py b/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py new file mode 100644 index 00000000..a01ee997 --- /dev/null +++ b/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py @@ -0,0 +1,31 @@ +#!/usr/bin/python +# +# Copyright 2018 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import resource +import subprocess + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + + rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE) + + try: + maxfd = subprocess.MAXFD + except AttributeError: + maxfd = -1 + + module.exit_json(rlimit_nofile=rlimit_nofile, maxfd=maxfd, infinity=resource.RLIM_INFINITY) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansiballz_python/library/custom_module.py 
b/test/integration/targets/ansiballz_python/library/custom_module.py new file mode 100644 index 00000000..625823ea --- /dev/null +++ b/test/integration/targets/ansiballz_python/library/custom_module.py @@ -0,0 +1,19 @@ +#!/usr/bin/python + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ..module_utils.basic import AnsibleModule # pylint: disable=relative-beyond-top-level +from ..module_utils.custom_util import forty_two # pylint: disable=relative-beyond-top-level + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + + module.exit_json(answer=forty_two()) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansiballz_python/library/sys_check.py b/test/integration/targets/ansiballz_python/library/sys_check.py new file mode 100644 index 00000000..aa22fe68 --- /dev/null +++ b/test/integration/targets/ansiballz_python/library/sys_check.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +# https://github.com/ansible/ansible/issues/64664 +# https://github.com/ansible/ansible/issues/64479 + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule({}) + + this_module = sys.modules[__name__] + module.exit_json( + failed=not getattr(this_module, 'AnsibleModule', False) + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansiballz_python/module_utils/custom_util.py b/test/integration/targets/ansiballz_python/module_utils/custom_util.py new file mode 100644 index 00000000..0393db47 --- /dev/null +++ b/test/integration/targets/ansiballz_python/module_utils/custom_util.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def forty_two(): + return 42 diff --git a/test/integration/targets/ansiballz_python/tasks/main.yml b/test/integration/targets/ansiballz_python/tasks/main.yml new file mode 100644 index 00000000..0aaa6451 --- /dev/null +++ b/test/integration/targets/ansiballz_python/tasks/main.yml @@ -0,0 +1,68 @@ +- name: get the ansible-test imposed file descriptor limit + check_rlimit_and_maxfd: + register: rlimit_limited_return + +- name: get existing file descriptor limit + check_rlimit_and_maxfd: + register: rlimit_original_return + vars: + ansible_python_module_rlimit_nofile: 0 # ignore limit set by ansible-test + +- name: attempt to set a value lower than existing soft limit + check_rlimit_and_maxfd: + vars: + ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[0] - 1 }}' + register: rlimit_below_soft_return + +- name: attempt to set a value higher than existing soft limit + check_rlimit_and_maxfd: + vars: + ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[0] + 1 }}' + register: rlimit_above_soft_return + +- name: attempt to set a value lower than existing hard limit + check_rlimit_and_maxfd: + vars: + ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[1] - 1 }}' + register: rlimit_below_hard_return + +- name: attempt to set a value higher than existing hard limit + check_rlimit_and_maxfd: + vars: + ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[1] + 1 }}' + register: rlimit_above_hard_return + +- name: run a role module which uses a role module_util using relative imports + custom_module: + register: custom_module_return + +- assert: + that: + # make sure ansible-test was able 
to set the limit unless it exceeds the hard limit or the value is lower on macOS + - rlimit_limited_return.rlimit_nofile[0] == 1024 or rlimit_original_return.rlimit_nofile[1] < 1024 or (rlimit_limited_return.rlimit_nofile[0] < 1024 and ansible_distribution == 'MacOSX') + # make sure that maxfd matches the soft limit on Python 2.x (-1 on Python 3.x) + - rlimit_limited_return.maxfd == rlimit_limited_return.rlimit_nofile[0] or rlimit_limited_return.maxfd == -1 + + # we should always be able to set the limit lower than the existing soft limit + - rlimit_below_soft_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[0] - 1 + # the hard limit should not have changed + - rlimit_below_soft_return.rlimit_nofile[1] == rlimit_original_return.rlimit_nofile[1] + # lowering the limit should also lower the max file descriptors reported by Python 2.x (-1 on Python 3.x) + - rlimit_below_soft_return.maxfd == rlimit_original_return.rlimit_nofile[0] - 1 or rlimit_below_soft_return.maxfd == -1 + + # we should be able to set the limit higher than the existing soft limit if it does not exceed the hard limit (except on macOS) + - rlimit_above_soft_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[0] + 1 or rlimit_original_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] or ansible_distribution == 'MacOSX' + + # we should be able to set the limit lower than the existing hard limit (except on macOS) + - rlimit_below_hard_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] - 1 or ansible_distribution == 'MacOSX' + + # setting the limit higher than the existing hard limit should use the hard limit (except on macOS) + - rlimit_above_hard_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] or ansible_distribution == 'MacOSX' + + # custom module returned the correct answer + - custom_module_return.answer == 42 + +# https://github.com/ansible/ansible/issues/64664 +# https://github.com/ansible/ansible/issues/64479 +- name: Run module that tries to access itself via sys.modules + sys_check: diff --git a/test/integration/targets/ansible-doc/aliases b/test/integration/targets/ansible-doc/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/ansible-doc/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json new file mode 100644 index 00000000..243a5e43 --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json @@ -0,0 +1,30 @@ +{ + "collection_info": { + "description": null, + "repository": "", + "tags": [], + "dependencies": {}, + "authors": [ + "Ansible (https://ansible.com)" + ], + "issues": "", + "name": "testcol", + "license": [ + "GPL-3.0-or-later" + ], + "documentation": "", + "namespace": "testns", + "version": "0.1.1231", + "readme": "README.md", + "license_file": "COPYING", + "homepage": "" + }, + "file_manifest_file": { + "format": 1, + "ftype": "file", + "chksum_sha256": "4c15a867ceba8ba1eaf2f4a58844bb5dbb82fec00645fc7eb74a3d31964900f6", + "name": "FILES.json", + "chksum_type": "sha256" + }, + "format": 1 +}
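(The soft/hard limit rules asserted in the ansiballz_python tasks above come straight from getrlimit/setrlimit semantics. Below is a minimal Python sketch of that behaviour, assuming an unprivileged process so the hard limit cannot be raised; it is illustration only, not part of the patch.)

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    # Lowering the soft limit below its current value always succeeds.
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft - 1, hard))

    # Requesting a soft limit above the hard limit is rejected, which is why a
    # caller must clamp the requested value to the hard limit first.
    if hard != resource.RLIM_INFINITY:
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard + 1, hard))
        except (ValueError, OSError):
            pass  # expected for a regular user

diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py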
b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py new file mode 100644 index 00000000..ee56f6ee --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py @@ -0,0 +1,49 @@ +# (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: notjsonfile + short_description: JSON formatted files. + description: + - This cache uses JSON formatted, per host, files saved to the filesystem. + author: Ansible Core (@ansible-core) + options: + _uri: + required: True + description: + - Path in which the cache plugin will save the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + description: Expiration timeout for the cache plugin data + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer +''' + +from ansible.plugins.cache import BaseFileCacheModule + + +class CacheModule(BaseFileCacheModule): + """ + A caching module backed by json files. + """ + pass diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py new file mode 100644 index 00000000..cbb8f0fb --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py @@ -0,0 +1,35 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + inventory: statichost + short_description: Add a single host + description: Add a single host + extends_documentation_fragment: + - inventory_cache + options: + plugin: + description: plugin name (must be statichost) + required: true + hostname: + description: Toggle display of stderr even when script was successful + required: True +''' + +from ansible.errors import AnsibleParserError +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable + + +class InventoryModule(BaseInventoryPlugin, Cacheable): + + NAME = 'testns.content_adj.statichost' + + def verify_file(self, path): + pass + + def parse(self, inventory, loader, path, cache=None): + + pass diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py new file mode 100644 index 00000000..daecac5d --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py @@ -0,0 +1,37 @@ +# (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, 
print_function) + +__metaclass__ = type + +DOCUMENTATION = """ + lookup: noop + author: Ansible core team + short_description: returns input + description: + - this is a noop +""" + +EXAMPLES = """ +- name: do nothing + debug: msg="{{ lookup('testns.testcol.noop', [1,2,3,4]) }}" +""" + +RETURN = """ + _list: + description: input given +""" + +from ansible.module_utils.common._collections_compat import Sequence +from ansible.plugins.lookup import LookupBase +from ansible.errors import AnsibleError + + +class LookupModule(LookupBase): + + def run(self, terms, **kwargs): + if not isinstance(terms, Sequence): + raise AnsibleError("testns.testcol.noop expects a list") + return terms diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py new file mode 100644 index 00000000..decdbef4 --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py @@ -0,0 +1,26 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = """ + module: fakemodule + short_desciptoin: fake module + description: + - this is a fake module + options: + _notreal: + description: really not a real option + author: + - me +""" + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='testns.testcol.fakemodule'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py new file mode 100644 index 00000000..4479f23f --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='testns.testcol.notrealmodule'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py new file mode 100644 index 00000000..ccb33b04 --- /dev/null +++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py @@ -0,0 +1,27 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: noop_vars_plugin + short_description: Do NOT load host and group vars + description: don't test loading host and group vars from a collection + options: + stage: + default: all + choices: ['all', 'inventory', 'task'] + type: str + ini: + - key: stage + section: testns.testcol.noop_vars_plugin + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'collection': 'yes', 'notreal': 'value'} diff --git a/test/integration/targets/ansible-doc/fakemodule.output
b/test/integration/targets/ansible-doc/fakemodule.output new file mode 100644 index 00000000..adc27e08 --- /dev/null +++ b/test/integration/targets/ansible-doc/fakemodule.output @@ -0,0 +1,15 @@ +> TESTNS.TESTCOL.FAKEMODULE (./collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py) + + this is a fake module + +OPTIONS (= is mandatory): + +- _notreal + really not a real option + [Default: (null)] + + +AUTHOR: me + +SHORT_DESCIPTOIN: fake module + diff --git a/test/integration/targets/ansible-doc/inventory b/test/integration/targets/ansible-doc/inventory new file mode 100644 index 00000000..ab9b62c8 --- /dev/null +++ b/test/integration/targets/ansible-doc/inventory @@ -0,0 +1 @@ +not_empty # avoid empty hosts list warning without defining explicit localhost diff --git a/test/integration/targets/ansible-doc/library/test_docs.py b/test/integration/targets/ansible-doc/library/test_docs.py new file mode 100644 index 00000000..39ae3728 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: test_docs +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_missing_description.py b/test/integration/targets/ansible-doc/library/test_docs_missing_description.py new file mode 100644 index 00000000..6ed41836 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_missing_description.py @@ -0,0 +1,40 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_missing_description +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +options: + test: + type: str +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + test=dict(type='str'), + ), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py b/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py new file mode 100644 index 00000000..4ea86f02 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_no_metadata +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_no_status.py b/test/integration/targets/ansible-doc/library/test_docs_no_status.py new file mode 100644 index
00000000..1b0db4e9 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_no_status.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: test_docs_no_status +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py b/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py new file mode 100644 index 00000000..63d080f6 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': 1, + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: test_docs_non_iterable_status +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py b/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py new file mode 100644 index 00000000..3de1c690 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py @@ -0,0 +1,40 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_removed_precedence +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +deprecated: + alternative: new_module + why: Updated module released with more functionality + removed_at_date: '2022-06-01' + removed_in: '2.14' +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_removed_status.py b/test/integration/targets/ansible-doc/library/test_docs_removed_status.py new file mode 100644 index 00000000..cb48c169 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_removed_status.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['removed'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: test_docs_removed_status +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff 
--git a/test/integration/targets/ansible-doc/library/test_docs_returns.py b/test/integration/targets/ansible-doc/library/test_docs_returns.py new file mode 100644 index 00000000..77c13764 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_returns.py @@ -0,0 +1,56 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_returns +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +z_last: + description: A last result. + type: str + returned: success + +m_middle: + description: + - This should be in the middle. + - Has some more data + type: dict + returned: success and 1st of month + contains: + suboption: + description: A suboption. + type: str + choices: [ARF, BARN, c_without_capital_first_letter] + +a_first: + description: A first result. + type: str + returned: success +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py b/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py new file mode 100644 index 00000000..d6d62643 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py @@ -0,0 +1,40 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_returns_broken +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +test: + description: A test return value. + type: str + +broken_key: [ +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_docs_suboptions.py b/test/integration/targets/ansible-doc/library/test_docs_suboptions.py new file mode 100644 index 00000000..c922d1d6 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_docs_suboptions.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: test_docs_suboptions +short_description: Test module +description: + - Test module +author: + - Ansible Core Team +options: + with_suboptions: + description: + - An option with suboptions. + - Use with care. + type: dict + suboptions: + z_last: + description: The last suboption. + type: str + m_middle: + description: + - The suboption in the middle. + - Has its own suboptions. + suboptions: + a_suboption: + description: A sub-suboption. + type: str + a_first: + description: The first suboption. 
+ type: str +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + test_docs_suboptions=dict( + type='dict', + options=dict( + a_first=dict(type='str'), + m_middle=dict( + type='dict', + options=dict( + a_suboption=dict(type='str') + ), + ), + z_last=dict(type='str'), + ), + ), + ), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_empty.py b/test/integration/targets/ansible-doc/library/test_empty.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/ansible-doc/library/test_no_docs.py b/test/integration/targets/ansible-doc/library/test_no_docs.py new file mode 100644 index 00000000..5503aedb --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_no_docs.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py b/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py new file mode 100644 index 00000000..48872684 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py @@ -0,0 +1,18 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py b/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py new file mode 100644 index 00000000..f90c5c71 --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py @@ -0,0 +1,22 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'supported_by': 'core'} + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py b/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py new file mode 100644 index 00000000..44fbedee --- /dev/null +++ b/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': 1, + 'supported_by': 'core'} + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-doc/runme.sh b/test/integration/targets/ansible-doc/runme.sh new file mode 100755 index 00000000..b5929f60 --- /dev/null +++ 
b/test/integration/targets/ansible-doc/runme.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -eux +ansible-playbook test.yml -i inventory "$@" + +( +unset ANSIBLE_PLAYBOOK_DIR +cd "$(dirname "$0")" + +# test module docs from collection +current_out="$(ansible-doc --playbook-dir ./ testns.testcol.fakemodule)" +expected_out="$(cat fakemodule.output)" +test "$current_out" == "$expected_out" + +# ensure we do work with a valid collection name for list +ansible-doc --list testns.testcol --playbook-dir ./ 2>&1 | grep -v "Invalid collection pattern" + +# ensure we don't break on an invalid collection name for list +ansible-doc --list testns.testcol.fakemodule --playbook-dir ./ 2>&1 | grep "Invalid collection pattern" + + +# test listing different plugin types from the collection +for ptype in cache inventory lookup vars
do + # each plugin type adds 1 plugin from the collection + # FIXME pre=$(ansible-doc -l -t ${ptype}|wc -l) + # FIXME post=$(ansible-doc -l -t ${ptype} --playbook-dir ./|wc -l) + # FIXME test "$pre" -eq $((post - 1)) + + # ensure we ONLY list from the collection + justcol=$(ansible-doc -l -t ${ptype} --playbook-dir ./ testns.testcol|wc -l) + test "$justcol" -eq 1 + + # ensure we get 0 plugins when restricting to the collection but not supplying --playbook-dir + justcol=$(ansible-doc -l -t ${ptype} testns.testcol|wc -l) + test "$justcol" -eq 0 + + # ensure we get 1 plugin when restricting to the namespace + justcol=$(ansible-doc -l -t ${ptype} --playbook-dir ./ testns|wc -l) + test "$justcol" -eq 1 +done +) diff --git a/test/integration/targets/ansible-doc/test.yml b/test/integration/targets/ansible-doc/test.yml new file mode 100644 index 00000000..a077a994 --- /dev/null +++ b/test/integration/targets/ansible-doc/test.yml @@ -0,0 +1,138 @@ +- hosts: localhost + gather_facts: no + environment: + ANSIBLE_LIBRARY: "{{ playbook_dir }}/library" + tasks: + - name: module with missing description return docs + command: ansible-doc test_docs_missing_description + register: result + ignore_errors: true + + - assert: + that: + - result is failed + - | + "ERROR! Unable to retrieve documentation from 'test_docs_missing_description' due to: All (sub-)options and return values must have a 'description' field" + in result.stderr + + - name: module with suboptions + command: ansible-doc test_docs_suboptions + register: result + ignore_errors: true + + - set_fact: + actual_output: >- + {{ result.stdout | regex_replace('^(> [A-Z_]+ +\().+library/([a-z_]+.py)\)$', '\1library/\2)', multiline=true) }} + expected_output: "{{ lookup('file', 'test_docs_suboptions.output') }}" + + - assert: + that: + - result is succeeded + - actual_output == expected_output + + - name: module with return docs + command: ansible-doc test_docs_returns + register: result + ignore_errors: true + + - set_fact: + actual_output: >- + {{ result.stdout | regex_replace('^(> [A-Z_]+ +\().+library/([a-z_]+.py)\)$', '\1library/\2)', multiline=true) }} + expected_output: "{{ lookup('file', 'test_docs_returns.output') }}" + + - assert: + that: + - result is succeeded + - actual_output == expected_output + + - name: module with broken return docs + command: ansible-doc test_docs_returns_broken + register: result + ignore_errors: true + + - assert: + that: + - result is failed + - '"ERROR!
module test_docs_returns_broken missing documentation (or could not parse documentation)" in result.stderr' + + - name: non-existent module + command: ansible-doc test_does_not_exist + register: result + - assert: + that: + - '"[WARNING]: module test_does_not_exist not found in:" in result.stderr' + + - name: documented module + command: ansible-doc test_docs + register: result + - assert: + that: + - '"WARNING" not in result.stderr' + - '"TEST_DOCS " in result.stdout' + - '"AUTHOR: Ansible Core Team" in result.stdout' + + - name: documented module without metadata + command: ansible-doc test_docs_no_metadata + register: result + - assert: + that: + - '"WARNING" not in result.stderr' + - '"TEST_DOCS_NO_METADATA " in result.stdout' + - '"AUTHOR: Ansible Core Team" in result.stdout' + + - name: documented module with no status in metadata + command: ansible-doc test_docs_no_status + register: result + - assert: + that: + - '"WARNING" not in result.stderr' + - '"TEST_DOCS_NO_STATUS " in result.stdout' + - '"AUTHOR: Ansible Core Team" in result.stdout' + + - name: documented module with non-iterable status in metadata + command: ansible-doc test_docs_non_iterable_status + register: result + - assert: + that: + - '"WARNING" not in result.stderr' + - '"TEST_DOCS_NON_ITERABLE_STATUS " in result.stdout' + - '"AUTHOR: Ansible Core Team" in result.stdout' + + - name: documented module with removed status + command: ansible-doc test_docs_removed_status + register: result + + - assert: + that: + - '"WARNING" not in result.stderr' + - '"TEST_DOCS_REMOVED_STATUS " in result.stdout' + - '"AUTHOR: Ansible Core Team" in result.stdout' + + - name: empty module + command: ansible-doc test_empty + register: result + ignore_errors: true + + - assert: + that: + - result is failed + + - name: module with no documentation + command: ansible-doc test_no_docs + register: result + ignore_errors: true + + - assert: + that: + - result is failed + + - name: deprecated module with both removed date and version (date should get precedence) + command: ansible-doc test_docs_removed_precedence + register: result + + - assert: + that: + - '"DEPRECATED" in result.stdout' + - '"Reason: Updated module released with more functionality" in result.stdout' + - '"Will be removed in a release after 2022-06-01" in result.stdout' + - '"Alternatives: new_module" in result.stdout' diff --git a/test/integration/targets/ansible-doc/test_docs_returns.output b/test/integration/targets/ansible-doc/test_docs_returns.output new file mode 100644 index 00000000..9fbbc8c7 --- /dev/null +++ b/test/integration/targets/ansible-doc/test_docs_returns.output @@ -0,0 +1,37 @@ +> TEST_DOCS_RETURNS (library/test_docs_returns.py) + + Test module + +AUTHOR: Ansible Core Team + +EXAMPLES: + + + + +RETURN VALUES: +- a_first + A first result. + + returned: success + type: str + +- m_middle + This should be in the middle. + Has some more data + + returned: success and 1st of month + type: dict + + CONTAINS: + + - suboption + A suboption. + (Choices: ARF, BARN, c_without_capital_first_letter) + type: str + +- z_last + A last result. 
+ + returned: success + type: str diff --git a/test/integration/targets/ansible-doc/test_docs_suboptions.output b/test/integration/targets/ansible-doc/test_docs_suboptions.output new file mode 100644 index 00000000..52b51d9d --- /dev/null +++ b/test/integration/targets/ansible-doc/test_docs_suboptions.output @@ -0,0 +1,43 @@ +> TEST_DOCS_SUBOPTIONS (library/test_docs_suboptions.py) + + Test module + +OPTIONS (= is mandatory): + +- with_suboptions + An option with suboptions. + Use with care. + [Default: (null)] + type: dict + + SUBOPTIONS: + + - a_first + The first suboption. + [Default: (null)] + type: str + + - m_middle + The suboption in the middle. + Has its own suboptions. + [Default: (null)] + + SUBOPTIONS: + + - a_suboption + A sub-suboption. + [Default: (null)] + type: str + + - z_last + The last suboption. + [Default: (null)] + type: str + + +AUTHOR: Ansible Core Team + +EXAMPLES: + + + diff --git a/test/integration/targets/ansible-galaxy-collection-scm/aliases b/test/integration/targets/ansible-galaxy-collection-scm/aliases new file mode 100644 index 00000000..9c34b360 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/aliases @@ -0,0 +1,3 @@ +shippable/posix/group4 +skip/aix +skip/python2.6 # ansible-galaxy uses tarfile with features not available until 2.7 diff --git a/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml b/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml new file mode 100644 index 00000000..e3dd5fb1 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml new file mode 100644 index 00000000..672b849c --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml @@ -0,0 +1,47 @@ +- name: create test download dir + file: + path: '{{ galaxy_dir }}/download' + state: directory + +- name: download a git repository + command: > + ansible-galaxy collection download + git+https://github.com/ansible-collections/amazon.aws.git,37875c5b4ba5bf3cc43e07edf29f3432fd76def5 + git+https://github.com/AlanCoding/awx.git#awx_collection,750c22a150d04eef1cb625fd4f83cce57949416c + args: + chdir: '{{ galaxy_dir }}/download' + register: download_collection + +- name: check that the amazon.aws collection was downloaded + stat: + path: '{{ galaxy_dir }}/download/collections/amazon-aws-1.0.0.tar.gz' + register: download_collection_amazon_actual + +- name: check that the awx.awx collection was downloaded + stat: + path: '{{ galaxy_dir }}/download/collections/awx-awx-0.0.1-devel.tar.gz' + register: download_collection_awx_actual + +- assert: + that: + - '"Downloading collection ''amazon.aws'' to" in download_collection.stdout' + - '"Downloading collection ''awx.awx'' to" in download_collection.stdout' + - download_collection_amazon_actual.stat.exists + - download_collection_awx_actual.stat.exists + +- name: test the downloaded repository can be installed + command: 'ansible-galaxy collection install -r requirements.yml' + args: + chdir: '{{ galaxy_dir }}/download/collections/' + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'amazon.aws' in installed_collections.stdout" + - "'awx.awx' in installed_collections.stdout" + +- include_tasks: 
./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml new file mode 100644 index 00000000..f21a6f6b --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml @@ -0,0 +1,7 @@ +- name: delete installed collections + file: + state: "{{ item }}" + path: "{{ galaxy_dir }}/ansible_collections" + loop: + - absent + - directory diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml new file mode 100644 index 00000000..1b761f60 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml @@ -0,0 +1,20 @@ +- name: Clone a git repository + git: + repo: https://github.com/ansible-collections/amazon.aws.git + dest: '{{ galaxy_dir }}/development/amazon.aws/' + +- name: install + command: 'ansible-galaxy collection install git+file://{{galaxy_dir }}/development/amazon.aws/.git' + args: + chdir: '{{ galaxy_dir }}/development' + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'amazon.aws' in installed_collections.stdout" + +- include_tasks: ./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml new file mode 100644 index 00000000..7db7e1d6 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: set the temp test directory + set_fact: + galaxy_dir: "{{ remote_tmp_dir }}/galaxy" + +- name: Test installing collections from git repositories + environment: + ANSIBLE_COLLECTIONS_PATHS: '{{ galaxy_dir }}' + vars: + cleanup: True + galaxy_dir: "{{ galaxy_dir }}" + block: + + - include_tasks: ./setup.yml + - include_tasks: ./individual_collection_repo.yml + - include_tasks: ./setup_multi_collection_repo.yml + - include_tasks: ./multi_collection_repo_all.yml + - include_tasks: ./scm_dependency.yml + vars: + cleanup: False + - include_tasks: ./reinstalling.yml + - include_tasks: ./multi_collection_repo_individual.yml + - include_tasks: ./setup_recursive_scm_dependency.yml + - include_tasks: ./scm_dependency_deduplication.yml + - include_tasks: ./download.yml + + always: + + - name: Remove the directories for installing collections and git repositories + file: + path: '{{ item }}' + state: absent + loop: + - '{{ galaxy_dir }}/ansible_collections' + - '{{ galaxy_dir }}/development' + + - name: remove git + package: + name: git + state: absent + when: git_install is changed diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml new file mode 100644 index 00000000..2992062a --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml @@ -0,0 +1,14 @@ +- name: Install all collections by default + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git' + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: 
installed_collections + +- assert: + that: + - "'ansible_test.collection_1' in installed_collections.stdout" + - "'ansible_test.collection_2' in installed_collections.stdout" + +- include_tasks: ./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml new file mode 100644 index 00000000..48f6407a --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml @@ -0,0 +1,15 @@ +- name: test installing one collection + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#collection_2' + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'amazon.aws' not in installed_collections.stdout" + - "'ansible_test.collection_1' not in installed_collections.stdout" + - "'ansible_test.collection_2' in installed_collections.stdout" + +- include_tasks: ./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml new file mode 100644 index 00000000..c0f6c910 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml @@ -0,0 +1,31 @@ +- name: Rerun installing a collection with a dep + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/' + register: installed + +- assert: + that: + - "'Skipping' in installed.stdout" + - "'Created' not in installed.stdout" + +- name: Only reinstall the collection + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/ --force' + register: installed + +- assert: + that: + - "'Created collection for ansible_test.collection_1' in installed.stdout" + - "'Created collection for ansible_test.collection_2' not in installed.stdout" + - "'Skipping' in installed.stdout" + +- name: Reinstall the collection and dependency + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/ --force-with-deps' + register: installed + +- assert: + that: + - "'Created collection for ansible_test.collection_1' in installed.stdout" + - "'Created collection for ansible_test.collection_2' in installed.stdout" + - "'Skipping' not in installed.stdout" + +- include_tasks: ./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml new file mode 100644 index 00000000..5a23663e --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml @@ -0,0 +1,14 @@ +- name: test installing one collection that has a SCM dep + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/' + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'ansible_test.collection_1' in installed_collections.stdout" + - "'ansible_test.collection_2' in installed_collections.stdout" + +- include_tasks: 
./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml new file mode 100644 index 00000000..bc10f24c --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml @@ -0,0 +1,54 @@ +- name: Install all collections in a repo, one of which has a recursive dependency + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/namespace_1/.git' + register: command + +- assert: + that: + - command.stdout_lines | length == 9 + - command.stdout_lines[0] == "Starting galaxy collection install process" + - command.stdout_lines[1] == "Process install dependency map" + - command.stdout_lines[2] == "Starting collection install process" + - "'namespace_1.collection_1' in command.stdout_lines[3]" + - "'namespace_1.collection_1' in command.stdout_lines[4]" + - "'namespace_1.collection_1' in command.stdout_lines[5]" + - "'namespace_2.collection_2' in command.stdout_lines[6]" + - "'namespace_2.collection_2' in command.stdout_lines[7]" + - "'namespace_2.collection_2' in command.stdout_lines[8]" + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'namespace_1.collection_1' in installed_collections.stdout" + - "'namespace_2.collection_2' in installed_collections.stdout" + +- name: Install a specific collection in a repo with a recursive dependency + command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/namespace_1/.git#/collection_1/ --force-with-deps' + register: command + +- assert: + that: + - command.stdout_lines | length == 9 + - command.stdout_lines[0] == "Starting galaxy collection install process" + - command.stdout_lines[1] == "Process install dependency map" + - command.stdout_lines[2] == "Starting collection install process" + - "'namespace_1.collection_1' in command.stdout_lines[3]" + - "'namespace_1.collection_1' in command.stdout_lines[4]" + - "'namespace_1.collection_1' in command.stdout_lines[5]" + - "'namespace_2.collection_2' in command.stdout_lines[6]" + - "'namespace_2.collection_2' in command.stdout_lines[7]" + - "'namespace_2.collection_2' in command.stdout_lines[8]" + +- name: list installed collections + command: 'ansible-galaxy collection list' + register: installed_collections + +- assert: + that: + - "'namespace_1.collection_1' in installed_collections.stdout" + - "'namespace_2.collection_2' in installed_collections.stdout" + +- include_tasks: ./empty_installed_collections.yml + when: cleanup diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml new file mode 100644 index 00000000..f4beb9d6 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml @@ -0,0 +1,19 @@ +- name: ensure git is installed + package: + name: git + when: ansible_distribution != "MacOSX" + register: git_install + +- name: set git global user.email if not already set + shell: git config --global user.email || git config --global user.email "noreply@example.com" + +- name: set git global user.name if not already set + shell: git config --global user.name || git config --global user.name "Ansible Test Runner" + +- name: Create a directory for installing collections and creating git 
repositories + file: + path: '{{ item }}' + state: directory + loop: + - '{{ galaxy_dir }}/ansible_collections' + - '{{ galaxy_dir }}/development/ansible_test' diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml new file mode 100644 index 00000000..4a662ca6 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml @@ -0,0 +1,27 @@ +- name: Initialize a git repo + command: 'git init {{ galaxy_dir }}/development/ansible_test' + +- stat: + path: "{{ galaxy_dir }}/development/ansible_test" + +- name: Add a couple collections to the repository + command: 'ansible-galaxy collection init {{ item }}' + args: + chdir: '{{ galaxy_dir }}/development' + loop: + - 'ansible_test.collection_1' + - 'ansible_test.collection_2' + +- name: Add collection_2 as a dependency of collection_1 + lineinfile: + path: '{{ galaxy_dir }}/development/ansible_test/collection_1/galaxy.yml' + regexp: '^dependencies' + line: "dependencies: {'git+file://{{ galaxy_dir }}/development/ansible_test/.git#collection_2/': '*'}" + +- name: Commit the changes + command: '{{ item }}' + args: + chdir: '{{ galaxy_dir }}/development/ansible_test' + loop: + - git add ./ + - git commit -m 'add collections' diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml new file mode 100644 index 00000000..df0af917 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml @@ -0,0 +1,33 @@ +- name: Initialize git repositories + command: 'git init {{ galaxy_dir }}/development/{{ item }}' + loop: + - namespace_1 + - namespace_2 + +- name: Add a couple collections to the repository + command: 'ansible-galaxy collection init {{ item }}' + args: + chdir: '{{ galaxy_dir }}/development' + loop: + - 'namespace_1.collection_1' + - 'namespace_2.collection_2' + +- name: Add collection_2 as a dependency of collection_1 + lineinfile: + path: '{{ galaxy_dir }}/development/namespace_1/collection_1/galaxy.yml' + regexp: '^dependencies' + line: "dependencies: {'git+file://{{ galaxy_dir }}/development/namespace_2/.git#collection_2/': '*'}" + +- name: Add collection_1 as a dependency on collection_2 + lineinfile: + path: '{{ galaxy_dir }}/development/namespace_2/collection_2/galaxy.yml' + regexp: '^dependencies' + line: "dependencies: {'git+file://{{ galaxy_dir }}/development/namespace_1/.git#collection_1/': 'master'}" + +- name: Commit the changes + shell: git add ./; git commit -m 'add collection' + args: + chdir: '{{ galaxy_dir }}/development/{{ item }}' + loop: + - namespace_1 + - namespace_2 diff --git a/test/integration/targets/ansible-galaxy-collection/aliases b/test/integration/targets/ansible-galaxy-collection/aliases new file mode 100644 index 00000000..4b3ebea3 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/aliases @@ -0,0 +1,3 @@ +shippable/fallaxy/group1 +shippable/fallaxy/smoketest +cloud/fallaxy diff --git a/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py b/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py new file mode 100644 index 00000000..6182e865 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py @@ -0,0 +1,84 @@ +#!/usr/bin/env 
python + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import hashlib +import io +import json +import os +import sys +import tarfile + +manifest = { + 'collection_info': { + 'namespace': 'suspicious', + 'name': 'test', + 'version': '1.0.0', + 'dependencies': {}, + }, + 'file_manifest_file': { + 'name': 'FILES.json', + 'ftype': 'file', + 'chksum_type': 'sha256', + 'chksum_sha256': None, + 'format': 1 + }, + 'format': 1, +} + +files = { + 'files': [ + { + 'name': '.', + 'ftype': 'dir', + 'chksum_type': None, + 'chksum_sha256': None, + 'format': 1, + }, + ], + 'format': 1, +} + + +def add_file(tar_file, filename, b_content, update_files=True): + tar_info = tarfile.TarInfo(filename) + tar_info.size = len(b_content) + tar_info.mode = 0o0755 + tar_file.addfile(tarinfo=tar_info, fileobj=io.BytesIO(b_content)) + + if update_files: + sha256 = hashlib.sha256() + sha256.update(b_content) + + files['files'].append({ + 'name': filename, + 'ftype': 'file', + 'chksum_type': 'sha256', + 'chksum_sha256': sha256.hexdigest(), + 'format': 1 + }) + + +collection_tar = os.path.join(sys.argv[1], 'suspicious-test-1.0.0.tar.gz') +with tarfile.open(collection_tar, mode='w:gz') as tar_file: + add_file(tar_file, '../../outside.sh', b"#!/usr/bin/env bash\necho \"you got pwned\"") + + b_files = json.dumps(files).encode('utf-8') + b_files_hash = hashlib.sha256() + b_files_hash.update(b_files) + manifest['file_manifest_file']['chksum_sha256'] = b_files_hash.hexdigest() + add_file(tar_file, 'FILES.json', b_files) + add_file(tar_file, 'MANIFEST.json', json.dumps(manifest).encode('utf-8')) + + b_manifest = json.dumps(manifest).encode('utf-8') + + for name, b in [('MANIFEST.json', b_manifest), ('FILES.json', b_files)]: + b_io = io.BytesIO(b) + tar_info = tarfile.TarInfo(name) + tar_info.size = len(b) + tar_info.mode = 0o0644 + tar_file.addfile(tarinfo=tar_info, fileobj=b_io) diff --git a/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py b/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py new file mode 100644 index 00000000..b876a65f --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py @@ -0,0 +1,169 @@ +#!/usr/bin/python + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: setup_collections +short_description: Set up test collections based on the input +description: +- Builds and publishes a whole bunch of collections used for testing in bulk. +options: + server: + description: + - The Galaxy server to upload the collections to. + required: yes + type: str + token: + description: + - The token used to authenticate with the Galaxy server. + required: yes + type: str + collections: + description: + - A list of collection details to use for the build. + required: yes + type: list + elements: dict + options: + namespace: + description: + - The namespace of the collection. + required: yes + type: str + name: + description: + - The name of the collection. 
+ required: yes + type: str + version: + description: + - The version of the collection. + type: str + default: '1.0.0' + dependencies: + description: + - The dependencies of the collection. + type: dict + default: '{}' +author: +- Jordan Borean (@jborean93) +''' + +EXAMPLES = ''' +- name: Build test collections + setup_collections: + path: ~/ansible/collections/ansible_collections + collections: + - namespace: namespace1 + name: name1 + version: 0.0.1 + - namespace: namespace1 + name: name1 + version: 0.0.2 +''' + +RETURN = ''' +# +''' + +import os +import tempfile +import yaml + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes + + +def run_module(): + module_args = dict( + server=dict(type='str', required=True), + token=dict(type='str', required=True), + collections=dict( + type='list', + elements='dict', + required=True, + options=dict( + namespace=dict(type='str', required=True), + name=dict(type='str', required=True), + version=dict(type='str', default='1.0.0'), + dependencies=dict(type='dict', default={}), + use_symlink=dict(type='bool', default=False), + ), + ), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=False + ) + + result = dict(changed=True) + + for idx, collection in enumerate(module.params['collections']): + collection_dir = os.path.join(module.tmpdir, "%s-%s-%s" % (collection['namespace'], collection['name'], + collection['version'])) + b_collection_dir = to_bytes(collection_dir, errors='surrogate_or_strict') + os.mkdir(b_collection_dir) + + with open(os.path.join(b_collection_dir, b'README.md'), mode='wb') as fd: + fd.write(b"Collection readme") + + galaxy_meta = { + 'namespace': collection['namespace'], + 'name': collection['name'], + 'version': collection['version'], + 'readme': 'README.md', + 'authors': ['Collection author =0.5.0,<1.0.0' + - namespace: child_dep + name: child_collection + version: 0.4.0 + - namespace: child_dep + name: child_collection + version: 0.5.0 + - namespace: child_dep + name: child_collection + version: 0.9.9 + dependencies: + child_dep.child_dep2: '!=1.2.3' + - namespace: child_dep + name: child_collection + - namespace: child_dep + name: child_dep2 + version: 1.2.2 + - namespace: child_dep + name: child_dep2 + version: 1.2.3 + + # Dep resolution failure + - namespace: fail_namespace + name: fail_collection + version: 2.1.2 + dependencies: + fail_dep.name: '0.0.5' + fail_dep2.name: '<0.0.5' + - namespace: fail_dep + name: name + version: '0.0.5' + dependencies: + fail_dep2.name: '>0.0.5' + - namespace: fail_dep2 + name: name + + # Symlink tests + - namespace: symlink + name: symlink + use_symlink: yes + +- name: run ansible-galaxy collection install tests for {{ test_name }} + include_tasks: install.yml + vars: + test_name: '{{ item.name }}' + test_server: '{{ item.server }}' + with_items: + - name: galaxy + server: '{{ fallaxy_galaxy_server }}' + - name: automation_hub + server: '{{ fallaxy_ah_server }}' + +# fake.fake does not exist but we check the output to ensure it checked all 3 +# servers defined in the config. 
We hardcode -vvv as that's the level at which the # message is shown +- name: test install fallback on server list + command: ansible-galaxy collection install fake.fake -vvv + ignore_errors: yes + environment: + ANSIBLE_CONFIG: '{{ galaxy_dir }}/ansible.cfg' + register: missing_fallback + +- name: assert test install fallback on server list + assert: + that: + - missing_fallback.rc == 1 + - '"Collection ''fake.fake'' is not available from server galaxy" in missing_fallback.stdout' + - '"Collection ''fake.fake'' is not available from server automation_hub" in missing_fallback.stdout' + +- name: run ansible-galaxy collection download tests + include_tasks: download.yml diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml b/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml new file mode 100644 index 00000000..aa137304 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml @@ -0,0 +1,46 @@ +--- +- name: fail to publish with no token - {{ test_name }} + command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} {{ galaxy_verbosity }} + args: + chdir: '{{ galaxy_dir }}' + register: fail_no_token + failed_when: '"HTTP Code: 401" not in fail_no_token.stderr' + +- name: fail to publish with invalid token - {{ test_name }} + command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token fail {{ galaxy_verbosity }} + args: + chdir: '{{ galaxy_dir }}' + register: fail_invalid_token + failed_when: '"HTTP Code: 401" not in fail_invalid_token.stderr' + +- name: publish collection - {{ test_name }} + command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token {{ fallaxy_token }} {{ galaxy_verbosity }} + args: + chdir: '{{ galaxy_dir }}' + register: publish_collection + +- name: get result of publish collection - {{ test_name }} + uri: + url: '{{ test_server }}v2/collections/ansible_test/my_collection/versions/1.0.0/' + return_content: yes + register: publish_collection_actual + +- name: assert publish collection - {{ test_name }} + assert: + that: + - '"Collection has been successfully published and imported to the Galaxy server" in publish_collection.stdout' + - publish_collection_actual.json.metadata.name == 'my_collection' + - publish_collection_actual.json.metadata.namespace == 'ansible_test' + - publish_collection_actual.json.metadata.version == '1.0.0' + +- name: fail to publish existing collection version - {{ test_name }} + command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token {{ fallaxy_token }} {{ galaxy_verbosity }} + args: + chdir: '{{ galaxy_dir }}' + register: fail_publish_existing + failed_when: '"Artifact already exists" not in fail_publish_existing.stderr' + +- name: reset published collections - {{ test_name }} + uri: + url: '{{ test_server }}custom/reset/' + method: POST diff --git a/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2 b/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2 new file mode 100644 index 00000000..74d36aac --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2 @@ -0,0 +1,10 @@ +[galaxy] +server_list=galaxy,automation_hub + +[galaxy_server.galaxy] +url={{ fallaxy_galaxy_server }} +token={{ fallaxy_token }} + +[galaxy_server.automation_hub] +url={{ fallaxy_ah_server }} +token={{ fallaxy_token }}
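The server_list fallback asserted above ("test install fallback on server list") can be pictured with a small standalone sketch. Everything below is illustrative only: the REGISTRY catalogue and the resolve_collection helper are assumptions made for the example, not Ansible's real resolver API; only the "not available from server" wording mirrors the asserted output.

    # Minimal sketch of server-list fallback, assuming a hypothetical
    # per-server catalogue. Each server in server_list is tried in order,
    # and an error is raised only after every server misses, yielding one
    # "not available from server ..." message per configured server.
    REGISTRY = {
        'galaxy': {'ansible_test.my_collection'},
        'automation_hub': {'amazon.aws'},
    }

    def resolve_collection(fqcn, servers):
        errors = []
        for server in servers:
            if fqcn in REGISTRY.get(server, set()):
                return server
            errors.append("Collection '%s' is not available from server %s" % (fqcn, server))
        raise LookupError('; '.join(errors))

    if __name__ == '__main__':
        print(resolve_collection('amazon.aws', ['galaxy', 'automation_hub']))
        try:
            resolve_collection('fake.fake', ['galaxy', 'automation_hub'])
        except LookupError as exc:
            print(exc)  # one message per configured server, as the test asserts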
diff --git a/test/integration/targets/ansible-galaxy-collection/vars/main.yml b/test/integration/targets/ansible-galaxy-collection/vars/main.yml new file mode 100644 index 00000000..bc006ca5 --- /dev/null +++ b/test/integration/targets/ansible-galaxy-collection/vars/main.yml @@ -0,0 +1 @@ +galaxy_verbosity: "{{ '' if not ansible_verbosity else '-' ~ ('v' * ansible_verbosity) }}" diff --git a/test/integration/targets/ansible-galaxy/aliases b/test/integration/targets/ansible-galaxy/aliases new file mode 100644 index 00000000..48ed7d60 --- /dev/null +++ b/test/integration/targets/ansible-galaxy/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group4 +skip/python2.6 # build uses tarfile with features not available until 2.7 +skip/aix diff --git a/test/integration/targets/ansible-galaxy/cleanup-default.yml b/test/integration/targets/ansible-galaxy/cleanup-default.yml new file mode 100644 index 00000000..f2265c09 --- /dev/null +++ b/test/integration/targets/ansible-galaxy/cleanup-default.yml @@ -0,0 +1,5 @@ +- name: remove unwanted packages + package: + name: git + state: absent + when: git_install.changed diff --git a/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml b/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml new file mode 100644 index 00000000..fa224d83 --- /dev/null +++ b/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml @@ -0,0 +1,6 @@ +- name: remove auto-installed packages from FreeBSD + pkgng: + name: git + state: absent + autoremove: yes + when: git_install.changed diff --git a/test/integration/targets/ansible-galaxy/cleanup.yml b/test/integration/targets/ansible-galaxy/cleanup.yml new file mode 100644 index 00000000..57442631 --- /dev/null +++ b/test/integration/targets/ansible-galaxy/cleanup.yml @@ -0,0 +1,19 @@ +- hosts: localhost + vars: + git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") | from_json }}' + tasks: + - name: cleanup + include_tasks: "{{ cleanup_filename }}" + with_first_found: + - "cleanup-{{ ansible_distribution | lower }}.yml" + - "cleanup-default.yml" + loop_control: + loop_var: cleanup_filename + + - name: Remove default collection directories + file: + path: "{{ item }}" + state: absent + loop: + - "~/.ansible/collections/ansible_collections" + - /usr/share/ansible/collections/ansible_collections diff --git a/test/integration/targets/ansible-galaxy/runme.sh b/test/integration/targets/ansible-galaxy/runme.sh new file mode 100755 index 00000000..22587001 --- /dev/null +++ b/test/integration/targets/ansible-galaxy/runme.sh @@ -0,0 +1,449 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +ansible-playbook setup.yml "$@" + +trap 'ansible-playbook ${ANSIBLE_PLAYBOOK_DIR}/cleanup.yml' EXIT + +# Very simple version test +ansible-galaxy --version + +# Need a relative custom roles path for testing various scenarios of -p +galaxy_relative_rolespath="my/custom/roles/path" + +# Status message function (f_ to designate that it's a function) +f_ansible_galaxy_status() +{ + printf "\n\n\n### Testing ansible-galaxy: %s\n" "${@}" +} + +# Use to initialize a repository. Must call the post function too. +f_ansible_galaxy_create_role_repo_pre() +{ + repo_name=$1 + repo_dir=$2 + + pushd "${repo_dir}" + ansible-galaxy init "${repo_name}" + pushd "${repo_name}" + git init . 
+ + # Prep git, because it doesn't work inside a docker container without it + git config user.email "tester@ansible.com" + git config user.name "Ansible Tester" + + # f_ansible_galaxy_create_role_repo_post +} + +# Call after f_ansible_galaxy_create_role_repo_pre. +f_ansible_galaxy_create_role_repo_post() +{ + repo_name=$1 + repo_tar=$2 + + # f_ansible_galaxy_create_role_repo_pre + + git add . + git commit -m "local testing ansible galaxy role" + + git archive \ + --format=tar \ + --prefix="${repo_name}/" \ + master > "${repo_tar}" + popd # "${repo_name}" + popd # "${repo_dir}" +} + +# Prep the local git repos with a role and make a tar archive so we can test +# different things +galaxy_local_test_role="test-role" +galaxy_local_test_role_dir=$(mktemp -d) +galaxy_local_test_role_git_repo="${galaxy_local_test_role_dir}/${galaxy_local_test_role}" +galaxy_local_test_role_tar="${galaxy_local_test_role_dir}/${galaxy_local_test_role}.tar" + +f_ansible_galaxy_create_role_repo_pre "${galaxy_local_test_role}" "${galaxy_local_test_role_dir}" +f_ansible_galaxy_create_role_repo_post "${galaxy_local_test_role}" "${galaxy_local_test_role_tar}" + +galaxy_local_parent_role="parent-role" +galaxy_local_parent_role_dir=$(mktemp -d) +galaxy_local_parent_role_git_repo="${galaxy_local_parent_role_dir}/${galaxy_local_parent_role}" +galaxy_local_parent_role_tar="${galaxy_local_parent_role_dir}/${galaxy_local_parent_role}.tar" + +# Create parent-role repository +f_ansible_galaxy_create_role_repo_pre "${galaxy_local_parent_role}" "${galaxy_local_parent_role_dir}" + + cat <<EOF > meta/requirements.yml +- src: git+file:///${galaxy_local_test_role_git_repo} +EOF +f_ansible_galaxy_create_role_repo_post "${galaxy_local_parent_role}" "${galaxy_local_parent_role_tar}" + +# Galaxy install test case +# +# Install local git repo +f_ansible_galaxy_status "install of local git repo" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + + ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" "$@" + + # Test that the role was installed to the expected directory + [[ -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]] +popd # ${galaxy_testdir} +rm -fr "${galaxy_testdir}" +rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}" + +# Galaxy install test case +# +# Install local git repo and ensure that if a role_path is passed, it is in fact used +f_ansible_galaxy_status "install of local git repo with -p \$role_path" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + mkdir -p "${galaxy_relative_rolespath}" + + ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" -p "${galaxy_relative_rolespath}" "$@" + + # Test that the role was installed to the expected directory + [[ -d "${galaxy_relative_rolespath}/${galaxy_local_test_role}" ]] +popd # ${galaxy_testdir} +rm -fr "${galaxy_testdir}" + +# Galaxy install test case +# +# Install local git repo with a meta/requirements.yml +f_ansible_galaxy_status "install of local git repo with meta/requirements.yml" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + + ansible-galaxy install git+file:///"${galaxy_local_parent_role_git_repo}" "$@" + + # Test that the role was installed to the expected directory + [[ -d "${HOME}/.ansible/roles/${galaxy_local_parent_role}" ]] + + # Test that the dependency was also installed + [[ -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]] + +popd # ${galaxy_testdir} +rm -fr "${galaxy_testdir}" +rm -fr "${HOME}/.ansible/roles/${galaxy_local_parent_role}" +rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}"
+ +# Galaxy install test case +# +# Install local git repo with a meta/requirements.yml + --no-deps argument +f_ansible_galaxy_status "install of local git repo with meta/requirements.yml + --no-deps argument" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + + ansible-galaxy install git+file:///"${galaxy_local_parent_role_git_repo}" --no-deps "$@" + + # Test that the role was installed to the expected directory + [[ -d "${HOME}/.ansible/roles/${galaxy_local_parent_role}" ]] + + # Test that the dependency was not installed + [[ ! -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]] + +popd # ${galaxy_testdir} +rm -fr "${galaxy_testdir}" +rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}" + +# Galaxy install test case +# +# Ensure that if both a role_file and role_path are provided, they are both +# honored +# +# Protect against regression (GitHub Issue #35217) +# https://github.com/ansible/ansible/issues/35217 + +f_ansible_galaxy_status \ + "install of local git repo and local tarball with -p \$role_path and -r \$role_file" \ + "Protect against regression (Issue #35217)" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + + git clone "${galaxy_local_test_role_git_repo}" "${galaxy_local_test_role}" + ansible-galaxy init roles-path-bug "$@" + pushd roles-path-bug + cat <<EOF > ansible.cfg +[defaults] +roles_path = ../:../../:../roles:roles/ +EOF + cat <<EOF > requirements.yml +--- +- src: ${galaxy_local_test_role_tar} + name: ${galaxy_local_test_role} +EOF + + ansible-galaxy install -r requirements.yml -p roles/ "$@" + popd # roles-path-bug + + # Test that the role was installed to the expected directory + [[ -d "${galaxy_testdir}/roles-path-bug/roles/${galaxy_local_test_role}" ]] + +popd # ${galaxy_testdir} +rm -fr "${galaxy_testdir}" + + +# Galaxy role list tests +# +# Basic tests to ensure listing roles works + +f_ansible_galaxy_status "role list" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" "$@" + + ansible-galaxy role list | tee out.txt + ansible-galaxy role list test-role | tee -a out.txt + + [[ $(grep -c '^- test-role' out.txt ) -eq 2 ]] +popd # ${galaxy_testdir} + +# Galaxy role test case +# +# Test listing a specific role that is not in the first path in ANSIBLE_ROLES_PATH.
+# https://github.com/ansible/ansible/issues/60167#issuecomment-585460706 + +f_ansible_galaxy_status \ + "list specific role not in the first path in ANSIBLE_ROLES_PATH" + +role_testdir=$(mktemp -d) +pushd "${role_testdir}" + + mkdir testroles + ansible-galaxy role init --init-path ./local-roles quark + ANSIBLE_ROLES_PATH=./local-roles:${HOME}/.ansible/roles ansible-galaxy role list quark | tee out.txt + + [[ $(grep -c 'not found' out.txt) -eq 0 ]] + + ANSIBLE_ROLES_PATH=${HOME}/.ansible/roles:./local-roles ansible-galaxy role list quark | tee out.txt + + [[ $(grep -c 'not found' out.txt) -eq 0 ]] + +popd # ${role_testdir} +rm -fr "${role_testdir}" + + +# Galaxy role info tests + +# Get info about a role that is not installed + +f_ansible_galaxy_status "role info" +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + ansible-galaxy role info samdoran.fish | tee out.txt + + [[ $(grep -c 'not found' out.txt ) -eq 0 ]] + [[ $(grep -c 'Role:.*samdoran\.fish' out.txt ) -eq 1 ]] + +popd # ${galaxy_testdir} + +f_ansible_galaxy_status \ + "role info non-existent role" + +role_testdir=$(mktemp -d) +pushd "${role_testdir}" + + ansible-galaxy role info notaroll | tee out.txt + + grep -- '- the role notaroll was not found' out.txt + +f_ansible_galaxy_status \ + "role info description offline" + + mkdir testroles + ansible-galaxy role init testdesc --init-path ./testroles + + # Only galaxy_info['description'] exists in file + sed -i -e 's#[[:space:]]\{1,\}description:.*$# description: Description in galaxy_info#' ./testroles/testdesc/meta/main.yml + ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt + grep 'description: Description in galaxy_info' out.txt + + # Both top level 'description' and galaxy_info['description'] exist in file + # Use shell-fu instead of sed to prepend a line to a file because BSD + # and macOS sed don't work the same as GNU sed. + echo 'description: Top level' | \ + cat - ./testroles/testdesc/meta/main.yml > tmp.yml && \ + mv tmp.yml ./testroles/testdesc/meta/main.yml + ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt + grep 'description: Top level' out.txt + + # Only top level 'description' exists in file + sed -i.bak '/^[[:space:]]\{1,\}description: Description in galaxy_info/d' ./testroles/testdesc/meta/main.yml + ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt + grep 'description: Top level' out.txt + +popd # ${role_testdir} +rm -fr "${role_testdir}" + +# Properly list roles when the role name is a subset of the path, or the role +# name is the same name as the parent directory of the role.
Issue #67365 +# +# ./parrot/parrot +# ./parrot/arr +# ./testing-roles/test + +f_ansible_galaxy_status \ + "list roles where the role name is the same or a subset of the role path (#67365)" + +role_testdir=$(mktemp -d) +pushd "${role_testdir}" + + mkdir parrot + ansible-galaxy role init --init-path ./parrot parrot + ansible-galaxy role init --init-path ./parrot parrot-ship + ansible-galaxy role init --init-path ./parrot arr + + ansible-galaxy role list -p ./parrot | tee out.txt + + [[ $(grep -Ec '\- (parrot|arr)' out.txt) -eq 3 ]] + ansible-galaxy role list test-role | tee -a out.txt + +popd # ${role_testdir} +rm -rf "${role_testdir}" + +f_ansible_galaxy_status \ + "Test role with non-ascii characters" + +role_testdir=$(mktemp -d) +pushd "${role_testdir}" + + mkdir nonascii + ansible-galaxy role init --init-path ./nonascii nonascii + touch nonascii/ÅÑŚÌβŁÈ.txt + tar czvf nonascii.tar.gz nonascii + ansible-galaxy role install -p ./roles nonascii.tar.gz + +popd # ${role_testdir} +rm -rf "${role_testdir}" + +f_ansible_galaxy_status \ + "Test if git hidden directories are skipped while using role skeleton (#71977)" + +role_testdir=$(mktemp -d) +pushd "${role_testdir}" + + ansible-galaxy role init sample-role-skeleton + git init ./sample-role-skeleton + ansible-galaxy role init --role-skeleton=sample-role-skeleton example + +popd # ${role_testdir} +rm -rf "${role_testdir}" + +################################# +# ansible-galaxy collection tests +################################# +# TODO: Move these to ansible-galaxy-collection + +galaxy_testdir=$(mktemp -d) +pushd "${galaxy_testdir}" + +## ansible-galaxy collection list tests + +# Create more collections and put them in various places +f_ansible_galaxy_status \ + "setting up for collection list tests" + +rm -rf ansible_test/* install/* + +NAMES=(zoo museum airport) +for n in "${NAMES[@]}"; do + ansible-galaxy collection init "ansible_test.$n" + ansible-galaxy collection build "ansible_test/$n" +done + +ansible-galaxy collection install ansible_test-zoo-1.0.0.tar.gz +ansible-galaxy collection install ansible_test-museum-1.0.0.tar.gz -p ./install +ansible-galaxy collection install ansible_test-airport-1.0.0.tar.gz -p ./local + +# Change the collection version and install to another location +sed -i -e 's#^version:.*#version: 2.5.0#' ansible_test/zoo/galaxy.yml +ansible-galaxy collection build ansible_test/zoo +ansible-galaxy collection install ansible_test-zoo-2.5.0.tar.gz -p ./local + +# Test listing a collection that contains a galaxy.yml +ansible-galaxy collection init "ansible_test.development" +mv ./ansible_test/development "${galaxy_testdir}/local/ansible_collections/ansible_test/" + +export ANSIBLE_COLLECTIONS_PATH=~/.ansible/collections:${galaxy_testdir}/local + +f_ansible_galaxy_status \ + "collection list all collections" + + ansible-galaxy collection list -p ./install | tee out.txt + + [[ $(grep -c ansible_test out.txt) -eq 5 ]] + +f_ansible_galaxy_status \ + "collection list specific collection" + + ansible-galaxy collection list -p ./install ansible_test.airport | tee out.txt + + [[ $(grep -c 'ansible_test\.airport' out.txt) -eq 1 ]] + +f_ansible_galaxy_status \ + "collection list specific collection which contains galaxy.yml" + + ansible-galaxy collection list -p ./install ansible_test.development 2>&1 | tee out.txt + + [[ $(grep -c 'ansible_test\.development' out.txt) -eq 1 ]] + [[ $(grep -c 'WARNING' out.txt) -eq 0 ]] + +f_ansible_galaxy_status \ + "collection list specific collection found in multiple places" + + ansible-galaxy 
collection list -p ./install ansible_test.zoo | tee out.txt + + [[ $(grep -c 'ansible_test\.zoo' out.txt) -eq 2 ]] + +f_ansible_galaxy_status \ + "collection list all with duplicate paths" + + ansible-galaxy collection list -p ~/.ansible/collections | tee out.txt + + [[ $(grep -c '# /root/.ansible/collections/ansible_collections' out.txt) -eq 1 ]] + +f_ansible_galaxy_status \ + "collection list invalid collection name" + + ansible-galaxy collection list -p ./install dirty.wraughten.name "$@" 2>&1 | tee out.txt || echo "expected failure" + + grep 'ERROR! Invalid collection name' out.txt + +f_ansible_galaxy_status \ + "collection list path not found" + + ansible-galaxy collection list -p ./nope "$@" 2>&1 | tee out.txt || echo "expected failure" + + grep '\[WARNING\]: - the configured path' out.txt + +f_ansible_galaxy_status \ + "collection list missing ansible_collections dir inside path" + + mkdir emptydir + + ansible-galaxy collection list -p ./emptydir "$@" + + rmdir emptydir + +unset ANSIBLE_COLLECTIONS_PATH + +f_ansible_galaxy_status \ + "collection list with collections installed from python package" + + mkdir -p test-site-packages + ln -s "${galaxy_testdir}/local/ansible_collections" test-site-packages/ansible_collections + ansible-galaxy collection list + PYTHONPATH="./test-site-packages/:$PYTHONPATH" ansible-galaxy collection list | tee out.txt + + grep ".ansible/collections/ansible_collections" out.txt + grep "test-site-packages/ansible_collections" out.txt + +## end ansible-galaxy collection list + + +popd # ${galaxy_testdir} + +rm -fr "${galaxy_testdir}" + +rm -fr "${galaxy_local_test_role_dir}" diff --git a/test/integration/targets/ansible-galaxy/setup.yml b/test/integration/targets/ansible-galaxy/setup.yml new file mode 100644 index 00000000..a82d02ae --- /dev/null +++ b/test/integration/targets/ansible-galaxy/setup.yml @@ -0,0 +1,11 @@ +- hosts: localhost + tasks: + - name: install git + package: + name: git + when: ansible_distribution != "MacOSX" + register: git_install + - name: save install result + copy: + content: '{{ git_install }}' + dest: '{{ lookup("env", "OUTPUT_DIR") }}/git_install.json' diff --git a/test/integration/targets/ansible-runner/aliases b/test/integration/targets/ansible-runner/aliases new file mode 100644 index 00000000..ec9eb3af --- /dev/null +++ b/test/integration/targets/ansible-runner/aliases @@ -0,0 +1,6 @@ +shippable/posix/group3 +skip/python3 +skip/aix +skip/osx +skip/macos +skip/freebsd diff --git a/test/integration/targets/ansible-runner/files/adhoc_example1.py b/test/integration/targets/ansible-runner/files/adhoc_example1.py new file mode 100644 index 00000000..3e0d8414 --- /dev/null +++ b/test/integration/targets/ansible-runner/files/adhoc_example1.py @@ -0,0 +1,26 @@ +import json +import os +import sys +import ansible_runner + +# the first positional arg should be where the artifacts live +output_dir = sys.argv[1] + +# this calls a single module directly, aka "adhoc" mode +r = ansible_runner.run( + private_data_dir=output_dir, + host_pattern='localhost', + module='shell', + module_args='whoami' +) + +data = { + 'rc': r.rc, + 'status': r.status, + 'events': [x['event'] for x in r.events], + 'stats': r.stats +} + +# insert this header for the flask controller +print('#STARTJSON') +json.dump(data, sys.stdout) diff --git a/test/integration/targets/ansible-runner/files/constraints.txt b/test/integration/targets/ansible-runner/files/constraints.txt new file mode 100644 index 00000000..c3e39402 --- /dev/null +++ 
b/test/integration/targets/ansible-runner/files/constraints.txt @@ -0,0 +1,5 @@ +psutil < 5.7.0 # Greater than this version breaks on older pip +pexpect >= 4.5, <= 4.8.0 +python-daemon <= 2.2.4 +pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later +six <= 1.14.0 diff --git a/test/integration/targets/ansible-runner/files/playbook_example1.py b/test/integration/targets/ansible-runner/files/playbook_example1.py new file mode 100644 index 00000000..83cb19ff --- /dev/null +++ b/test/integration/targets/ansible-runner/files/playbook_example1.py @@ -0,0 +1,38 @@ +import json +import os +import sys +import ansible_runner + + +PLAYBOOK = ''' +- hosts: localhost + gather_facts: False + tasks: + - set_fact: + foo: bar +''' + +# the first positional arg should be where the artifacts live +output_dir = sys.argv[1] + +invdir = os.path.join(output_dir, 'inventory') +if not os.path.isdir(invdir): + os.makedirs(invdir) +with open(os.path.join(invdir, 'hosts'), 'w') as f: + f.write('localhost\n') +pbfile = os.path.join(output_dir, 'test.yml') +with open(pbfile, 'w') as f: + f.write(PLAYBOOK) + +r = ansible_runner.run(private_data_dir=output_dir, playbook='test.yml') + +data = { + 'rc': r.rc, + 'status': r.status, + 'events': [x['event'] for x in r.events], + 'stats': r.stats +} + +# insert this header for the flask controller +print('#STARTJSON') +json.dump(data, sys.stdout) diff --git a/test/integration/targets/ansible-runner/filter_plugins/parse.py b/test/integration/targets/ansible-runner/filter_plugins/parse.py new file mode 100644 index 00000000..7842f6c6 --- /dev/null +++ b/test/integration/targets/ansible-runner/filter_plugins/parse.py @@ -0,0 +1,17 @@ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import re +import json + + +def parse_json(value): + return json.dumps(json.loads(re.sub('^.*\n#STARTJSON\n', '', value, flags=re.DOTALL)), indent=4, sort_keys=True) + + +class FilterModule(object): + def filters(self): + return { + 'parse_json': parse_json, + } diff --git a/test/integration/targets/ansible-runner/inventory b/test/integration/targets/ansible-runner/inventory new file mode 100644 index 00000000..009f6c33 --- /dev/null +++ b/test/integration/targets/ansible-runner/inventory @@ -0,0 +1 @@ +# no hosts required, test only requires implicit localhost diff --git a/test/integration/targets/ansible-runner/runme.sh b/test/integration/targets/ansible-runner/runme.sh new file mode 100755 index 00000000..384de80f --- /dev/null +++ b/test/integration/targets/ansible-runner/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=../ ansible-playbook test.yml -i inventory "$@" diff --git a/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml b/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml new file mode 100644 index 00000000..c6fdf03f --- /dev/null +++ b/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml @@ -0,0 +1,16 @@ +- name: execute the script + command: "'{{ ansible_python_interpreter }}' '{{ role_path }}/files/adhoc_example1.py' '{{ lookup('env', 'OUTPUT_DIR') }}'" + environment: + AWX_LIB_DIRECTORY: "{{ callback_path }}" + register: script + +- name: parse script output + # work around for ansible-runner showing ansible warnings on stdout + set_fact: + adexec1_json: "{{ script.stdout | parse_json }}" + +- assert: + that: + - "adexec1_json.rc == 0" + - "adexec1_json.events|length == 4" + - "'localhost' in adexec1_json.stats.ok" diff --git 
a/test/integration/targets/ansible-runner/tasks/main.yml b/test/integration/targets/ansible-runner/tasks/main.yml new file mode 100644 index 00000000..5608786b --- /dev/null +++ b/test/integration/targets/ansible-runner/tasks/main.yml @@ -0,0 +1,5 @@ +- block: + - include_tasks: setup.yml + - include_tasks: adhoc_example1.yml + - include_tasks: playbook_example1.yml + when: ansible_distribution in ('RedHat', 'CentOS') and ansible_distribution_major_version == '7' diff --git a/test/integration/targets/ansible-runner/tasks/playbook_example1.yml b/test/integration/targets/ansible-runner/tasks/playbook_example1.yml new file mode 100644 index 00000000..ec1f7cda --- /dev/null +++ b/test/integration/targets/ansible-runner/tasks/playbook_example1.yml @@ -0,0 +1,16 @@ +- name: execute the script + command: "'{{ ansible_python_interpreter }}' '{{ role_path }}/files/playbook_example1.py' '{{ lookup('env', 'OUTPUT_DIR') }}'" + environment: + AWX_LIB_DIRECTORY: "{{ callback_path }}" + register: script + +- name: parse script output + # work around for ansible-runner showing ansible warnings on stdout + set_fact: + pbexec_json: "{{ script.stdout | parse_json }}" + +- assert: + that: + - "pbexec_json.rc == 0" + - "pbexec_json.events|length == 7" + - "'localhost' in pbexec_json.stats.ok" diff --git a/test/integration/targets/ansible-runner/tasks/setup.yml b/test/integration/targets/ansible-runner/tasks/setup.yml new file mode 100644 index 00000000..ea24ced5 --- /dev/null +++ b/test/integration/targets/ansible-runner/tasks/setup.yml @@ -0,0 +1,19 @@ +- name: Install docutils + pip: + name: docutils + +- name: Install ansible-runner + pip: + name: ansible-runner + version: 1.2.0 + extra_args: + -c {{ role_path }}/files/constraints.txt + +- name: Find location of ansible-runner installation + command: "'{{ ansible_python_interpreter }}' -c 'import os, ansible_runner; print(os.path.dirname(ansible_runner.__file__))'" + register: ansible_runner_path + +# work around for https://github.com/ansible/ansible-runner/issues/132 +- name: Set callback path to work around ansible-runner bug + set_fact: + callback_path: ":{{ ansible_runner_path.stdout }}/callbacks" diff --git a/test/integration/targets/ansible-runner/test.yml b/test/integration/targets/ansible-runner/test.yml new file mode 100644 index 00000000..113f8e7c --- /dev/null +++ b/test/integration/targets/ansible-runner/test.yml @@ -0,0 +1,3 @@ +- hosts: localhost + roles: + - ansible-runner diff --git a/test/integration/targets/ansible-test-docker/aliases b/test/integration/targets/ansible-test-docker/aliases new file mode 100644 index 00000000..d1284cf7 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/aliases @@ -0,0 +1 @@ +shippable/generic/group1 # Runs in the default test container so access to tools like pwsh diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml new file mode 100644 index 00000000..08a32e80 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml @@ -0,0 +1,6 @@ +namespace: ns +name: col +version: 1.0.0 +readme: README.rst +authors: + - Ansible diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py new file mode 100644 index 00000000..e69844b3 --- /dev/null +++ 
b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment: + + DOCUMENTATION = r''' +options: + option1: + description: + - Test description + required: yes + aliases: + - alias1 + type: str +''' diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1 b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1 new file mode 100644 index 00000000..d37e681a --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1 @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +Function Get-PSUtilSpec { + <# + .SYNOPSIS + Shared util spec test + #> + @{ + options = @{ + option1 = @{ type = 'str'; required = $true; aliases = 'alias1' } + } + } +} + +Export-ModuleMember -Function Get-PSUtilSpec diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py new file mode 100644 index 00000000..b9c531cf --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def hello(name): + return 'Hello %s' % name diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py new file mode 100644 index 00000000..c8a0cf75 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py @@ -0,0 +1,46 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: hello +short_description: Hello test module +description: Hello test module. +options: + name: + description: Name to say hello to.
+ type: str +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +- minimal: +''' + +RETURN = '''''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.my_util import hello + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str'), + ), + ) + + module.exit_json(**say_hello(module.params['name'])) + + +def say_hello(name): + return dict( + message=hello(name), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1 b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1 new file mode 100644 index 00000000..9dab99da --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1 @@ -0,0 +1,16 @@ +#!powershell + +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -PowerShell ..module_utils.PSUtil + +$spec = @{ + options = @{ + my_opt = @{ type = "str"; required = $true } + } +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-PSUtilSpec)) +$module.ExitJson() diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py new file mode 100644 index 00000000..ed49f4ea --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_util_args +short_description: Short description +description: +- Some test description for the module +options: + my_opt: + description: + - Test description + required: yes + type: str +extends_documentation_fragment: +- ns.col.ps_util + +author: +- Ansible Test (@ansible) +''' + +EXAMPLES = r''' +- win_util_args: + option1: test + my_opt: test +''' + +RETURN = r''' +# +''' diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml new file mode 100644 index 00000000..c45c199c --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml @@ -0,0 +1,7 @@ +- hello: + name: Ansibull + register: hello + +- assert: + that: + - hello.message == 'Hello Ansibull' diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py new file mode 100644 index 00000000..7df87103 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from
.....plugins.module_utils.my_util import hello + + +def test_hello(): + assert hello('Ansibull') == 'Hello Ansibull' diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py new file mode 100644 index 00000000..95ee0574 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from .....plugins.modules.hello import say_hello + + +def test_say_hello(): + assert say_hello('Ansibull') == dict(message='Hello Ansibull') diff --git a/test/integration/targets/ansible-test-docker/collection-tests/docker.sh b/test/integration/targets/ansible-test-docker/collection-tests/docker.sh new file mode 100755 index 00000000..e0e34290 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/collection-tests/docker.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" +cd "${WORK_DIR}/ansible_collections/ns/col" + +# common args for all tests +# because we are running in shippable/generic/ we are already in the default docker container +common=(--python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}") + +# prime the venv to work around issue with PyYAML detection in ansible-test +ansible-test sanity "${common[@]}" --test ignores + +# tests +ansible-test sanity "${common[@]}" +ansible-test units "${common[@]}" +ansible-test integration "${common[@]}" diff --git a/test/integration/targets/ansible-test-docker/runme.sh b/test/integration/targets/ansible-test-docker/runme.sh new file mode 100755 index 00000000..7c956b4f --- /dev/null +++ b/test/integration/targets/ansible-test-docker/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +# tests must be executed outside of the ansible source tree +# otherwise ansible-test will test the ansible source instead of the test collection +# the temporary directory provided by ansible-test resides within the ansible source tree +tmp_dir=$(mktemp -d) + +trap 'rm -rf "${tmp_dir}"' EXIT + +export TEST_DIR +export WORK_DIR + +TEST_DIR="$PWD" + +for test in collection-tests/*.sh; do + WORK_DIR="${tmp_dir}/$(basename "${test}" ".sh")" + mkdir "${WORK_DIR}" + echo "**********************************************************************" + echo "TEST: ${test}: STARTING" + "${test}" "${@}" || (echo "TEST: ${test}: FAILED" && exit 1) + echo "TEST: ${test}: PASSED" +done diff --git a/test/integration/targets/ansible-test/aliases b/test/integration/targets/ansible-test/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/ansible-test/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst b/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst new file mode 100644 index 00000000..d8138d3b --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst @@ -0,0 +1,3 @@ +README +------ +This is a simple collection used to verify that ``ansible-test`` works on a collection. 
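The runme.sh driver above (and the matching one in the ansible-test target further down) shares one harness shape: a scratch directory created outside the ansible source tree, cleanup registered via an EXIT trap, and a private WORK_DIR per sub-test script. A minimal sketch of that pattern on its own, with illustrative directory and test names:

    #!/usr/bin/env bash
    set -eu -o pipefail

    tmp_dir=$(mktemp -d)               # scratch space outside the source tree
    trap 'rm -rf "${tmp_dir}"' EXIT    # cleanup runs on success and failure alike

    for test in collection-tests/*.sh; do
        WORK_DIR="${tmp_dir}/$(basename "${test}" .sh)"
        export WORK_DIR
        mkdir "${WORK_DIR}"
        "${test}" || { echo "TEST: ${test}: FAILED"; exit 1; }
    done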
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml new file mode 100644 index 00000000..08a32e80 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml @@ -0,0 +1,6 @@ +namespace: ns +name: col +version: 1.0.0 +readme: README.rst +authors: + - Ansible diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml new file mode 100644 index 00000000..1ac15484 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml @@ -0,0 +1,4 @@ +plugin_routing: + modules: + hi: + redirect: hello diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/filter/check_pylint.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/filter/check_pylint.py new file mode 100644 index 00000000..359fbf07 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/filter/check_pylint.py @@ -0,0 +1,21 @@ +""" +These test cases verify ansible-test version constraints for pylint and its dependencies across Python versions. +The initial test cases were discovered while testing various Python versions against ansible/ansible. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +# Python 3.8 fails with astroid 2.2.5 but works on 2.3.3 +# syntax-error: Cannot import 'string' due to syntax error 'invalid syntax (<unknown>, line 109)' +# Python 3.9 fails with astroid 2.2.5 but works on 2.3.3 +# syntax-error: Cannot import 'string' due to syntax error 'invalid syntax (<unknown>, line 104)' +import string + +# Python 3.9 fails with pylint 2.3.1 or 2.4.4 with astroid 2.3.3 but works with pylint 2.5.0 and astroid 2.4.0 +# 'Call' object has no attribute 'value' +result = {None: None}[{}.get('something')] + +# pylint 2.3.1 and 2.4.4 report the following error but 2.5.0 and 2.6.0 do not +# blacklisted-name: Black listed name "foo" +# see: https://github.com/PyCQA/pylint/issues/3701 +foo = {}.keys() diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py new file mode 100644 index 00000000..b9c531cf --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def hello(name): + return 'Hello %s' % name diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/bad.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/bad.py new file mode 100644 index 00000000..e79613bb --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/bad.py @@ -0,0 +1,34 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: bad +short_description: Bad test module +description: Bad test module. +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +- bad: +''' + +RETURN = '''''' + +from ansible.module_utils.basic import AnsibleModule +from ansible import constants # intentionally trigger pylint ansible-bad-module-import error + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py new file mode 100644 index 00000000..033b6c90 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py @@ -0,0 +1,46 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: hello +short_description: Hello test module +description: Hello test module. +options: + name: + description: Name to say hello to. + type: str +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +- hello: +''' + +RETURN = '''''' + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.my_util import hello + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str'), + ), + ) + + module.exit_json(**say_hello(module.params['name'])) + + +def say_hello(name): + return dict( + message=hello(name), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py new file mode 100644 index 00000000..82215438 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import tempfile + +try: + import urllib2 # intentionally trigger pylint ansible-bad-import error +except ImportError: + urllib2 = None + +try: + from urllib2 import Request # intentionally trigger pylint ansible-bad-import-from error +except ImportError: + Request = None + +tempfile.mktemp() # intentionally trigger pylint ansible-bad-function error diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml new file mode 100644 index 00000000..c45c199c --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml @@ -0,0 +1,7 @@ +- hello: + name: Ansibull + register: hello + +- assert: + that: + - hello.message == 'Hello Ansibull' diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt new file mode 100644 index 00000000..079d0161 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt @@ -0,0 +1,6 @@ +plugins/filter/check_pylint.py pylint:blacklisted-name 
+plugins/modules/bad.py import +plugins/modules/bad.py pylint:ansible-bad-module-import +tests/integration/targets/hello/files/bad.py pylint:ansible-bad-function +tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import +tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import-from diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py new file mode 100644 index 00000000..7df87103 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from .....plugins.module_utils.my_util import hello + + +def test_hello(): + assert hello('Ansibull') == 'Hello Ansibull' diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py new file mode 100644 index 00000000..95ee0574 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from .....plugins.modules.hello import say_hello + + +def test_say_hello(): + assert say_hello('Ansibull') == dict(message='Hello Ansibull') diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt new file mode 100644 index 00000000..01bb5cff --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt @@ -0,0 +1 @@ +botocore == 1.13.49 diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt new file mode 100644 index 00000000..c5b9e129 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt @@ -0,0 +1 @@ +botocore diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml new file mode 100644 index 00000000..c2c1f1a4 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml @@ -0,0 +1,7 @@ +- name: get botocore version + command: python -c "import botocore; print(botocore.__version__)" + register: botocore_version +- name: check botocore version + assert: + that: + - 'botocore_version.stdout == "1.13.49"' diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt new file mode 100644 index 00000000..d0986894 --- /dev/null +++ 
b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt @@ -0,0 +1 @@ +botocore == 1.13.50 diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py new file mode 100644 index 00000000..857e8e55 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import botocore + + +def test_constraints(): + assert botocore.__version__ == '1.13.50' diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt new file mode 100644 index 00000000..c5b9e129 --- /dev/null +++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt @@ -0,0 +1 @@ +botocore diff --git a/test/integration/targets/ansible-test/collection-tests/constraints.sh b/test/integration/targets/ansible-test/collection-tests/constraints.sh new file mode 100755 index 00000000..d3bbc6ab --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/constraints.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" +cd "${WORK_DIR}/ansible_collections/ns/col_constraints" + +# common args for all tests +# each test will be run in a separate venv to verify that requirements have been properly specified +common=(--venv --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}") + +# unit tests + +rm -rf "tests/output" +ansible-test units "${common[@]}" + +# integration tests + +rm -rf "tests/output" +ansible-test integration "${common[@]}" diff --git a/test/integration/targets/ansible-test/collection-tests/coverage.sh b/test/integration/targets/ansible-test/collection-tests/coverage.sh new file mode 100755 index 00000000..3d01dd4b --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/coverage.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" +cd "${WORK_DIR}/ansible_collections/ns/col" + +# rename the sanity ignore file to match the current ansible version and update import ignores with the python version +ansible_version="$(python -c 'import ansible.release; print(".".join(ansible.release.__version__.split(".")[:2]))')" +sed "s/ import$/ import-${ANSIBLE_TEST_PYTHON_VERSION}/;" < "tests/sanity/ignore.txt" > "tests/sanity/ignore-${ansible_version}.txt" + +# common args for all tests +common=(--venv --color --truncate 0 "${@}") +test_common=("${common[@]}" --python "${ANSIBLE_TEST_PYTHON_VERSION}") + +# run a lightweight test that generates code coverge output +ansible-test sanity --test import "${test_common[@]}" --coverage + +# report on code coverage in all supported formats +ansible-test coverage report "${common[@]}" +ansible-test coverage html "${common[@]}" +ansible-test coverage xml "${common[@]}" diff --git a/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh b/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh new file mode 100755 index 
00000000..31ebfbbf --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +export GIT_TOP_LEVEL SUBMODULE_DST + +GIT_TOP_LEVEL="${WORK_DIR}/super/ansible_collections/ns/col" +SUBMODULE_DST="sub" + +source collection-tests/git-common.bash diff --git a/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh b/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh new file mode 100755 index 00000000..8af4387a --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +export GIT_TOP_LEVEL SUBMODULE_DST + +GIT_TOP_LEVEL="${WORK_DIR}/super" +SUBMODULE_DST="ansible_collections/ns/col/sub" + +source collection-tests/git-common.bash diff --git a/test/integration/targets/ansible-test/collection-tests/git-common.bash b/test/integration/targets/ansible-test/collection-tests/git-common.bash new file mode 100755 index 00000000..069b157c --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/git-common.bash @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +# make sure git is installed +git --version || ansible-playbook collection-tests/install-git.yml -i ../../inventory "$@" + +# init sub project +mkdir "${WORK_DIR}/sub" +cd "${WORK_DIR}/sub" +touch "README.md" +git init +git config user.name 'Ansible Test' +git config user.email 'ansible-test@ansible.com' +git add "README.md" +git commit -m "Initial commit." + +# init super project +rm -rf "${WORK_DIR}/super" # needed when re-creating in place +mkdir "${WORK_DIR}/super" +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}/super" +cd "${GIT_TOP_LEVEL}" +git init + +# add submodule +git submodule add "${WORK_DIR}/sub" "${SUBMODULE_DST}" + +# prepare for tests +expected="${WORK_DIR}/expected.txt" +actual="${WORK_DIR}/actual.txt" +cd "${WORK_DIR}/super/ansible_collections/ns/col" +mkdir tests/.git +touch tests/.git/keep.txt # make sure ansible-test correctly ignores version control within collection subdirectories +find . -type f ! -path '*/.git/*' ! 
-name .git | sed 's|^\./||' | sort >"${expected}" +set -x + +# test at the collection base +ansible-test env --list-files | sort >"${actual}" +diff --unified "${expected}" "${actual}" + +# test at the submodule base +(cd sub && ansible-test env --list-files | sort >"${actual}") +diff --unified "${expected}" "${actual}" diff --git a/test/integration/targets/ansible-test/collection-tests/install-git.yml b/test/integration/targets/ansible-test/collection-tests/install-git.yml new file mode 100644 index 00000000..29adead7 --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/install-git.yml @@ -0,0 +1,5 @@ +- hosts: localhost + tasks: + - name: Make sure git is installed + package: + name: git diff --git a/test/integration/targets/ansible-test/collection-tests/venv.sh b/test/integration/targets/ansible-test/collection-tests/venv.sh new file mode 100755 index 00000000..862c8ad9 --- /dev/null +++ b/test/integration/targets/ansible-test/collection-tests/venv.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" +cd "${WORK_DIR}/ansible_collections/ns/col" + +# rename the sanity ignore file to match the current ansible version and update import ignores with the python version +ansible_version="$(python -c 'import ansible.release; print(".".join(ansible.release.__version__.split(".")[:2]))')" +sed "s/ import$/ import-${ANSIBLE_TEST_PYTHON_VERSION}/;" < "tests/sanity/ignore.txt" > "tests/sanity/ignore-${ansible_version}.txt" + +# common args for all tests +# each test will be run in a separate venv to verify that requirements have been properly specified +common=(--venv --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}") + +# sanity tests + +tests=() + +set +x + +while IFS='' read -r line; do + tests+=("$line"); +done < <( + ansible-test sanity --list-tests +) + +set -x + +for test in "${tests[@]}"; do + rm -rf "tests/output" + ansible-test sanity "${common[@]}" --test "${test}" +done + +# unit tests + +rm -rf "tests/output" +ansible-test units "${common[@]}" + +# integration tests + +rm -rf "tests/output" +ansible-test integration "${common[@]}" diff --git a/test/integration/targets/ansible-test/runme.sh b/test/integration/targets/ansible-test/runme.sh new file mode 100755 index 00000000..7c956b4f --- /dev/null +++ b/test/integration/targets/ansible-test/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +# tests must be executed outside of the ansible source tree +# otherwise ansible-test will test the ansible source instead of the test collection +# the temporary directory provided by ansible-test resides within the ansible source tree +tmp_dir=$(mktemp -d) + +trap 'rm -rf "${tmp_dir}"' EXIT + +export TEST_DIR +export WORK_DIR + +TEST_DIR="$PWD" + +for test in collection-tests/*.sh; do + WORK_DIR="${tmp_dir}/$(basename "${test}" ".sh")" + mkdir "${WORK_DIR}" + echo "**********************************************************************" + echo "TEST: ${test}: STARTING" + "${test}" "${@}" || (echo "TEST: ${test}: FAILED" && exit 1) + echo "TEST: ${test}: PASSED" +done diff --git a/test/integration/targets/ansible/adhoc-callback.stdout b/test/integration/targets/ansible/adhoc-callback.stdout new file mode 100644 index 00000000..05a93dd6 --- /dev/null +++ b/test/integration/targets/ansible/adhoc-callback.stdout @@ -0,0 +1,12 @@ +v2_playbook_on_start +v2_on_any +v2_playbook_on_play_start +v2_on_any +v2_playbook_on_task_start +v2_on_any +v2_runner_on_start +v2_on_any 
+v2_runner_on_ok +v2_on_any +v2_playbook_on_stats +v2_on_any diff --git a/test/integration/targets/ansible/aliases b/test/integration/targets/ansible/aliases new file mode 100644 index 00000000..f71c8117 --- /dev/null +++ b/test/integration/targets/ansible/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +skip/aix diff --git "a/test/integration/targets/ansible/ansible-test\303\251.cfg" "b/test/integration/targets/ansible/ansible-test\303\251.cfg" new file mode 100644 index 00000000..61a99f48 --- /dev/null +++ "b/test/integration/targets/ansible/ansible-test\303\251.cfg" @@ -0,0 +1,2 @@ +[defaults] +remote_user = admin diff --git a/test/integration/targets/ansible/callback_plugins/callback_debug.py b/test/integration/targets/ansible/callback_plugins/callback_debug.py new file mode 100644 index 00000000..cac122c1 --- /dev/null +++ b/test/integration/targets/ansible/callback_plugins/callback_debug.py @@ -0,0 +1,24 @@ +# (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'callback_debug' + + def __init__(self, *args, **kwargs): + super(CallbackModule, self).__init__(*args, **kwargs) + self._display.display('__init__') + + for cb in [x for x in dir(CallbackBase) if x.startswith('v2_')]: + delattr(CallbackBase, cb) + + def __getattr__(self, name): + if name.startswith('v2_'): + return(lambda *args, **kwargs: self._display.display(name)) diff --git a/test/integration/targets/ansible/no-extension b/test/integration/targets/ansible/no-extension new file mode 100644 index 00000000..61a99f48 --- /dev/null +++ b/test/integration/targets/ansible/no-extension @@ -0,0 +1,2 @@ +[defaults] +remote_user = admin diff --git a/test/integration/targets/ansible/playbook.yml b/test/integration/targets/ansible/playbook.yml new file mode 100644 index 00000000..c38b9060 --- /dev/null +++ b/test/integration/targets/ansible/playbook.yml @@ -0,0 +1,5 @@ +- hosts: all + gather_facts: false + tasks: + - debug: + msg: "{{ username }}" diff --git a/test/integration/targets/ansible/playbookdir_cfg.ini b/test/integration/targets/ansible/playbookdir_cfg.ini new file mode 100644 index 00000000..f4bf8af8 --- /dev/null +++ b/test/integration/targets/ansible/playbookdir_cfg.ini @@ -0,0 +1,2 @@ +[defaults] +playbook_dir = /tmp diff --git a/test/integration/targets/ansible/runme.sh b/test/integration/targets/ansible/runme.sh new file mode 100755 index 00000000..23ae1863 --- /dev/null +++ b/test/integration/targets/ansible/runme.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + +ansible --version +ansible --help + +ansible testhost -i ../../inventory -m ping "$@" +ansible testhost -i ../../inventory -m setup "$@" + +ansible-config view -c ./ansible-testé.cfg | grep 'remote_user = admin' +ansible-config dump -c ./ansible-testé.cfg | grep 'DEFAULT_REMOTE_USER([^)]*) = admin\>' +ANSIBLE_REMOTE_USER=administrator ansible-config dump| grep 'DEFAULT_REMOTE_USER([^)]*) = administrator\>' +ansible-config list | grep 'DEFAULT_REMOTE_USER' + +# 'view' command must fail when config file is missing or has an invalid file extension +ansible-config view -c ./ansible-non-existent.cfg 2> err1.txt || grep -Eq 'ERROR! 
The provided configuration file is missing or not accessible:' err1.txt || (cat err*.txt; rm -f err1.txt; exit 1) +ansible-config view -c ./no-extension 2> err2.txt || grep -q 'Unsupported configuration file extension' err2.txt || (cat err2.txt; rm -f err*.txt; exit 1) +rm -f err*.txt + +# test setting playbook_dir via envvar +ANSIBLE_PLAYBOOK_DIR=/tmp ansible localhost -m debug -a var=playbook_dir | grep '"playbook_dir": "/tmp"' + +# test setting playbook_dir via cmdline +ansible localhost -m debug -a var=playbook_dir --playbook-dir=/tmp | grep '"playbook_dir": "/tmp"' + +# test setting playbook dir via ansible.cfg +env -u ANSIBLE_PLAYBOOK_DIR ANSIBLE_CONFIG=./playbookdir_cfg.ini ansible localhost -m debug -a var=playbook_dir | grep '"playbook_dir": "/tmp"' + +# test adhoc callback triggers +ANSIBLE_STDOUT_CALLBACK=callback_debug ANSIBLE_LOAD_CALLBACK_PLUGINS=1 ansible --playbook-dir . testhost -i ../../inventory -m ping | grep -E '^v2_' | diff -u adhoc-callback.stdout - + +# Test that no tmp dirs are left behind when running ansible-config +TMP_DIR=~/.ansible/tmptest +if [[ -d "$TMP_DIR" ]]; then + rm -rf "$TMP_DIR" +fi +ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config list > /dev/null +ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config dump > /dev/null +ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config view > /dev/null + +# wc on macOS is dumb and returns leading spaces +file_count=$(find "$TMP_DIR" -type d -maxdepth 1 | wc -l | sed 's/^ *//') +if [[ $file_count -ne 1 ]]; then + echo "$file_count temporary files were left behind by ansible-config" + if [[ -d "$TMP_DIR" ]]; then + rm -rf "$TMP_DIR" + fi + exit 1 +fi + +# Ensure extra vars filename is prepended with '@' sign +if ansible-playbook -i ../../inventory --extra-vars /tmp/non-existing-file playbook.yml; then + echo "extra_vars filename without '@' sign should cause failure" + exit 1 +fi + +# Ensure extra vars filename is prepended with '@' sign +if ansible-playbook -i ../../inventory --extra-vars ./vars.yml playbook.yml; then + echo "extra_vars filename without '@' sign should cause failure" + exit 1 +fi + +ansible-playbook -i ../../inventory --extra-vars @./vars.yml playbook.yml diff --git a/test/integration/targets/ansible/vars.yml b/test/integration/targets/ansible/vars.yml new file mode 100644 index 00000000..a19e454f --- /dev/null +++ b/test/integration/targets/ansible/vars.yml @@ -0,0 +1 @@ +username: ansiboy diff --git a/test/integration/targets/any_errors_fatal/18602.yml b/test/integration/targets/any_errors_fatal/18602.yml new file mode 100644 index 00000000..66bcb88b --- /dev/null +++ b/test/integration/targets/any_errors_fatal/18602.yml @@ -0,0 +1,21 @@ +--- + - hosts: localhost + any_errors_fatal: true + tasks: + - block: + - debug: msg='i execute normally' + - name: EXPECTED FAILURE primary block command + command: /bin/false + - debug: msg='i never execute, cause ERROR!' 
+ rescue: + - name: rescue block debug + debug: msg='I caught an error' + - name: EXPECTED FAILURE rescue block command + command: /bin/false + - debug: msg='I also never execute :-(' + always: + - name: A debug task in the always block + debug: msg="this always executes" + + - set_fact: + always_ran: true diff --git a/test/integration/targets/any_errors_fatal/aliases b/test/integration/targets/any_errors_fatal/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/any_errors_fatal/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/any_errors_fatal/always_block.yml b/test/integration/targets/any_errors_fatal/always_block.yml new file mode 100644 index 00000000..8c6fbffa --- /dev/null +++ b/test/integration/targets/any_errors_fatal/always_block.yml @@ -0,0 +1,27 @@ +--- +- hosts: testhost + gather_facts: false + any_errors_fatal: true + tasks: + - block: + - name: initial block debug + debug: msg='any_errors_fatal_block, i execute normally' + + - name: EXPECTED FAILURE any_errors_fatal, initial block, bin/false to simulate failure + command: /bin/false + + - name: after a task that fails I should never execute + debug: + msg: 'any_errors_fatal_block_post_fail ... i never execute, cause ERROR!' + rescue: + - name: any_errors_fatal_rescue_block debug + debug: msg='any_errors_fatal_rescue_block_start ... I caught an error' + + - name: EXPECTED FAILURE any_errors_fatal in rescue block, using bin/false to simulate error + command: /bin/false + + - name: any_errors_fatal post debug + debug: msg='any_errors_fatal_rescue_block_post_fail ... I also never execute :-(' + always: + - name: any errors fatal always block debug + debug: msg='any_errors_fatal_always_block_start' diff --git a/test/integration/targets/any_errors_fatal/inventory b/test/integration/targets/any_errors_fatal/inventory new file mode 100644 index 00000000..3ae8d9c3 --- /dev/null +++ b/test/integration/targets/any_errors_fatal/inventory @@ -0,0 +1,6 @@ +[local] +testhost ansible_connection=local host_var_role_name=role3 +testhost2 ansible_connection=local host_var_role_name=role2 + +[local:vars] +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/any_errors_fatal/on_includes.yml b/test/integration/targets/any_errors_fatal/on_includes.yml new file mode 100644 index 00000000..981d9f46 --- /dev/null +++ b/test/integration/targets/any_errors_fatal/on_includes.yml @@ -0,0 +1,7 @@ +--- +# based on https://github.com/ansible/ansible/issues/22924 +- name: Test any errors fatal + hosts: testhost,testhost2 + any_errors_fatal: True + tasks: + - include: test_fatal.yml diff --git a/test/integration/targets/any_errors_fatal/play_level.yml b/test/integration/targets/any_errors_fatal/play_level.yml new file mode 100644 index 00000000..d5a89206 --- /dev/null +++ b/test/integration/targets/any_errors_fatal/play_level.yml @@ -0,0 +1,15 @@ +- hosts: testhost + gather_facts: no + any_errors_fatal: true + tasks: + - name: EXPECTED FAILURE shell exe of /bin/false for testhost + shell: '{{ "/bin/false" if inventory_hostname == "testhost" else "/bin/true" }}' + + - debug: + msg: "any_errors_fatal_play_level_post_fail" + +- hosts: testhost + any_errors_fatal: true + tasks: + - debug: + msg: "and in another play" diff --git a/test/integration/targets/any_errors_fatal/runme.sh b/test/integration/targets/any_errors_fatal/runme.sh new file mode 100755 index 00000000..02cd499f --- /dev/null +++ 
b/test/integration/targets/any_errors_fatal/runme.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -ux +ansible-playbook -i inventory "$@" play_level.yml | tee out.txt | grep 'any_errors_fatal_play_level_post_fail' +res=$? +cat out.txt +if [ "${res}" -eq 0 ] ; then + exit 1 +fi + +ansible-playbook -i inventory "$@" on_includes.yml | tee out.txt | grep 'any_errors_fatal_this_should_never_be_reached' +res=$? +cat out.txt +if [ "${res}" -eq 0 ] ; then + exit 1 +fi + +set -ux + +ansible-playbook -i inventory "$@" always_block.yml | tee out.txt | grep 'any_errors_fatal_always_block_start' +res=$? +cat out.txt +exit $res diff --git a/test/integration/targets/any_errors_fatal/test_fatal.yml b/test/integration/targets/any_errors_fatal/test_fatal.yml new file mode 100644 index 00000000..a12d741e --- /dev/null +++ b/test/integration/targets/any_errors_fatal/test_fatal.yml @@ -0,0 +1,12 @@ +--- +- name: Setting the fact for 'test' to 'test value' + set_fact: + test: "test value" + when: inventory_hostname == 'testhost2' + +- name: EXPECTED FAILURE jinja eval of a var that should not exist + debug: msg="{{ test }}" + +- name: testhost should never reach here as testhost2 failure above should end play + debug: + msg: "any_errors_fatal_this_should_never_be_reached" diff --git a/test/integration/targets/apt/aliases b/test/integration/targets/apt/aliases new file mode 100644 index 00000000..941bce38 --- /dev/null +++ b/test/integration/targets/apt/aliases @@ -0,0 +1,7 @@ +shippable/posix/group5 +destructive +skip/freebsd +skip/osx +skip/macos +skip/rhel +skip/aix diff --git a/test/integration/targets/apt/defaults/main.yml b/test/integration/targets/apt/defaults/main.yml new file mode 100644 index 00000000..05a5780f --- /dev/null +++ b/test/integration/targets/apt/defaults/main.yml @@ -0,0 +1 @@ +apt_foreign_arch: i386 diff --git a/test/integration/targets/apt/meta/main.yml b/test/integration/targets/apt/meta/main.yml new file mode 100644 index 00000000..162d7fab --- /dev/null +++ b/test/integration/targets/apt/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_deb_repo diff --git a/test/integration/targets/apt/tasks/apt-builddep.yml b/test/integration/targets/apt/tasks/apt-builddep.yml new file mode 100644 index 00000000..24ee1dc2 --- /dev/null +++ b/test/integration/targets/apt/tasks/apt-builddep.yml @@ -0,0 +1,55 @@ +# test installing build-deps using rolldice and quilt as test victims. +# +# Deps can be discovered like so (example output for netcat, taken from ubuntu 12.04) +# ==== +# root@localhost:~ # apt-rdepends --build-depends --follow=DEPENDS netcat +# Reading package lists... Done +# Building dependency tree +# Reading state information... Done +# netcat +# Build-Depends: debhelper (>= 8.0.0) +# Build-Depends: quilt +# root@localhost:~ # +# ==== +# Since many things depend on debhelper, let's just uninstall quilt, then +# install build-dep for rolldice to get it back.
build-dep doesn't have an +# uninstall, so we don't need to test for reverse actions (eg, uninstall +# build-dep and ensure things are clean) + +# uninstall quilt +- name: check quilt with dpkg + shell: dpkg -s quilt + register: dpkg_result + ignore_errors: true + tags: ['test_apt_builddep'] + +- name: uninstall quilt with apt + apt: pkg=quilt state=absent purge=yes + register: apt_result + when: dpkg_result is successful + tags: ['test_apt_builddep'] + +# install build-dep for rolldice +- name: install rolldice build-dep with apt + apt: pkg=rolldice state=build-dep + register: apt_result + tags: ['test_apt_builddep'] + +- name: verify build_dep of rolldice + assert: + that: + - "'changed' in apt_result" + tags: ['test_apt_builddep'] + +# ensure debhelper and quilt are installed +- name: check build_deps with dpkg + shell: dpkg --get-selections | egrep '(debhelper|quilt)' + failed_when: False + register: dpkg_result + tags: ['test_apt_builddep'] + +- name: verify build_deps are really there + assert: + that: + - "dpkg_result.rc == 0" + tags: ['test_apt_builddep'] diff --git a/test/integration/targets/apt/tasks/apt-multiarch.yml b/test/integration/targets/apt/tasks/apt-multiarch.yml new file mode 100644 index 00000000..df008d47 --- /dev/null +++ b/test/integration/targets/apt/tasks/apt-multiarch.yml @@ -0,0 +1,47 @@ +# verify that apt is handling multi-arch systems properly + +- name: load version specific vars + include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - 'default.yml' + paths: '../vars' + +- name: add architecture {{ apt_foreign_arch }} + command: dpkg --add-architecture {{ apt_foreign_arch }} + +- name: install {{ multiarch_test_pkg }}:{{ apt_foreign_arch }} with apt + apt: pkg={{ multiarch_test_pkg }}:{{ apt_foreign_arch }} state=present update_cache=yes + register: apt_result + until: apt_result is success + +- name: check {{ multiarch_test_pkg }} version + shell: dpkg -s {{ multiarch_test_pkg }} | grep Version | awk '{print $2}' + register: pkg_version + +- name: uninstall {{ multiarch_test_pkg }}:{{ apt_foreign_arch }} with apt + apt: pkg={{ multiarch_test_pkg }}:{{ apt_foreign_arch }} state=absent purge=yes + +- name: install deb file + apt: deb="/var/cache/apt/archives/{{ multiarch_test_pkg }}_{{ pkg_version.stdout }}_{{ apt_foreign_arch }}.deb" + register: apt_multi_initial + +- name: install deb file again + apt: deb="/var/cache/apt/archives/{{ multiarch_test_pkg }}_{{ pkg_version.stdout }}_{{ apt_foreign_arch }}.deb" + register: apt_multi_secondary + +- name: verify installation of {{ multiarch_test_pkg }}:{{ apt_foreign_arch }} + assert: + that: + - "apt_multi_initial.changed" + - "not apt_multi_secondary.changed" + +- name: remove all {{ apt_foreign_arch }} packages + apt: + name: "*:{{ apt_foreign_arch }}" + state: absent + purge: yes + +- name: remove {{ apt_foreign_arch }} architecture + command: dpkg --remove-architecture {{ apt_foreign_arch }} diff --git a/test/integration/targets/apt/tasks/apt.yml b/test/integration/targets/apt/tasks/apt.yml new file mode 100644 index 00000000..68728376 --- /dev/null +++ b/test/integration/targets/apt/tasks/apt.yml @@ -0,0 +1,416 @@ +- name: use python-apt + set_fact: + python_apt: python-apt + when: ansible_python_version is version('3', '<') + +- name: use python3-apt + set_fact: + python_apt: python3-apt + when: ansible_python_version is version('3', '>=') + +- name: use Debian mirror + set_fact: + distro_mirror: http://ftp.debian.org/debian +
when: ansible_distribution == 'Debian' + +- name: use Ubuntu mirror + set_fact: + distro_mirror: http://archive.ubuntu.com/ubuntu + when: ansible_distribution == 'Ubuntu' + +# UNINSTALL 'python-apt' +# The `apt` module has the smarts to auto-install `python-apt`. To test, we +# will first uninstall `python-apt`. +- name: check {{ python_apt }} with dpkg + shell: dpkg -s {{ python_apt }} + register: dpkg_result + ignore_errors: true + +- name: uninstall {{ python_apt }} with apt + apt: pkg={{ python_apt }} state=absent purge=yes + register: apt_result + when: dpkg_result is successful + +# In check mode, auto-install of `python-apt` must fail +- name: test fail uninstall hello without required apt deps in check mode + apt: + pkg: hello + state: absent + purge: yes + register: apt_result + check_mode: yes + ignore_errors: yes + +- name: verify fail uninstall hello without required apt deps in check mode + assert: + that: + - apt_result is failed + - '"If run normally this module can auto-install it." in apt_result.msg' + +- name: check {{ python_apt }} with dpkg + shell: dpkg -s {{ python_apt }} + register: dpkg_result + ignore_errors: true + +# UNINSTALL 'hello' +# With 'python-apt' uninstalled, the first call to 'apt' should install +# python-apt without updating the cache. +- name: uninstall hello with apt and prevent updating the cache + apt: + pkg: hello + state: absent + purge: yes + update_cache: no + register: apt_result + +- name: check hello with dpkg + shell: dpkg-query -l hello + failed_when: False + register: dpkg_result + +- name: verify uninstall hello with apt and prevent updating the cache + assert: + that: + - "'changed' in apt_result" + - apt_result is not changed + - "dpkg_result.rc == 1" + - "'Auto-installing missing dependency without updating cache: {{ python_apt }}' in apt_result.warnings" + +- name: Test installing fnmatch package + apt: + name: + - hel?o + - he?lo + register: apt_install_fnmatch + +- name: Test uninstalling fnmatch package + apt: + name: + - hel?o + - he?lo + state: absent + register: apt_uninstall_fnmatch + +- name: verify fnmatch + assert: + that: + - apt_install_fnmatch is changed + - apt_uninstall_fnmatch is changed + +- name: Test update_cache 1 + apt: + update_cache: true + cache_valid_time: 10 + register: apt_update_cache_1 + +- name: Test update_cache 2 + apt: + update_cache: true + cache_valid_time: 10 + register: apt_update_cache_2 + +- name: verify update_cache + assert: + that: + - apt_update_cache_1 is changed + - apt_update_cache_2 is not changed + +- name: uninstall {{ python_apt }} with apt again + apt: + pkg: "{{ python_apt }}" + state: absent + purge: yes + +# UNINSTALL 'hello' +# With 'python-apt' uninstalled, the first call to 'apt' should install +# python-apt. 
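+# (The task below should trigger that auto-install, emitting the +# 'Updating cache and auto-installing missing dependency' warning that +# the verify task just after it asserts on.)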
+- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + register: apt_result + until: apt_result is success + +- name: check hello with dpkg + shell: dpkg-query -l hello + failed_when: False + register: dpkg_result + +- name: verify uninstallation of hello + assert: + that: + - "'changed' in apt_result" + - apt_result is not changed + - "dpkg_result.rc == 1" + - "'Updating cache and auto-installing missing dependency: {{ python_apt }}' in apt_result.warnings" + +# UNINSTALL AGAIN +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + register: apt_result + +- name: verify no change on re-uninstall + assert: + that: + - "not apt_result.changed" + +# INSTALL +- name: install hello with apt + apt: name=hello state=present + register: apt_result + +- name: check hello with dpkg + shell: dpkg-query -l hello + failed_when: False + register: dpkg_result + +- name: verify installation of hello + assert: + that: + - "apt_result.changed" + - "dpkg_result.rc == 0" + +- name: verify apt module outputs + assert: + that: + - "'changed' in apt_result" + - "'stderr' in apt_result" + - "'stdout' in apt_result" + - "'stdout_lines' in apt_result" + +# INSTALL AGAIN +- name: install hello with apt + apt: name=hello state=present + register: apt_result + +- name: verify no change on re-install + assert: + that: + - "not apt_result.changed" + +# UNINSTALL AGAIN +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + register: apt_result + +# INSTALL WITH VERSION WILDCARD +- name: install hello with apt + apt: name=hello=2.* state=present + register: apt_result + +- name: check hello with wildcard with dpkg + shell: dpkg-query -l hello + failed_when: False + register: dpkg_result + +- name: verify installation of hello + assert: + that: + - "apt_result.changed" + - "dpkg_result.rc == 0" + +- name: check hello version + shell: dpkg -s hello | grep Version | awk '{print $2}' + register: hello_version + +- name: check hello architecture + shell: dpkg -s hello | grep Architecture | awk '{print $2}' + register: hello_architecture + +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + +- name: install deb file + apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb" + register: apt_initial + +- name: install deb file again + apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb" + register: apt_secondary + +- name: verify installation of hello + assert: + that: + - "apt_initial.changed" + - "not apt_secondary.changed" + +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + +- name: install deb file from URL + apt: deb="{{ distro_mirror }}/pool/main/h/hello/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb" + register: apt_url + +- name: verify installation of hello + assert: + that: + - "apt_url.changed" + +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + +- name: force install of deb + apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb" force=true + register: dpkg_force + +- name: verify installation of hello + assert: + that: + - "dpkg_force.changed" + +# NEGATIVE: upgrade all packages while providing additional packages to install +- name: provide additional packages to install while upgrading all installed packages + apt: pkg=*,test state=latest + ignore_errors: True + register: apt_result + +- name: verify failure of 
upgrade packages and install + assert: + that: + - "not apt_result.changed" + - "apt_result.failed" + +- name: autoclean during install + apt: pkg=hello state=present autoclean=yes + +- name: undo previous install + apt: pkg=hello state=absent + +# https://github.com/ansible/ansible/issues/23155 +- name: create a repo file + copy: + dest: /etc/apt/sources.list.d/non-existing.list + content: deb http://ppa.launchpad.net/non-existing trusty main + +- name: test for sane error message + apt: + update_cache: yes + register: apt_result + ignore_errors: yes + +- name: verify sane error message + assert: + that: + - "'Failed to fetch' in apt_result['msg']" + - "'403' in apt_result['msg']" + +- name: Clean up + file: + name: /etc/apt/sources.list.d/non-existing.list + state: absent + +# https://github.com/ansible/ansible/issues/28907 +- name: Install parent package + apt: + name: libcaca-dev + +- name: Install child package + apt: + name: libslang2-dev + +- shell: apt-mark showmanual | grep libcaca-dev + ignore_errors: yes + register: parent_output + +- name: Check that parent package is marked as installed manually + assert: + that: + - "'libcaca-dev' in parent_output.stdout" + +- shell: apt-mark showmanual | grep libslang2-dev + ignore_errors: yes + register: child_output + +- name: Check that child package is marked as installed manually + assert: + that: + - "'libslang2-dev' in child_output.stdout" + +- name: Clean up + apt: + name: "{{ pkgs }}" + state: absent + vars: + pkgs: + - libcaca-dev + - libslang2-dev + +# https://github.com/ansible/ansible/issues/38995 +- name: build-dep for a package + apt: + name: tree + state: build-dep + register: apt_result + +- name: Check the result + assert: + that: + - apt_result is changed + +- name: build-dep for a package (idempotency) + apt: + name: tree + state: build-dep + register: apt_result + +- name: Check the result + assert: + that: + - apt_result is not changed + +# check policy_rc_d parameter + +- name: Install unscd but forbid service start + apt: + name: unscd + policy_rc_d: 101 + +- name: Stop unscd service + service: + name: unscd + state: stopped + register: service_unscd_stop + +- name: unscd service shouldn't have been stopped by previous task + assert: + that: service_unscd_stop is not changed + +- name: Uninstall unscd + apt: + name: unscd + policy_rc_d: 101 + +- name: Create incorrect /usr/sbin/policy-rc.d + copy: + dest: /usr/sbin/policy-rc.d + content: apt integration test + mode: 0755 + +- name: Install unscd but forbid service start + apt: + name: unscd + policy_rc_d: 101 + +- name: Stop unscd service + service: + name: unscd + state: stopped + register: service_unscd_stop + +- name: unscd service shouldn't have been stopped by previous task + assert: + that: service_unscd_stop is not changed + +- name: Create incorrect /usr/sbin/policy-rc.d + copy: + dest: /usr/sbin/policy-rc.d + content: apt integration test + mode: 0755 + register: policy_rc_d + +- name: Check if /usr/sbin/policy-rc.d was correctly backed-up during unscd install + assert: + that: policy_rc_d is not changed + +- name: Delete /usr/sbin/policy-rc.d + file: + path: /usr/sbin/policy-rc.d + state: absent diff --git a/test/integration/targets/apt/tasks/main.yml b/test/integration/targets/apt/tasks/main.yml new file mode 100644 index 00000000..1ecd8a63 --- /dev/null +++ b/test/integration/targets/apt/tasks/main.yml @@ -0,0 +1,40 @@ +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it 
under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- block: + - include: 'apt.yml' + + - include: 'url-with-deps.yml' + + - include: 'apt-multiarch.yml' + when: + - ansible_userspace_architecture != apt_foreign_arch + + - include: 'apt-builddep.yml' + + - block: + - include: 'repo.yml' + always: + - file: + path: /etc/apt/sources.list.d/file_tmp_repo.list + state: absent + - file: + name: "{{ repodir }}" + state: absent + + when: + - ansible_distribution in ('Ubuntu', 'Debian') diff --git a/test/integration/targets/apt/tasks/repo.yml b/test/integration/targets/apt/tasks/repo.yml new file mode 100644 index 00000000..f568be9f --- /dev/null +++ b/test/integration/targets/apt/tasks/repo.yml @@ -0,0 +1,253 @@ +- block: + - name: Install foo package version 1.0.0 + apt: + name: foo=1.0.0 + allow_unauthenticated: yes + register: apt_result + + - name: Check install with dpkg + shell: dpkg-query -l foo + register: dpkg_result + + - name: Check if install was successful + assert: + that: + - "apt_result is success" + - "dpkg_result is success" + - "'1.0.0' in dpkg_result.stdout" + + - name: Update to foo version 1.0.1 + apt: + name: foo + state: latest + allow_unauthenticated: yes + register: apt_result + + - name: Check install with dpkg + shell: dpkg-query -l foo + register: dpkg_result + + - name: Check if install was successful + assert: + that: + - "apt_result is success" + - "dpkg_result is success" + - "'1.0.1' in dpkg_result.stdout" + always: + - name: Clean up + apt: + name: foo + state: absent + allow_unauthenticated: yes + + +# https://github.com/ansible/ansible/issues/30638 +- block: + - name: Fail to install foo=1.0.1 since foo is not installed and only_upgrade is set + apt: + name: foo=1.0.1 + state: present + only_upgrade: yes + allow_unauthenticated: yes + ignore_errors: yes + register: apt_result + + - name: Check that foo was not upgraded + assert: + that: + - "apt_result is not changed" + + - apt: + name: foo=1.0.0 + allow_unauthenticated: yes + + - name: Upgrade foo to 1.0.1 + apt: + name: foo=1.0.1 + state: present + only_upgrade: yes + allow_unauthenticated: yes + register: apt_result + + - name: Check install with dpkg + shell: dpkg-query -l foo + register: dpkg_result + + - name: Check if install was successful + assert: + that: + - "apt_result is success" + - "dpkg_result is success" + - "'1.0.1' in dpkg_result.stdout" + always: + - name: Clean up + apt: + name: foo + state: absent + allow_unauthenticated: yes + + +# https://github.com/ansible/ansible/issues/35900 +- block: + - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env + command: mv /etc/apt/sources.list /etc/apt/sources.list.backup + + - name: Install foobar, installs foo as a dependency + apt: + name: foobar=1.0.0 + allow_unauthenticated: yes + + - name: Upgrade foobar to a version which does not depend on foo, autoremove should remove foo + apt: + upgrade: dist + autoremove: yes + allow_unauthenticated: yes + + - name: Check foo with dpkg + shell: dpkg-query -l foo + register: dpkg_result +
ignore_errors: yes + + - name: Check that foo was removed by autoremove + assert: + that: + - "dpkg_result is failed" + + always: + - name: Clean up + apt: + pkg: foo,foobar + state: absent + autoclean: yes + + - name: Restore ubuntu repos + command: mv /etc/apt/sources.list.backup /etc/apt/sources.list + + +# https://github.com/ansible/ansible/issues/26298 +- block: + - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env + command: mv /etc/apt/sources.list /etc/apt/sources.list.backup + + - name: Install foobar, installs foo as a dependency + apt: + name: foobar=1.0.0 + allow_unauthenticated: yes + + - name: Upgrade foobar to a version which does not depend on foo + apt: + upgrade: dist + force: yes # workaround for --allow-unauthenticated used along with upgrade + + - name: autoremove should remove foo + apt: + autoremove: yes + register: autoremove_result + + - name: Check that autoremove correctly reports changed=True + assert: + that: + - "autoremove_result is changed" + + - name: Check foo with dpkg + shell: dpkg-query -l foo + register: dpkg_result + ignore_errors: yes + + - name: Check that foo was removed by autoremove + assert: + that: + - "dpkg_result is failed" + + - name: Nothing to autoremove + apt: + autoremove: yes + register: autoremove_result + + - name: Check that autoremove correctly reports changed=False + assert: + that: + - "autoremove_result is not changed" + + - name: Create a fake .deb file for autoclean to remove + file: + name: /var/cache/apt/archives/python3-q_2.4-1_all.deb + state: touch + + - name: autoclean fake .deb file + apt: + autoclean: yes + register: autoclean_result + + - name: Check if the .deb file exists + stat: + path: /var/cache/apt/archives/python3-q_2.4-1_all.deb + register: stat_result + + - name: Check that autoclean correctly reports changed=True and file was removed + assert: + that: + - "autoclean_result is changed" + - "not stat_result.stat.exists" + + - name: Nothing to autoclean + apt: + autoclean: yes + register: autoclean_result + + - name: Check that autoclean correctly reports changed=False + assert: + that: + - "autoclean_result is not changed" + + always: + - name: Clean up + apt: + pkg: foo,foobar + state: absent + autoclean: yes + + - name: Restore ubuntu repos + command: mv /etc/apt/sources.list.backup /etc/apt/sources.list + + +- name: Upgrades + block: + - include: "upgrade.yml aptitude_present={{ True | bool }} upgrade_type=dist force_apt_get={{ False | bool }}" + + - name: Check if aptitude is installed + command: dpkg-query --show --showformat='${db:Status-Abbrev}' aptitude + register: aptitude_status + + - name: Remove aptitude, if installed, to test fall-back to apt-get + apt: + pkg: aptitude + state: absent + when: + - aptitude_status.stdout.find('ii') != -1 + + - include: "upgrade.yml aptitude_present={{ False | bool }} upgrade_type={{ item.upgrade_type }} force_apt_get={{ item.force_apt_get }}" + with_items: + - { upgrade_type: safe, force_apt_get: False } + - { upgrade_type: full, force_apt_get: False } + - { upgrade_type: safe, force_apt_get: True } + - { upgrade_type: full, force_apt_get: True } + + - name: (Re-)Install aptitude, run same tests again + apt: + pkg: aptitude + state: present + + - include: "upgrade.yml aptitude_present={{ True | bool }} upgrade_type={{ item.upgrade_type }} force_apt_get={{ item.force_apt_get }}" + with_items: + - { upgrade_type: safe, force_apt_get: False } + - { upgrade_type: full, force_apt_get: False } + - { upgrade_type: safe, 
force_apt_get: True } + - { upgrade_type: full, force_apt_get: True } + + - name: Remove aptitude if not originally present + apt: + pkg: aptitude + state: absent + when: + - aptitude_status.stdout.find('ii') == -1 diff --git a/test/integration/targets/apt/tasks/upgrade.yml b/test/integration/targets/apt/tasks/upgrade.yml new file mode 100644 index 00000000..cf747c81 --- /dev/null +++ b/test/integration/targets/apt/tasks/upgrade.yml @@ -0,0 +1,64 @@ +- block: + - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env + command: mv /etc/apt/sources.list /etc/apt/sources.list.backup + + - name: install foo-1.0.0 + apt: + name: foo=1.0.0 + state: present + allow_unauthenticated: yes + + - name: check foo version + shell: dpkg -s foo | grep Version | awk '{print $2}' + register: foo_version + + - name: ensure the correct version of foo has been installed + assert: + that: + - "'1.0.0' in foo_version.stdout" + + - name: "(upgrade type: {{upgrade_type}}) upgrade packages to latest version, force_apt_get: {{force_apt_get}}" + apt: + upgrade: "{{ upgrade_type }}" + force_apt_get: "{{ force_apt_get }}" + force: yes + register: upgrade_result + + - name: check foo version + shell: dpkg -s foo | grep Version | awk '{print $2}' + register: foo_version + + - name: check that warning is not given when force_apt_get set + assert: + that: + - "'warnings' not in upgrade_result" + when: + - force_apt_get + + - name: check that old version upgraded correctly + assert: + that: + - "'1.0.0' not in foo_version.stdout" + - "{{ foo_version.changed }}" + + - name: "(upgrade type: {{upgrade_type}}) upgrade packages to latest version (Idempotent)" + apt: + upgrade: "{{ upgrade_type }}" + force_apt_get: "{{ force_apt_get }}" + force: yes + register: second_upgrade_result + + - name: check that nothing has changed (Idempotent) + assert: + that: + - "second_upgrade_result.changed == false" + + always: + - name: Clean up + apt: + pkg: foo,foobar + state: absent + autoclean: yes + + - name: Restore ubuntu repos + command: mv /etc/apt/sources.list.backup /etc/apt/sources.list diff --git a/test/integration/targets/apt/tasks/url-with-deps.yml b/test/integration/targets/apt/tasks/url-with-deps.yml new file mode 100644 index 00000000..ed2f7073 --- /dev/null +++ b/test/integration/targets/apt/tasks/url-with-deps.yml @@ -0,0 +1,56 @@ +- block: + - name: Install https transport for apt + apt: + name: apt-transport-https + + - name: Ensure echo-hello is not installed + apt: + name: echo-hello + state: absent + purge: yes + + # Note that this .deb is just a stupidly tiny one that has a dependency + # on vim-tiny. Really any .deb will work here so long as it has + # dependencies that exist in a repo and get brought in.
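+ # (Illustration only, not part of the test: the dependency can be + # inspected locally with `dpkg-deb --info echo-hello_1.0_all.deb`, + # which prints the control fields, including the Depends: line.)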
+ # The source and files for building this .deb can be found here: + # https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello-source.tar.gz + - name: Install deb file with dependencies from URL (check_mode) + apt: + deb: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello_1.0_all.deb + check_mode: true + register: apt_url_deps_check_mode + + - name: check to make sure we didn't install the package due to check_mode + shell: dpkg-query -l echo-hello + failed_when: false + register: dpkg_result_check_mode + + - name: verify check_mode installation of echo-hello + assert: + that: + - apt_url_deps_check_mode is changed + - dpkg_result_check_mode.rc != 0 + + - name: Install deb file with dependencies from URL (for real this time) + apt: + deb: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello_1.0_all.deb + register: apt_url_deps + + - name: check to make sure we installed the package + shell: dpkg-query -l echo-hello + failed_when: False + register: dpkg_result + + - name: verify real installation of echo-hello + assert: + that: + - apt_url_deps is changed + - dpkg_result is successful + - dpkg_result.rc == 0 + + always: + - name: uninstall echo-hello with apt + apt: + pkg: echo-hello + state: absent + purge: yes diff --git a/test/integration/targets/apt/vars/Ubuntu-20.yml b/test/integration/targets/apt/vars/Ubuntu-20.yml new file mode 100644 index 00000000..7b32755f --- /dev/null +++ b/test/integration/targets/apt/vars/Ubuntu-20.yml @@ -0,0 +1 @@ +multiarch_test_pkg: libunistring2 diff --git a/test/integration/targets/apt/vars/default.yml b/test/integration/targets/apt/vars/default.yml new file mode 100644 index 00000000..bed3a965 --- /dev/null +++ b/test/integration/targets/apt/vars/default.yml @@ -0,0 +1 @@ +multiarch_test_pkg: hello diff --git a/test/integration/targets/apt_key/aliases b/test/integration/targets/apt_key/aliases new file mode 100644 index 00000000..f46fd701 --- /dev/null +++ b/test/integration/targets/apt_key/aliases @@ -0,0 +1,6 @@ +shippable/posix/group1 +skip/freebsd +skip/osx +skip/macos +skip/rhel +skip/aix diff --git a/test/integration/targets/apt_key/meta/main.yml b/test/integration/targets/apt_key/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/apt_key/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/apt_key/tasks/apt_key.yml b/test/integration/targets/apt_key/tasks/apt_key.yml new file mode 100644 index 00000000..a5969b6f --- /dev/null +++ b/test/integration/targets/apt_key/tasks/apt_key.yml @@ -0,0 +1,19 @@ +- name: run first docs example + apt_key: + keyserver: keyserver.ubuntu.com + id: 36A1D7869245C8950F966E92D8576A8BA88D21E9 + register: apt_key_test0 +- debug: var=apt_key_test0 + +- name: re-run first docs example + apt_key: + keyserver: keyserver.ubuntu.com + id: 36A1D7869245C8950F966E92D8576A8BA88D21E9 + register: apt_key_test1 + +- name: validate results + assert: + that: + - 'apt_key_test0.changed is defined' + - 'apt_key_test0.changed' + - 'not apt_key_test1.changed' diff --git a/test/integration/targets/apt_key/tasks/main.yml b/test/integration/targets/apt_key/tasks/main.yml new file mode 100644 index 00000000..a268b2b9 --- /dev/null +++ b/test/integration/targets/apt_key/tasks/main.yml @@ -0,0 +1,28 @@ +# Test code for the apt_key module. 
+# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- set_fact: output_dir_test={{output_dir}}/apt_key + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +- include: 'apt_key.yml' + when: ansible_distribution in ('Ubuntu', 'Debian') diff --git a/test/integration/targets/apt_repository/aliases b/test/integration/targets/apt_repository/aliases new file mode 100644 index 00000000..7e462190 --- /dev/null +++ b/test/integration/targets/apt_repository/aliases @@ -0,0 +1,7 @@ +destructive +shippable/posix/group1 +skip/freebsd +skip/osx +skip/macos +skip/rhel +skip/aix diff --git a/test/integration/targets/apt_repository/meta/main.yml b/test/integration/targets/apt_repository/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/apt_repository/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/apt_repository/tasks/apt.yml b/test/integration/targets/apt_repository/tasks/apt.yml new file mode 100644 index 00000000..66790bb0 --- /dev/null +++ b/test/integration/targets/apt_repository/tasks/apt.yml @@ -0,0 +1,243 @@ +--- + +- set_fact: + test_ppa_name: 'ppa:git-core/ppa' + test_ppa_filename: 'git-core' + test_ppa_spec: 'deb http://ppa.launchpad.net/git-core/ppa/ubuntu {{ansible_distribution_release}} main' + test_ppa_key: 'E1DF1F24' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index + +- name: show python version + debug: var=ansible_python_version + +- name: use python-apt + set_fact: + python_apt: python-apt + when: ansible_python_version is version('3', '<') + +- name: use python3-apt + set_fact: + python_apt: python3-apt + when: ansible_python_version is version('3', '>=') + +# UNINSTALL 'python-apt' +# The `apt_repository` module has the smarts to auto-install `python-apt`. To +# test, we will first uninstall `python-apt`.
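+# (Sketch only, not part of the original test: the two version-gated +# set_fact tasks above are equivalent to a single conditional expression: +# - set_fact: +#     python_apt: "{{ 'python3-apt' if ansible_python_version is version('3', '>=') else 'python-apt' }}" +# )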
+- name: check {{ python_apt }} with dpkg + shell: dpkg -s {{ python_apt }} + register: dpkg_result + ignore_errors: true + +- name: uninstall {{ python_apt }} with apt + apt: pkg={{ python_apt }} state=absent purge=yes + register: apt_result + when: dpkg_result is successful + +# +# TEST: apt_repository: repo=<name> +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name=<name> (expect: pass)' + apt_repository: repo='{{test_ppa_name}}' state=present + register: result + +- name: 'assert the repo was added' + assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_name}}"' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +- name: 'ensure ppa key is installed (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=present + +# +# TEST: apt_repository: repo=<name> update_cache=no +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name=<name> update_cache=no (expect: pass)' + apt_repository: repo='{{test_ppa_name}}' state=present update_cache=no + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_name}}"' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did *NOT* change' + assert: + that: + - 'cache_before.stat.mtime == cache_after.stat.mtime' + +- name: 'ensure ppa key is installed (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=present + +# +# TEST: apt_repository: repo=<name> update_cache=yes +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name=<name> update_cache=yes (expect: pass)' + apt_repository: repo='{{test_ppa_name}}' state=present update_cache=yes + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_name}}"' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +- name: 'ensure ppa key is installed (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=present + +# +# TEST: apt_repository: repo=<spec> +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: ensure ppa key is present before adding repo that requires authentication + apt_key: keyserver=keyserver.ubuntu.com id='{{test_ppa_key}}' state=present + +- name: 'name=<spec> (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' state=present + register: result + +- name: update the cache + apt: + update_cache: true + register: result_cache + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_spec}}"' + - result_cache is not changed + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +- name: remove repo by spec + apt_repository:
repo='{{test_ppa_spec}}' state=absent + register: result + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + +# +# TEST: apt_repository: repo=<spec> filename=<filename> +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: ensure ppa key is present before adding repo that requires authentication + apt_key: keyserver=keyserver.ubuntu.com id='{{test_ppa_key}}' state=present + +- name: 'name=<spec> filename=<filename> (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_spec}}"' + +- name: 'examine source file' + stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list' + register: source_file + +- name: 'assert source file exists' + assert: + that: + - 'source_file.stat.exists == True' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + +- name: Test apt_repository with a null value for repo + apt_repository: + repo: + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Please set argument \'repo\' to a non-empty value' + +- name: Test apt_repository with an empty value for repo + apt_repository: + repo: "" + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Please set argument \'repo\' to a non-empty value' + +# +# TEARDOWN +# +- include: 'cleanup.yml' diff --git a/test/integration/targets/apt_repository/tasks/cleanup.yml b/test/integration/targets/apt_repository/tasks/cleanup.yml new file mode 100644 index 00000000..92280ced --- /dev/null +++ b/test/integration/targets/apt_repository/tasks/cleanup.yml @@ -0,0 +1,17 @@ +--- +# tasks to cleanup a repo and assert it is gone + +- name: remove existing ppa + apt_repository: repo={{test_ppa_name}} state=absent + ignore_errors: true + +- name: test that ppa does not exist (expect pass) + shell: cat /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "{{test_ppa_spec}}" + register: command + failed_when: command.rc == 0 + changed_when: false + +# Should this use apt-key, maybe? +- name: remove ppa key + apt_key: id={{test_ppa_key}} state=absent + ignore_errors: true diff --git a/test/integration/targets/apt_repository/tasks/main.yml b/test/integration/targets/apt_repository/tasks/main.yml new file mode 100644 index 00000000..41010112 --- /dev/null +++ b/test/integration/targets/apt_repository/tasks/main.yml @@ -0,0 +1,25 @@ +# test code for the apt_repository module +# (c) 2014, James Laska + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- include: 'apt.yml' + when: ansible_distribution in ('Ubuntu') + +- include: mode.yaml + when: ansible_distribution in ('Ubuntu') + tags: + - test_apt_repository_mode \ No newline at end of file diff --git a/test/integration/targets/apt_repository/tasks/mode.yaml b/test/integration/targets/apt_repository/tasks/mode.yaml new file mode 100644 index 00000000..d9895368 --- /dev/null +++ b/test/integration/targets/apt_repository/tasks/mode.yaml @@ -0,0 +1,130 @@ +--- + +# These tests are likely slower than they should be, since each +# invocation of apt_repository seems to end up querying for +# lots (all?) configured repos. + +- set_fact: + test_repo_spec: "deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main" + test_repo_path: /etc/apt/sources.list.d/apt_postgresql_org_pub_repos_apt.list + +- include: mode_cleanup.yaml + +- name: Add GPG key to verify signatures + apt_key: + id: 7FCC7D46ACCC4CF8 + keyserver: keyserver.ubuntu.com + +- name: Mode specified as yaml literal 0600 + apt_repository: + repo: "{{ test_repo_spec }}" + state: present + mode: 0600 + register: mode_given_results + +- name: Gather mode_given_as_literal_yaml stat + stat: + path: "{{ test_repo_path }}" + register: mode_given_yaml_literal_0600 + +- name: Show mode_given_yaml_literal_0600 + debug: + var: mode_given_yaml_literal_0600 + +- include: mode_cleanup.yaml + +- name: Assert mode_given_yaml_literal_0600 is correct + assert: + that: "mode_given_yaml_literal_0600.stat.mode == '0600'" + +- name: No mode specified + apt_repository: + repo: "{{ test_repo_spec }}" + state: present + register: no_mode_results + +- name: Gather no mode stat + stat: + path: "{{ test_repo_path }}" + register: no_mode_stat + +- name: Show no mode stat + debug: + var: no_mode_stat + +- include: mode_cleanup.yaml + +- name: Assert no_mode_stat is correct + assert: + that: "no_mode_stat.stat.mode == '0644'" + +- name: Mode specified as string 0600 + apt_repository: + repo: "{{ test_repo_spec }}" + state: present + mode: "0600" + register: mode_given_string_results + +- name: Gather mode_given_string stat + stat: + path: "{{ test_repo_path }}" + register: mode_given_string_stat + +- name: Show mode_given_string_stat + debug: + var: mode_given_string_stat + +- include: mode_cleanup.yaml + +- name: Mode specified as string 600 + apt_repository: + repo: "{{ test_repo_spec }}" + state: present + mode: "600" + register: mode_given_string_600_results + +- name: Gather mode_given_string_600 stat + stat: + path: "{{ test_repo_path }}" + register: mode_given_string_600_stat + +- name: Show mode_given_string_600_stat + debug: + var: mode_given_string_600_stat + +- include: mode_cleanup.yaml + +- name: Assert mode is correct + assert: + that: "mode_given_string_600_stat.stat.mode == '0600'" + +- name: Mode specified as yaml literal 600 + apt_repository: + repo: "{{ test_repo_spec }}" + state: present + mode: 600 + register: mode_given_short_results + +- name: Gather mode_given_yaml_literal_600 stat + stat: + path: "{{ test_repo_path }}" + register: mode_given_yaml_literal_600 + +- name: Show mode_given_yaml_literal_600 + debug: + var: mode_given_yaml_literal_600 + +- include: mode_cleanup.yaml + +# a literal 600 as the mode will fail currently, in the sense that it +# doesn't guess and consider 600 and 0600 to be the same, and will instead +# interpret literal 600
as the decimal 600 (and thereby octal 1130). +# The literal 0600 can be interpreted as octal correctly. Note that +# octal 0644 is decimal 420. The default perm is 0644, so a misinterpretation +# of 644 was previously resulting in a default file mode of 0420. +# 'mode: 600' is likely not what a user meant, but there isn't enough info +# to determine that. Note that a string arg of '600' will be interpreted as 0600. +# See https://github.com/ansible/ansible/issues/16370 +- name: Assert mode_given_yaml_literal_600 is correct + assert: + that: "mode_given_yaml_literal_600.stat.mode == '1130'" \ No newline at end of file diff --git a/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml b/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml new file mode 100644 index 00000000..726de111 --- /dev/null +++ b/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml @@ -0,0 +1,7 @@ +--- +# tasks to cleanup after creating a repo file, specifically for testing the 'mode' arg + +- name: Delete existing repo + file: + path: "{{ test_repo_path }}" + state: absent \ No newline at end of file diff --git a/test/integration/targets/args/aliases b/test/integration/targets/args/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/args/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/args/runme.sh b/test/integration/targets/args/runme.sh new file mode 100755 index 00000000..af1c31d7 --- /dev/null +++ b/test/integration/targets/args/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eu + +echo "arg[#]: $#" +echo "arg[0]: $0" + +i=0 +for arg in "$@"; do + i=$((i+1)) + echo "arg[$i]: ${arg}" +done diff --git a/test/integration/targets/argspec/aliases b/test/integration/targets/argspec/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/argspec/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/argspec/library/argspec.py b/test/integration/targets/argspec/library/argspec.py new file mode 100644 index 00000000..08dad1a0 --- /dev/null +++ b/test/integration/targets/argspec/library/argspec.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + { + 'required': { + 'required': True, + }, + 'required_one_of_one': {}, + 'required_one_of_two': {}, + 'required_by_one': {}, + 'required_by_two': {}, + 'required_by_three': {}, + 'state': { + 'type': 'str', + 'choices': ['absent', 'present'], + }, + 'path': {}, + 'content': {}, + 'mapping': { + 'type': 'dict', + }, + 'required_one_of': { + 'required_one_of': [['thing', 'other']], + 'type': 'list', + 'elements': 'dict', + 'options': { + 'thing': {}, + 'other': {}, + }, + }, + 'required_by': { + 'required_by': {'thing': 'other'}, + 'type': 'list', + 'elements': 'dict', + 'options': { + 'thing': {}, + 'other': {}, + }, + }, + 'required_together': { + 'required_together': [['thing', 'other']], + 'type': 'list', + 'elements': 'dict', + 'options': { + 'thing': {}, + 'other': {}, + 'another': {}, + }, + }, + 'required_if': { + 'required_if': ( + ('thing', 'foo', ('other',), True), + ), + 'type': 'list', + 'elements': 'dict', + 'options': { + 'thing': {}, + 'other': {}, + 'another': {}, + }, + }, +
'json': { + 'type': 'json', + }, + 'fail_on_missing_params': { + 'type': 'list', + 'default': [], + }, + 'needed_param': {}, + 'required_together_one': {}, + 'required_together_two': {}, + 'suboptions_list_no_elements': { + 'type': 'list', + 'options': { + 'thing': {}, + }, + }, + 'choices_with_strings_like_bools': { + 'type': 'str', + 'choices': [ + 'on', + 'off', + ], + }, + 'choices': { + 'type': 'str', + 'choices': [ + 'foo', + 'bar', + ], + }, + 'list_choices': { + 'type': 'list', + 'choices': [ + 'foo', + 'bar', + 'baz', + ], + }, + 'primary': { + 'type': 'str', + 'aliases': [ + 'alias', + ], + }, + 'password': { + 'type': 'str', + 'no_log': True, + }, + 'not_a_password': { + 'type': 'str', + 'no_log': False, + }, + 'maybe_password': { + 'type': 'str', + }, + 'int': { + 'type': 'int', + }, + }, + required_if=( + ('state', 'present', ('path', 'content'), True), + ), + mutually_exclusive=( + ('path', 'content'), + ), + required_one_of=( + ('required_one_of_one', 'required_one_of_two'), + ), + required_by={ + 'required_by_one': ('required_by_two', 'required_by_three'), + }, + required_together=( + ('required_together_one', 'required_together_two'), + ), + ) + + module.fail_on_missing_params(module.params['fail_on_missing_params']) + + module.exit_json(**module.params) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/argspec/tasks/main.yml b/test/integration/targets/argspec/tasks/main.yml new file mode 100644 index 00000000..d90bdf02 --- /dev/null +++ b/test/integration/targets/argspec/tasks/main.yml @@ -0,0 +1,419 @@ +- argspec: + required: value + required_one_of_one: value + +- argspec: + required_one_of_one: value + register: argspec_required_fail + ignore_errors: true + +- argspec: + required: value + required_one_of_two: value + +- argspec: + required: value + register: argspec_required_one_of_fail + ignore_errors: true + +- argspec: + required: value + required_one_of_two: value + required_by_one: value + required_by_two: value + required_by_three: value + +- argspec: + required: value + required_one_of_two: value + required_by_one: value + required_by_two: value + register: argspec_required_by_fail + ignore_errors: true + +- argspec: + state: absent + required: value + required_one_of_one: value + +- argspec: + state: present + required: value + required_one_of_one: value + register: argspec_required_if_fail + ignore_errors: true + +- argspec: + state: present + path: foo + required: value + required_one_of_one: value + +- argspec: + state: present + content: foo + required: value + required_one_of_one: value + +- argspec: + state: present + content: foo + path: foo + required: value + required_one_of_one: value + register: argspec_mutually_exclusive_fail + ignore_errors: true + +- argspec: + mapping: + foo: bar + required: value + required_one_of_one: value + register: argspec_good_mapping + +- argspec: + mapping: foo=bar + required: value + required_one_of_one: value + register: argspec_good_mapping_kv + +- argspec: + mapping: !!str '{"foo": "bar"}' + required: value + required_one_of_one: value + register: argspec_good_mapping_json + +- argspec: + mapping: !!str '{"foo": False}' + required: value + required_one_of_one: value + register: argspec_good_mapping_dict_repr + +- argspec: + mapping: foo + required: value + required_one_of_one: value + register: argspec_bad_mapping_string + ignore_errors: true + +- argspec: + mapping: 1 + required: value + required_one_of_one: value + register: argspec_bad_mapping_int + ignore_errors: true + +- argspec: + 
mapping: + - foo + - bar + required: value + required_one_of_one: value + register: argspec_bad_mapping_list + ignore_errors: true + +- argspec: + required_together: + - thing: foo + other: bar + another: baz + required: value + required_one_of_one: value + +- argspec: + required_together: + - another: baz + required: value + required_one_of_one: value + +- argspec: + required_together: + - thing: foo + required: value + required_one_of_one: value + register: argspec_required_together_fail + ignore_errors: true + +- argspec: + required_together: + - thing: foo + other: bar + required: value + required_one_of_one: value + +- argspec: + required_if: + - thing: bar + required: value + required_one_of_one: value + +- argspec: + required_if: + - thing: foo + other: bar + required: value + required_one_of_one: value + +- argspec: + required_if: + - thing: foo + required: value + required_one_of_one: value + register: argspec_required_if_fail_2 + ignore_errors: true + +- argspec: + required_one_of: + - thing: foo + other: bar + required: value + required_one_of_one: value + +- argspec: + required_one_of: + - {} + required: value + required_one_of_one: value + register: argspec_required_one_of_fail_2 + ignore_errors: true + +- argspec: + required_by: + - thing: foo + other: bar + required: value + required_one_of_one: value + +- argspec: + required_by: + - thing: foo + required: value + required_one_of_one: value + register: argspec_required_by_fail_2 + ignore_errors: true + +- argspec: + json: !!str '{"foo": "bar"}' + required: value + required_one_of_one: value + register: argspec_good_json_string + +- argspec: + json: + foo: bar + required: value + required_one_of_one: value + register: argspec_good_json_dict + +- argspec: + json: 1 + required: value + required_one_of_one: value + register: argspec_bad_json + ignore_errors: true + +- argspec: + fail_on_missing_params: + - needed_param + needed_param: whatever + required: value + required_one_of_one: value + +- argspec: + fail_on_missing_params: + - needed_param + required: value + required_one_of_one: value + register: argspec_fail_on_missing_params_bad + ignore_errors: true + +- argspec: + required_together_one: foo + required_together_two: bar + required: value + required_one_of_one: value + +- argspec: + required_together_one: foo + required: value + required_one_of_one: value + register: argspec_fail_required_together_2 + ignore_errors: true + +- argspec: + suboptions_list_no_elements: + - thing: foo + required: value + required_one_of_one: value + register: argspec_suboptions_list_no_elements + +- argspec: + choices_with_strings_like_bools: on + required: value + required_one_of_one: value + register: argspec_choices_with_strings_like_bools_true + +- argspec: + choices_with_strings_like_bools: 'on' + required: value + required_one_of_one: value + register: argspec_choices_with_strings_like_bools_true_bool + +- argspec: + choices_with_strings_like_bools: off + required: value + required_one_of_one: value + register: argspec_choices_with_strings_like_bools_false + +- argspec: + required: value + required_one_of_one: value + choices: foo + +- argspec: + required: value + required_one_of_one: value + choices: baz + register: argspec_choices_bad_choice + ignore_errors: true + +- argspec: + required: value + required_one_of_one: value + list_choices: + - bar + - baz + +- argspec: + required: value + required_one_of_one: value + list_choices: + - bar + - baz + - qux + register: argspec_list_choices_bad_choice + ignore_errors: true + +- argspec: + 
required: value + required_one_of_one: value + primary: foo + register: argspec_aliases_primary + +- argspec: + required: value + required_one_of_one: value + alias: foo + register: argspec_aliases_alias + +- argspec: + required: value + required_one_of_one: value + primary: foo + alias: foo + register: argspec_aliases_both + +- argspec: + required: value + required_one_of_one: value + primary: foo + alias: bar + register: argspec_aliases_both_different + +- command: >- + ansible localhost -m argspec + -a 'required=value required_one_of_one=value primary=foo alias=bar' + environment: + ANSIBLE_LIBRARY: '{{ role_path }}/library' + register: argspec_aliases_both_warning + +- command: ansible localhost -m import_role -a 'role=argspec tasks_from=password_no_log.yml' + register: argspec_password_no_log + +- argspec: + required: value + required_one_of_one: value + int: 1 + +- argspec: + required: value + required_one_of_one: value + int: foo + register: argspec_int_invalid + ignore_errors: true + +- assert: + that: + - argspec_required_fail is failed + + - argspec_required_one_of_fail is failed + + - argspec_required_by_fail is failed + + - argspec_required_if_fail is failed + + - argspec_mutually_exclusive_fail is failed + + - argspec_good_mapping is successful + - >- + argspec_good_mapping.mapping == {'foo': 'bar'} + - argspec_good_mapping_json is successful + - >- + argspec_good_mapping_json.mapping == {'foo': 'bar'} + - argspec_good_mapping_dict_repr is successful + - >- + argspec_good_mapping_dict_repr.mapping == {'foo': False} + - argspec_good_mapping_kv is successful + - >- + argspec_good_mapping_kv.mapping == {'foo': 'bar'} + - argspec_bad_mapping_string is failed + - argspec_bad_mapping_int is failed + - argspec_bad_mapping_list is failed + + - argspec_required_together_fail is failed + + - argspec_required_if_fail_2 is failed + + - argspec_required_one_of_fail_2 is failed + + - argspec_required_by_fail_2 is failed + + - argspec_good_json_string is successful + - >- + argspec_good_json_string.json == '{"foo": "bar"}' + - argspec_good_json_dict is successful + - >- + argspec_good_json_dict.json == '{"foo": "bar"}' + - argspec_bad_json is failed + + - argspec_fail_on_missing_params_bad is failed + + - argspec_fail_required_together_2 is failed + + - >- + argspec_suboptions_list_no_elements.suboptions_list_no_elements.0 == {'thing': 'foo'} + + - argspec_choices_with_strings_like_bools_true.choices_with_strings_like_bools == 'on' + - argspec_choices_with_strings_like_bools_true_bool.choices_with_strings_like_bools == 'on' + - argspec_choices_with_strings_like_bools_false.choices_with_strings_like_bools == 'off' + + - argspec_choices_bad_choice is failed + + - argspec_list_choices_bad_choice is failed + + - argspec_aliases_primary.primary == 'foo' + - argspec_aliases_primary.alias is undefined + - argspec_aliases_alias.primary == 'foo' + - argspec_aliases_alias.alias == 'foo' + - argspec_aliases_both.primary == 'foo' + - argspec_aliases_both.alias == 'foo' + - argspec_aliases_both_different.primary == 'bar' + - argspec_aliases_both_different.alias == 'bar' + - '"[WARNING]: Both option primary and its alias alias are set." 
in argspec_aliases_both_warning.stderr' + + - '"Module did not set no_log for maybe_password" in argspec_password_no_log.stderr' + - '"Module did not set no_log for password" not in argspec_password_no_log.stderr' + - '"Module did not set no_log for not_a_password" not in argspec_password_no_log.stderr' + - argspec_password_no_log.stdout|regex_findall('VALUE_SPECIFIED_IN_NO_LOG_PARAMETER')|length == 1 + + - argspec_int_invalid is failed diff --git a/test/integration/targets/argspec/tasks/password_no_log.yml b/test/integration/targets/argspec/tasks/password_no_log.yml new file mode 100644 index 00000000..99c3307b --- /dev/null +++ b/test/integration/targets/argspec/tasks/password_no_log.yml @@ -0,0 +1,14 @@ +- argspec: + required: value + required_one_of_one: value + password: foo + +- argspec: + required: value + required_one_of_one: value + not_a_password: foo + +- argspec: + required: value + required_one_of_one: value + maybe_password: foo diff --git a/test/integration/targets/assemble/aliases b/test/integration/targets/assemble/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/assemble/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/assemble/files/fragment1 b/test/integration/targets/assemble/files/fragment1 new file mode 100644 index 00000000..a00d3ea0 --- /dev/null +++ b/test/integration/targets/assemble/files/fragment1 @@ -0,0 +1 @@ +this is fragment 1 diff --git a/test/integration/targets/assemble/files/fragment2 b/test/integration/targets/assemble/files/fragment2 new file mode 100644 index 00000000..860f7603 --- /dev/null +++ b/test/integration/targets/assemble/files/fragment2 @@ -0,0 +1 @@ +this is fragment 2 diff --git a/test/integration/targets/assemble/files/fragment3 b/test/integration/targets/assemble/files/fragment3 new file mode 100644 index 00000000..df95b24b --- /dev/null +++ b/test/integration/targets/assemble/files/fragment3 @@ -0,0 +1 @@ +this is fragment 3 diff --git a/test/integration/targets/assemble/files/fragment4 b/test/integration/targets/assemble/files/fragment4 new file mode 100644 index 00000000..c83252bb --- /dev/null +++ b/test/integration/targets/assemble/files/fragment4 @@ -0,0 +1 @@ +this is fragment 4 diff --git a/test/integration/targets/assemble/files/fragment5 b/test/integration/targets/assemble/files/fragment5 new file mode 100644 index 00000000..8a527d15 --- /dev/null +++ b/test/integration/targets/assemble/files/fragment5 @@ -0,0 +1 @@ +this is fragment 5 diff --git a/test/integration/targets/assemble/meta/main.yml b/test/integration/targets/assemble/meta/main.yml new file mode 100644 index 00000000..a9d0b468 --- /dev/null +++ b/test/integration/targets/assemble/meta/main.yml @@ -0,0 +1,20 @@ +# test code for the assemble module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +dependencies: + - prepare_tests diff --git a/test/integration/targets/assemble/tasks/main.yml b/test/integration/targets/assemble/tasks/main.yml new file mode 100644 index 00000000..5e779cfb --- /dev/null +++ b/test/integration/targets/assemble/tasks/main.yml @@ -0,0 +1,163 @@ +# test code for the assemble module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: create a new directory for file source + file: dest="{{output_dir}}/src" state=directory + register: result + +- name: assert the directory was created + assert: + that: + - "result.state == 'directory'" + +- name: copy the files to a new directory + copy: src="./" dest="{{output_dir}}/src" + register: result + +- name: create unicode file for test + shell: echo "π" > {{ output_dir }}/src/ßΩ.txt + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + +- name: test assemble with all fragments + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1" + register: result + +- name: assert the fragments were assembled + assert: + that: + - "result.state == 'file'" + - "result.changed == True" + - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'" + +- name: test assemble with all fragments + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1" + register: result + +- name: assert that the same assemble made no changes + assert: + that: + - "result.state == 'file'" + - "result.changed == False" + - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'" + +- name: test assemble with all fragments and decrypt=True + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" decrypt=yes + register: result + +- name: assert the fragments were assembled with decrypt=True + assert: + that: + - "result.state == 'file'" + - "result.changed == True" + - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'" + +- name: test assemble with all fragments and decrypt=True + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" decrypt=yes + register: result + +- name: assert that the same assemble made no changes with decrypt=True + assert: + that: + - "result.state == 'file'" + - "result.changed == False" + - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'" + +- name: test assemble with fragments matching a regex + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled3" regexp="^fragment[1-3]$" + register: result + +- name: assert the fragments were assembled with a regex + assert: + that: + - "result.state == 'file'" + - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'" + +- name: test assemble with fragments matching a regex and decrypt=True + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled4" regexp="^fragment[1-3]$" decrypt=yes + register: result + +- name: assert the fragments were assembled with a regex and decrypt=True + assert: + 
that: + - "result.state == 'file'" + - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'" + +- name: test assemble with a delimiter + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled5" delimiter="#--- delimiter ---#" + register: result + +- name: assert the fragments were assembled with a delimiter + assert: + that: + - "result.state == 'file'" + - "result.checksum == 'd986cefb82e34e4cf14d33a3cda132ff45aa2980'" + +- name: test assemble with a delimiter and decrypt=True + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled6" delimiter="#--- delimiter ---#" decrypt=yes + register: result + +- name: assert the fragments were assembled with a delimiter and decrypt=True + assert: + that: + - "result.state == 'file'" + - "result.checksum == 'd986cefb82e34e4cf14d33a3cda132ff45aa2980'" + +- name: test assemble with remote_src=False + assemble: src="./" dest="{{output_dir}}/assembled7" remote_src=no + register: result + +- name: assert the fragments were assembled without remote + assert: + that: + - "result.state == 'file'" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" + +- name: test assemble with remote_src=False and decrypt=True + assemble: src="./" dest="{{output_dir}}/assembled8" remote_src=no decrypt=yes + register: result + +- name: assert the fragments were assembled without remote and decrypt=True + assert: + that: + - "result.state == 'file'" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" + +- name: test assemble with remote_src=False and a delimiter + assemble: src="./" dest="{{output_dir}}/assembled9" remote_src=no delimiter="#--- delimiter ---#" + register: result + +- name: assert the fragments were assembled without remote + assert: + that: + - "result.state == 'file'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" + +- name: test assemble with remote_src=False and a delimiter and decrypt=True + assemble: src="./" dest="{{output_dir}}/assembled10" remote_src=no delimiter="#--- delimiter ---#" decrypt=yes + register: result + +- name: assert the fragments were assembled without remote + assert: + that: + - "result.state == 'file'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" diff --git a/test/integration/targets/assert/aliases b/test/integration/targets/assert/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/assert/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/assert/assert_quiet.out.quiet.stderr b/test/integration/targets/assert/assert_quiet.out.quiet.stderr new file mode 100644 index 00000000..bd973b04 --- /dev/null +++ b/test/integration/targets/assert/assert_quiet.out.quiet.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i localhost, -c local quiet.yml +++ set +x diff --git a/test/integration/targets/assert/assert_quiet.out.quiet.stdout b/test/integration/targets/assert/assert_quiet.out.quiet.stdout new file mode 100644 index 00000000..b62aac6c --- /dev/null +++ b/test/integration/targets/assert/assert_quiet.out.quiet.stdout @@ -0,0 +1,17 @@ + +PLAY [localhost] *************************************************************** + +TASK [assert] ****************************************************************** +ok: [localhost] => (item=item_A) + +TASK [assert] ****************************************************************** +ok: [localhost] => (item=item_A) => { + "ansible_loop_var": "item", + "changed": false, + "item": "item_A", + "msg": "All assertions 
passed" +} + +PLAY RECAP ********************************************************************* +localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + diff --git a/test/integration/targets/assert/inventory b/test/integration/targets/assert/inventory new file mode 100644 index 00000000..16182004 --- /dev/null +++ b/test/integration/targets/assert/inventory @@ -0,0 +1,3 @@ +[all] +localhost + diff --git a/test/integration/targets/assert/quiet.yml b/test/integration/targets/assert/quiet.yml new file mode 100644 index 00000000..6834712c --- /dev/null +++ b/test/integration/targets/assert/quiet.yml @@ -0,0 +1,16 @@ +--- +- hosts: localhost + gather_facts: False + vars: + item_A: yes + tasks: + - assert: + that: "{{ item }} is defined" + quiet: True + with_items: + - item_A + - assert: + that: "{{ item }} is defined" + quiet: False + with_items: + - item_A diff --git a/test/integration/targets/assert/runme.sh b/test/integration/targets/assert/runme.sh new file mode 100755 index 00000000..ca0a8587 --- /dev/null +++ b/test/integration/targets/assert/runme.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# This test compares "known good" output with various settings against output +# with the current code. It's brittle by nature, but this is probably the +# "best" approach possible. +# +# Notes: +# * options passed to this script (such as -v) are ignored, as they would change +# the output and break the test +# * the number of asterisks after a "banner" differs is forced to 79 by +# redirecting stdin from /dev/null + +set -eux + +run_test() { + # testname is playbook name + local testname=$1 + + # The shenanigans with redirection and 'tee' are to capture STDOUT and + # STDERR separately while still displaying both to the console + { ansible-playbook -i 'localhost,' -c local "${testname}.yml" \ + > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \ + 2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2) 0 + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: run a 2 second loop + shell: for i in $(seq 1 2); do echo $i ; sleep 1; done; + async: 10 + poll: 1 + register: async_result + + +- debug: var=async_result + +- name: validate async returns + assert: + that: + - "'ansible_job_id' in async_result" + - "'changed' in async_result" + - "'cmd' in async_result" + - "'delta' in async_result" + - "'end' in async_result" + - "'rc' in async_result" + - "'start' in async_result" + - "'stderr' in async_result" + - "'stdout' in async_result" + - "'stdout_lines' in async_result" + - async_result.rc == 0 + - async_result.finished == 1 + - async_result is finished + +- name: test async without polling + command: sleep 5 + async: 30 + poll: 0 + register: async_result + +- debug: var=async_result + +- name: validate async without polling returns + assert: + that: + - "'ansible_job_id' in async_result" + - "'started' in async_result" + - async_result.finished == 0 + - async_result is not finished + +- name: test skipped task handling + command: /bin/true + async: 15 + poll: 0 + when: False + +# test async "fire and forget, but check later" + +- name: 'start a task with "fire-and-forget"' + command: sleep 3 + async: 30 + poll: 0 + register: fnf_task + +- name: assert task was successfully started + assert: + that: + - fnf_task.started == 1 + - fnf_task is started + - "'ansible_job_id' in fnf_task" + +- name: 'check on task started as a "fire-and-forget"' + async_status: jid={{ fnf_task.ansible_job_id }} + register: fnf_result + until: fnf_result is finished + retries: 10 + delay: 1 + +- name: assert task was successfully checked + assert: + that: + - fnf_result.finished + - fnf_result is finished + +- name: test graceful module failure + async_test: + fail_mode: graceful + async: 30 + poll: 1 + register: async_result + ignore_errors: true + +- name: assert task failed correctly + assert: + that: + - async_result.ansible_job_id is match('\d+\.\d+') + - async_result.finished == 1 + - async_result is finished + - async_result is not changed + - async_result is failed + - async_result.msg == 'failed gracefully' + +- name: test exception module failure + async_test: + fail_mode: exception + async: 5 + poll: 1 + register: async_result + ignore_errors: true + +- name: validate response + assert: + that: + - async_result.ansible_job_id is match('\d+\.\d+') + - async_result.finished == 1 + - async_result is finished + - async_result.changed == false + - async_result is not changed + - async_result.failed == true + - async_result is failed + - async_result.stderr is search('failing via exception', multiline=True) + +- name: test leading junk before JSON + async_test: + fail_mode: leading_junk + async: 5 + poll: 1 + register: async_result + +- name: validate response + assert: + that: + - async_result.ansible_job_id is match('\d+\.\d+') + - async_result.finished == 1 + - async_result is finished + - async_result.changed == true + - async_result is changed + - async_result is successful + +- name: test trailing junk after JSON + async_test: + fail_mode: trailing_junk + async: 5 + poll: 1 + register: async_result + +- name: validate response + assert: + that: + - async_result.ansible_job_id is match('\d+\.\d+') + - async_result.finished == 1 + - async_result is finished + - async_result.changed == true + - async_result is changed + - async_result is successful + - async_result.warnings[0] is search('trailing junk after module output') + +- name: test stderr handling + async_test: + fail_mode: stderr + async: 30 + poll: 1 + register: async_result + 
ignore_errors: true + +- assert: + that: + - async_result.stderr == "printed to stderr\n" + +# NOTE: This should report a warning that cannot be tested +- name: test async properties on non-async task + command: sleep 1 + register: non_async_result + +- name: validate response + assert: + that: + - non_async_result is successful + - non_async_result is changed + - non_async_result is finished + - "'ansible_job_id' not in non_async_result" + +- name: set fact of custom tmp dir + set_fact: + custom_async_tmp: ~/.ansible_async_test + +- name: ensure custom async tmp dir is absent + file: + path: '{{ custom_async_tmp }}' + state: absent + +- block: + - name: run async task with custom dir + command: sleep 1 + register: async_custom_dir + async: 5 + poll: 1 + vars: + ansible_async_dir: '{{ custom_async_tmp }}' + + - name: check if the async temp dir is created + stat: + path: '{{ custom_async_tmp }}' + register: async_custom_dir_result + + - name: assert run async task with custom dir + assert: + that: + - async_custom_dir is successful + - async_custom_dir is finished + - async_custom_dir_result.stat.exists + + - name: remove custom async dir again + file: + path: '{{ custom_async_tmp }}' + state: absent + + - name: run async task with custom dir - deprecated format + command: sleep 1 + register: async_custom_dir_dep + async: 5 + poll: 1 + environment: + ANSIBLE_ASYNC_DIR: '{{ custom_async_tmp }}' + + - name: check if the async temp dir is created - deprecated format + stat: + path: '{{ custom_async_tmp }}' + register: async_custom_dir_dep_result + + - name: assert run async task with custom dir - deprecated format + assert: + that: + - async_custom_dir_dep is successful + - async_custom_dir_dep is finished + - async_custom_dir_dep_result.stat.exists + + - name: remove custom async dir after deprecation test + file: + path: '{{ custom_async_tmp }}' + state: absent + + - name: run fire and forget async task with custom dir + command: echo moo + register: async_fandf_custom_dir + async: 5 + poll: 0 + vars: + ansible_async_dir: '{{ custom_async_tmp }}' + + - name: fail to get async status with custom dir with defaults + async_status: + jid: '{{ async_fandf_custom_dir.ansible_job_id }}' + register: async_fandf_custom_dir_fail + ignore_errors: yes + + - name: get async status with custom dir using newer format + async_status: + jid: '{{ async_fandf_custom_dir.ansible_job_id }}' + register: async_fandf_custom_dir_result + vars: + ansible_async_dir: '{{ custom_async_tmp }}' + + - name: get async status with custom dir - deprecated format + async_status: + jid: '{{ async_fandf_custom_dir.ansible_job_id }}' + register: async_fandf_custom_dir_dep_result + environment: + ANSIBLE_ASYNC_DIR: '{{ custom_async_tmp }}' + + - name: assert run fire and forget async task with custom dir + assert: + that: + - async_fandf_custom_dir is successful + - async_fandf_custom_dir_fail is failed + - async_fandf_custom_dir_fail.msg == "could not find job" + - async_fandf_custom_dir_result is successful + - async_fandf_custom_dir_dep_result is successful + + always: + - name: remove custom tmp dir after test + file: + path: '{{ custom_async_tmp }}' + state: absent + +- name: Test that async has stdin + command: > + {{ ansible_python_interpreter|default('/usr/bin/python') }} -c 'import os; os.fdopen(os.dup(0), "r")' + async: 1 + poll: 1 diff --git a/test/integration/targets/async_extra_data/aliases b/test/integration/targets/async_extra_data/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ 
b/test/integration/targets/async_extra_data/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/async_extra_data/library/junkping.py b/test/integration/targets/async_extra_data/library/junkping.py new file mode 100644 index 00000000..b61d965d --- /dev/null +++ b/test/integration/targets/async_extra_data/library/junkping.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print("junk_before_module_output") + print(json.dumps(dict(changed=False, source='user'))) + print("junk_after_module_output") + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/async_extra_data/runme.sh b/test/integration/targets/async_extra_data/runme.sh new file mode 100755 index 00000000..46132731 --- /dev/null +++ b/test/integration/targets/async_extra_data/runme.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -eux + +# Verify that extra data before module JSON output during an async call is ignored, and that the warning exists. +ANSIBLE_DEBUG=0 ansible-playbook -i ../../inventory test_async.yml -v "$@" \ + | grep 'junk after the JSON data: junk_after_module_output' diff --git a/test/integration/targets/async_extra_data/test_async.yml b/test/integration/targets/async_extra_data/test_async.yml new file mode 100644 index 00000000..480a2a65 --- /dev/null +++ b/test/integration/targets/async_extra_data/test_async.yml @@ -0,0 +1,10 @@ +- hosts: testhost + gather_facts: false + tasks: + # make sure non-JSON data before module output is ignored + - name: async ping wrapped in extra junk + junkping: + async: 10 + poll: 1 + register: result + - debug: var=result diff --git a/test/integration/targets/async_fail/action_plugins/normal.py b/test/integration/targets/async_fail/action_plugins/normal.py new file mode 100644 index 00000000..297cbd9b --- /dev/null +++ b/test/integration/targets/async_fail/action_plugins/normal.py @@ -0,0 +1,62 @@ +# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleError +from ansible.plugins.action import ActionBase +from ansible.utils.vars import merge_hash + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + + # individual modules might disagree, but as the generic action plugin, pass at this point. + self._supports_check_mode = True + self._supports_async = True + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + if not result.get('skipped'): + + if result.get('invocation', {}).get('module_args'): + # avoid passing to modules in case of no_log + # should not be set anymore but here for backwards compatibility + del result['invocation']['module_args'] + + # FUTURE: better to let _execute_module calculate this internally?
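+ # async_val carries the task's "async:" timeout (0 when the task is not async), so the module run is wrapped in Ansible's async_wrapper only when async was requested and the connection plugin lacks native async support.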
+ wrap_async = self._task.async_val and not self._connection.has_native_async + + # do work! + result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async)) + + # hack to keep --verbose from showing all the setup module result + # moved from setup module as now we filter out all _ansible_ from result + if self._task.action == 'setup': + result['_ansible_verbose_override'] = True + + # Simulate a transient network failure + if self._task.action == 'async_status' and 'finished' in result and result['finished'] != 1: + raise AnsibleError('Pretend to fail somewhere in executing async_status') + + if not wrap_async: + # remove a temporary path we created + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/test/integration/targets/async_fail/aliases b/test/integration/targets/async_fail/aliases new file mode 100644 index 00000000..c989cd70 --- /dev/null +++ b/test/integration/targets/async_fail/aliases @@ -0,0 +1,3 @@ +async_status +async_wrapper +shippable/posix/group2 diff --git a/test/integration/targets/async_fail/library/async_test.py b/test/integration/targets/async_fail/library/async_test.py new file mode 100644 index 00000000..838f2f07 --- /dev/null +++ b/test/integration/targets/async_fail/library/async_test.py @@ -0,0 +1,50 @@ +import json +import sys +import time + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + if "--interactive" in sys.argv: + import ansible.module_utils.basic + ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(dict( + ANSIBLE_MODULE_ARGS=dict( + fail_mode="graceful" + ) + )) + + module = AnsibleModule( + argument_spec=dict( + fail_mode=dict(type='list', default=['success']) + ) + ) + + result = dict(changed=True) + + fail_mode = module.params['fail_mode'] + + try: + if 'leading_junk' in fail_mode: + print("leading junk before module output") + + if 'graceful' in fail_mode: + module.fail_json(msg="failed gracefully") + + if 'exception' in fail_mode: + raise Exception('failing via exception') + + if 'recovered_fail' in fail_mode: + result = {"msg": "succeeded", "failed": False, "changed": True} + # Wait in the middle to set up a race where the controller reads incomplete data from our + # special async_status on the first poll + time.sleep(5) + + module.exit_json(**result) + + finally: + if 'trailing_junk' in fail_mode: + print("trailing junk after module output") + + +main() diff --git a/test/integration/targets/async_fail/meta/main.yml b/test/integration/targets/async_fail/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/async_fail/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/async_fail/tasks/main.yml b/test/integration/targets/async_fail/tasks/main.yml new file mode 100644 index 00000000..40f72e10 --- /dev/null +++ b/test/integration/targets/async_fail/tasks/main.yml @@ -0,0 +1,36 @@ +# test code for the async keyword failing in the middle of output +# (c) 2018, Ansible Project + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# This uses a special copy of the normal action plugin which simulates +# a transient failure in the module +- name: test that we can recover from initial failures to read + async_test: + fail_mode: recovered_fail + async: 10 + poll: 1 + register: async_result + +- name: validate that by the end of the retry interval, we succeeded + assert: + that: + - async_result.ansible_job_id is match('\d+\.\d+') + - async_result.finished == 1 + - async_result is finished + - async_result is changed + - async_result is successful + - async_result.msg is search('succeeded') diff --git a/test/integration/targets/become/aliases b/test/integration/targets/become/aliases new file mode 100644 index 00000000..3a07aab3 --- /dev/null +++ b/test/integration/targets/become/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/become/files/baz.txt b/test/integration/targets/become/files/baz.txt new file mode 100644 index 00000000..b8d834da --- /dev/null +++ b/test/integration/targets/become/files/baz.txt @@ -0,0 +1 @@ +testing tilde expansion with become diff --git a/test/integration/targets/become/tasks/default.yml b/test/integration/targets/become/tasks/default.yml new file mode 100644 index 00000000..4ba10170 --- /dev/null +++ b/test/integration/targets/become/tasks/default.yml @@ -0,0 +1,82 @@ +- name: Create test user (become_method=default) + become: True + become_user: root + user: + name: "{{ become_test_user }}" + group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}' + +- name: test becoming user (become_method=default) + shell: whoami + become: True + become_user: "{{ become_test_user }}" + register: results + +- assert: + that: + - "results.stdout == '{{ become_test_user }}'" + +- name: tilde expansion honors become in file (become_method=default) + become: True + become_user: "{{ become_test_user }}" + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created (become_method=default) + become: True + become_user: "{{ become_test_user }}" + stat: + path: "~{{ become_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in template (become_method=default) + become: True + become_user: "{{ become_test_user }}" + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created (become_method=default) + become: True + become_user: "{{ become_test_user }}" + stat: + path: "~{{ become_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in copy (become_method=default) + become: True + become_user: "{{ become_test_user }}" + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created (become_method=default) + become: True + become_user: "{{ become_test_user }}" + stat: + path: "~{{ become_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: Remove test user and their home dir (become_method=default) + become: 
True + become_user: root + user: + name: "{{ become_test_user }}" + state: "absent" + remove: "yes" + force: "yes" diff --git a/test/integration/targets/become/tasks/main.yml b/test/integration/targets/become/tasks/main.yml new file mode 100644 index 00000000..3feb5cc7 --- /dev/null +++ b/test/integration/targets/become/tasks/main.yml @@ -0,0 +1,5 @@ +- include_vars: default.yml + +- include: default.yml +- include: sudo.yml +- include: su.yml diff --git a/test/integration/targets/become/tasks/su.yml b/test/integration/targets/become/tasks/su.yml new file mode 100644 index 00000000..d314b0a8 --- /dev/null +++ b/test/integration/targets/become/tasks/su.yml @@ -0,0 +1,91 @@ +- name: Create test user (become_method=su) + become: True + become_user: root + become_method: su + user: + name: "{{ become_test_user }}" + group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}' + +- name: test becoming user (become_method=su) + shell: whoami + become: True + become_user: "{{ become_test_user }}" + become_method: su + register: results + +- assert: + that: + - "results.stdout == '{{ become_test_user }}'" + +- name: tilde expansion honors become in file (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + stat: + path: "~{{ become_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in template (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + stat: + path: "~{{ become_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in copy (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created (become_method=su) + become: True + become_user: "{{ become_test_user }}" + become_method: su + stat: + path: "~{{ become_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: Remove test user and their home dir (become_method=su) + become: True + become_user: root + become_method: su + user: + name: "{{ become_test_user }}" + state: "absent" + remove: "yes" + force: "yes" diff --git a/test/integration/targets/become/tasks/sudo.yml b/test/integration/targets/become/tasks/sudo.yml new file mode 100644 index 00000000..636ec378 --- /dev/null +++ b/test/integration/targets/become/tasks/sudo.yml @@ -0,0 +1,91 @@ +- name: Create test user (become_method=sudo) + become: True + become_user: root + become_method: sudo + user: + name: "{{ become_test_user }}" + group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}' + +- name: test becoming user (become_method=sudo) + shell: whoami + become: True + become_user: "{{ become_test_user }}" + 
become_method: sudo + register: results + +- assert: + that: + - "results.stdout == '{{ become_test_user }}'" + +- name: tilde expansion honors become in file (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + stat: + path: "~{{ become_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in template (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + stat: + path: "~{{ become_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in copy (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created (become_method=sudo) + become: True + become_user: "{{ become_test_user }}" + become_method: sudo + stat: + path: "~{{ become_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: Remove test user and their home dir (become_method=sudo) + become: True + become_user: root + become_method: sudo + user: + name: "{{ become_test_user }}" + state: "absent" + remove: "yes" + force: "yes" diff --git a/test/integration/targets/become/templates/bar.j2 b/test/integration/targets/become/templates/bar.j2 new file mode 100644 index 00000000..7c5fe0ab --- /dev/null +++ b/test/integration/targets/become/templates/bar.j2 @@ -0,0 +1 @@ +{{ become_test_user }} diff --git a/test/integration/targets/become/vars/default.yml b/test/integration/targets/become/vars/default.yml new file mode 100644 index 00000000..223d44ed --- /dev/null +++ b/test/integration/targets/become/vars/default.yml @@ -0,0 +1 @@ +become_test_user: ansibletest1 diff --git a/test/integration/targets/become_su/aliases b/test/integration/targets/become_su/aliases new file mode 100644 index 00000000..3a07aab3 --- /dev/null +++ b/test/integration/targets/become_su/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/become_su/runme.sh b/test/integration/targets/become_su/runme.sh new file mode 100755 index 00000000..87a3511f --- /dev/null +++ b/test/integration/targets/become_su/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +# ensure we execute su with a pseudo terminal +[ "$(ansible -a whoami --become-method=su localhost --become)" != "su: requires a terminal to execute" ] diff --git a/test/integration/targets/binary/aliases b/test/integration/targets/binary/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/binary/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/binary/files/b64_latin1 
b/test/integration/targets/binary/files/b64_latin1 new file mode 100644 index 00000000..c7fbdeb6 --- /dev/null +++ b/test/integration/targets/binary/files/b64_latin1 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/targets/binary/files/b64_utf8 b/test/integration/targets/binary/files/b64_utf8 new file mode 100644 index 00000000..c7fbdeb6 --- /dev/null +++ b/test/integration/targets/binary/files/b64_utf8 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/targets/binary/files/from_playbook b/test/integration/targets/binary/files/from_playbook new file mode 100644 index 00000000..c7fbdeb6 --- /dev/null +++ b/test/integration/targets/binary/files/from_playbook @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/targets/binary/meta/main.yml b/test/integration/targets/binary/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/binary/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/binary/tasks/main.yml b/test/integration/targets/binary/tasks/main.yml new file mode 100644 index 00000000..486ee6d6 --- /dev/null +++ b/test/integration/targets/binary/tasks/main.yml @@ -0,0 +1,131 @@ +--- +# Various ways users want to use binary data +# Could integrate into individual modules but currently these don't all work. +# Probably easier to see them all in a single block to know what we're testing. +# When we can start testing v2 we should test that all of these work. + +# In v1: The following line will traceback if it's the first task in the role. +# Does not traceback if it's the second or third etc task. +- debug: msg="{{ utf8_simple_accents|b64decode}}" + +# Expected values of the written files +- name: get checksums that we expect later files to have + copy: + src: from_playbook + dest: "{{ output_dir }}" + +- copy: + src: b64_utf8 + dest: "{{ output_dir }}" + +- copy: + src: b64_latin1 + dest: "{{ output_dir }}" + +- stat: + path: "{{ output_dir }}/from_playbook" + register: from_playbook + +- stat: + path: "{{ output_dir }}/b64_utf8" + register: b64_utf8 + +- stat: + path: "{{ output_dir }}/b64_latin1" + register: b64_latin1 + +# Tests themselves +- name: copy with utf-8 content in a playbook + copy: + content: "{{ simple_accents }}\n" + dest: "{{ output_dir }}/from_playbook.txt" + +- name: Check that copying utf-8 content matches + stat: + path: "{{ output_dir }}/from_playbook.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + +- name: copy with utf8 in a base64 encoded string + copy: + content: "{{ utf8_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_utf8.txt" + +- name: Check that utf8 in a base64 string matches + stat: + path: "{{ output_dir }}/b64_utf8.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + +- name: copy with latin1 in a base64 encoded string + copy: + content: "{{ latin1_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_latin1.txt" + +- name: Check that latin1 in a base64 string matches + stat: + path: "{{ output_dir }}/b64_latin1.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' + # This one depends on being able to pass binary data through + # Might be a while before we find a solution for this + ignore_errors: True + +- name: Template with a unicode string from the playbook + template: + src: "from_playbook_template.j2" + dest: "{{ output_dir }}/from_playbook_template.txt" + +- name: 
Check that writing a template from a playbook var matches + stat: + path: "{{ output_dir }}/from_playbook_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + +- name: Template with utf8 in a base64 encoded string + template: + src: "b64_utf8_template.j2" + dest: "{{ output_dir }}/b64_utf8_template.txt" + +- name: Check that writing a template from a base64 encoded utf8 string matches + stat: + path: "{{ output_dir }}/b64_utf8_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + +- name: Template with latin1 in a base64 encoded string + template: + src: "b64_latin1_template.j2" + dest: "{{ output_dir }}/b64_latin1_template.txt" + +- name: Check that writing a template from a base64 encoded latin1 string matches + stat: + path: "{{ output_dir }}/b64_latin1_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' + # This one depends on being able to pass binary data through + # Might be a while before we find a solution for this + ignore_errors: True + +# These might give garbled output but none of them should traceback +- debug: var=simple_accents +- debug: msg="{{ utf8_simple_accents|b64decode}}" +- debug: msg="{{ latin1_simple_accents|b64decode}}" diff --git a/test/integration/targets/binary/templates/b64_latin1_template.j2 b/test/integration/targets/binary/templates/b64_latin1_template.j2 new file mode 100644 index 00000000..ee2fc1b1 --- /dev/null +++ b/test/integration/targets/binary/templates/b64_latin1_template.j2 @@ -0,0 +1 @@ +{{ latin1_simple_accents|b64decode }} diff --git a/test/integration/targets/binary/templates/b64_utf8_template.j2 b/test/integration/targets/binary/templates/b64_utf8_template.j2 new file mode 100644 index 00000000..9fd3ed48 --- /dev/null +++ b/test/integration/targets/binary/templates/b64_utf8_template.j2 @@ -0,0 +1 @@ +{{ utf8_simple_accents|b64decode }} diff --git a/test/integration/targets/binary/templates/from_playbook_template.j2 b/test/integration/targets/binary/templates/from_playbook_template.j2 new file mode 100644 index 00000000..3be6dd4f --- /dev/null +++ b/test/integration/targets/binary/templates/from_playbook_template.j2 @@ -0,0 +1 @@ +{{ simple_accents }} diff --git a/test/integration/targets/binary/vars/main.yml b/test/integration/targets/binary/vars/main.yml new file mode 100644 index 00000000..f6d40232 --- /dev/null +++ b/test/integration/targets/binary/vars/main.yml @@ -0,0 +1,3 @@ +simple_accents: 'Café Eñe' +utf8_simple_accents: 'Q2Fmw6kgRcOxZQ==' +latin1_simple_accents: 'Q2Fm6SBF8WU=' diff --git a/test/integration/targets/binary_modules/Makefile b/test/integration/targets/binary_modules/Makefile new file mode 100644 index 00000000..c3092e47 --- /dev/null +++ b/test/integration/targets/binary_modules/Makefile @@ -0,0 +1,16 @@ +.PHONY: all clean + +all: + # Compiled versions of these binary modules are available at the url below. + # This avoids a dependency on go and keeps the binaries out of our git repository. 
+ # https://ansible-ci-files.s3.amazonaws.com/test/integration/roles/test_binary_modules/ + cd library; \ + GOOS=linux GOARCH=amd64 go build -o helloworld_linux_x86_64 helloworld.go; \ + GOOS=linux GOARCH=ppc64le go build -o helloworld_linux_ppc64le helloworld.go; \ + GOOS=aix GOARCH=ppc64 go build -o helloworld_aix_chrp helloworld.go; \ + GOOS=windows GOARCH=amd64 go build -o helloworld_win32nt_64-bit.exe helloworld.go; \ + GOOS=darwin GOARCH=amd64 go build -o helloworld_darwin_x86_64 helloworld.go; \ + GOOS=freebsd GOARCH=amd64 go build -o helloworld_freebsd_amd64 helloworld.go + +clean: + rm -f library/helloworld_* diff --git a/test/integration/targets/binary_modules/aliases b/test/integration/targets/binary_modules/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/binary_modules/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/binary_modules/download_binary_modules.yml b/test/integration/targets/binary_modules/download_binary_modules.yml new file mode 100644 index 00000000..e8f51b1a --- /dev/null +++ b/test/integration/targets/binary_modules/download_binary_modules.yml @@ -0,0 +1,9 @@ +- hosts: testhost + tasks: + - name: download binary module + tags: test_binary_modules + get_url: + url: "https://ansible-ci-files.s3.amazonaws.com/test/integration/roles/test_binary_modules/{{ filename }}" + dest: "{{ playbook_dir }}/library/{{ filename }}" + mode: 0755 + delegate_to: localhost diff --git a/test/integration/targets/binary_modules/group_vars/all b/test/integration/targets/binary_modules/group_vars/all new file mode 100644 index 00000000..1d3ff5e4 --- /dev/null +++ b/test/integration/targets/binary_modules/group_vars/all @@ -0,0 +1,3 @@ +system: "{{ ansible_system|lower }}" +suffix: "{{ '.exe' if system == 'win32nt' else '' }}" +filename: "helloworld_{{ system }}_{{ ansible_architecture }}{{ suffix }}" diff --git a/test/integration/targets/binary_modules/library/.gitignore b/test/integration/targets/binary_modules/library/.gitignore new file mode 100644 index 00000000..d034a06a --- /dev/null +++ b/test/integration/targets/binary_modules/library/.gitignore @@ -0,0 +1 @@ +helloworld_* diff --git a/test/integration/targets/binary_modules/library/helloworld.go b/test/integration/targets/binary_modules/library/helloworld.go new file mode 100644 index 00000000..a4c16b20 --- /dev/null +++ b/test/integration/targets/binary_modules/library/helloworld.go @@ -0,0 +1,89 @@ +// This file is part of Ansible +// +// Ansible is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Ansible is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Ansible. If not, see . 
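+// helloworld is a minimal Ansible binary module: Ansible invokes the compiled executable with one argument, the path to a JSON file of module arguments, and reads back a single JSON object (msg, changed, failed) on stdout, which is exactly what the code below implements.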
+ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" +) + +type ModuleArgs struct { + Name string +} + +type Response struct { + Msg string `json:"msg"` + Changed bool `json:"changed"` + Failed bool `json:"failed"` +} + +func ExitJson(responseBody Response) { + returnResponse(responseBody) +} + +func FailJson(responseBody Response) { + responseBody.Failed = true + returnResponse(responseBody) +} + +func returnResponse(responseBody Response) { + var response []byte + var err error + response, err = json.Marshal(responseBody) + if err != nil { + response, _ = json.Marshal(Response{Msg: "Invalid response object"}) + } + fmt.Println(string(response)) + if responseBody.Failed { + os.Exit(1) + } else { + os.Exit(0) + } +} + +func main() { + var response Response + + if len(os.Args) != 2 { + response.Msg = "No argument file provided" + FailJson(response) + } + + argsFile := os.Args[1] + + text, err := ioutil.ReadFile(argsFile) + if err != nil { + response.Msg = "Could not read configuration file: " + argsFile + FailJson(response) + } + + var moduleArgs ModuleArgs + err = json.Unmarshal(text, &moduleArgs) + if err != nil { + response.Msg = "Configuration file not valid JSON: " + argsFile + FailJson(response) + } + + var name string = "World" + if moduleArgs.Name != "" { + name = moduleArgs.Name + } + + response.Msg = "Hello, " + name + "!" + ExitJson(response) +} diff --git a/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml b/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml new file mode 100644 index 00000000..35a58dcb --- /dev/null +++ b/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml @@ -0,0 +1,53 @@ +- debug: var=ansible_system + +- name: ping + ping: + when: ansible_system != 'Win32NT' + +- name: win_ping + action: win_ping + when: ansible_system == 'Win32NT' + +- name: Hello, World! + action: "{{ filename }}" + register: hello_world + +- assert: + that: + - 'hello_world.msg == "Hello, World!"' + +- name: Hello, Ansible! + action: "{{ filename }}" + args: + name: Ansible + register: hello_ansible + +- assert: + that: + - 'hello_ansible.msg == "Hello, Ansible!"' + +- name: Async Hello, World! + action: "{{ filename }}" + async: 10 + poll: 1 + when: ansible_system != 'Win32NT' + register: async_hello_world + +- assert: + that: + - 'async_hello_world.msg == "Hello, World!"' + when: async_hello_world is not skipped + +- name: Async Hello, Ansible! 
+ action: "{{ filename }}" + args: + name: Ansible + async: 10 + poll: 1 + when: ansible_system != 'Win32NT' + register: async_hello_ansible + +- assert: + that: + - 'async_hello_ansible.msg == "Hello, Ansible!"' + when: async_hello_ansible is not skipped diff --git a/test/integration/targets/binary_modules/test.sh b/test/integration/targets/binary_modules/test.sh new file mode 100755 index 00000000..7f046670 --- /dev/null +++ b/test/integration/targets/binary_modules/test.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -eux + +[ -f "${INVENTORY}" ] + +ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook download_binary_modules.yml -i "${INVENTORY}" -v "$@" +ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_binary_modules.yml -i "${INVENTORY}" -v "$@" diff --git a/test/integration/targets/binary_modules/test_binary_modules.yml b/test/integration/targets/binary_modules/test_binary_modules.yml new file mode 100644 index 00000000..bdf2a061 --- /dev/null +++ b/test/integration/targets/binary_modules/test_binary_modules.yml @@ -0,0 +1,5 @@ +- hosts: testhost + roles: + - role: test_binary_modules + tags: + - test_binary_modules diff --git a/test/integration/targets/binary_modules_posix/aliases b/test/integration/targets/binary_modules_posix/aliases new file mode 100644 index 00000000..2c6e4a07 --- /dev/null +++ b/test/integration/targets/binary_modules_posix/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +needs/target/binary_modules diff --git a/test/integration/targets/binary_modules_posix/runme.sh b/test/integration/targets/binary_modules_posix/runme.sh new file mode 100755 index 00000000..670477d1 --- /dev/null +++ b/test/integration/targets/binary_modules_posix/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +cd ../binary_modules +INVENTORY=../../inventory ./test.sh "$@" diff --git a/test/integration/targets/binary_modules_winrm/aliases b/test/integration/targets/binary_modules_winrm/aliases new file mode 100644 index 00000000..ba3d2000 --- /dev/null +++ b/test/integration/targets/binary_modules_winrm/aliases @@ -0,0 +1,4 @@ +shippable/windows/group1 +shippable/windows/smoketest +windows +needs/target/binary_modules diff --git a/test/integration/targets/binary_modules_winrm/runme.sh b/test/integration/targets/binary_modules_winrm/runme.sh new file mode 100755 index 00000000..f182c2d6 --- /dev/null +++ b/test/integration/targets/binary_modules_winrm/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +cd ../binary_modules +INVENTORY=../../inventory.winrm ./test.sh "$@" diff --git a/test/integration/targets/blockinfile/aliases b/test/integration/targets/blockinfile/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/blockinfile/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/blockinfile/files/sshd_config b/test/integration/targets/blockinfile/files/sshd_config new file mode 100644 index 00000000..41fea190 --- /dev/null +++ b/test/integration/targets/blockinfile/files/sshd_config @@ -0,0 +1,135 @@ +# $OpenBSD: sshd_config,v 1.100 2016/08/15 12:32:04 naddy Exp $ + +# This is the sshd server system-wide configuration file. See +# sshd_config(5) for more information. + +# This sshd was compiled with PATH=/usr/local/bin:/usr/bin + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options override the +# default value. 
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+#Port 22
+#AddressFamily any
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+#MaxSessions 10
+
+#PubkeyAuthentication yes
+
+# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
+# but this is overridden so installations will only check .ssh/authorized_keys
+AuthorizedKeysFile .ssh/authorized_keys
+
+#AuthorizedPrincipalsFile none
+
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandUser nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PermitEmptyPasswords no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+GSSAPIAuthentication yes
+GSSAPICleanupCredentials no
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+#GSSAPIEnablek5users no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in Fedora and may cause several
+# problems.
+UsePAM yes
+
+#AllowAgentForwarding yes
+#AllowTcpForwarding yes
+#GatewayPorts no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PermitTTY yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation sandbox
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS no
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+# override default of no subsystems
+Subsystem sftp /usr/libexec/openssh/sftp-server
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# PermitTTY no
+# ForceCommand cvs server
diff --git a/test/integration/targets/blockinfile/meta/main.yml b/test/integration/targets/blockinfile/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/blockinfile/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - prepare_tests
diff --git a/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml b/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml
new file mode 100644
index 00000000..dbb93ecc
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml
@@ -0,0 +1,47 @@
+- name: copy the sshd_config to the test dir
+  copy:
+    src: sshd_config
+    dest: "{{ output_dir_test }}"
+
+- name: insert/update "Match User" configuration block in sshd_config
+  blockinfile:
+    path: "{{ output_dir_test }}/sshd_config"
+    block: |
+      Match User ansible-agent
+      PasswordAuthentication no
+    backup: yes
+  register: blockinfile_test0
+
+- name: check content
+  shell: 'grep -c -e "Match User ansible-agent" -e "PasswordAuthentication no" {{ output_dir_test }}/sshd_config'
+  register: blockinfile_test0_grep
+
+- debug:
+    var: blockinfile_test0
+    verbosity: 1
+
+- debug:
+    var: blockinfile_test0_grep
+    verbosity: 1
+
+- name: validate first example results
+  assert:
+    that:
+      - 'blockinfile_test0.changed is defined'
+      - 'blockinfile_test0.msg is defined'
+      - 'blockinfile_test0.changed'
+      - 'blockinfile_test0.msg == "Block inserted"'
+      - 'blockinfile_test0_grep.stdout == "2"'
+
+- name: check idempotence
+  blockinfile:
+    path: "{{ output_dir_test }}/sshd_config"
+    block: |
+      Match User ansible-agent
+      PasswordAuthentication no
+  register: blockinfile_test1
+
+- name: validate idempotence results
+  assert:
+    that:
+      - 'not blockinfile_test1.changed'
diff --git a/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml b/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml
new file mode 100644
index 00000000..57dac60e
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml
@@ -0,0 +1,30 @@
+- name: Add block without trailing line separator
+  blockinfile:
+    path: "{{ output_dir_test }}/chomped_block_test.txt"
+    create: yes
+    content: |-
+      one
+      two
+      three
+  register: chomptest1
+
+- name: Add block without trailing line separator again
+  blockinfile:
+    path: "{{ output_dir_test }}/chomped_block_test.txt"
+    content: |-
+      one
+      two
+      three
+  register: chomptest2
+
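+# Note: the "|-" block scalars above use YAML's strip-chomping indicator,
+# so the block content itself carries no trailing line separator; the
+# second, identical task must then report "not changed" for idempotence.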
+- name: Check output file + stat: + path: "{{ output_dir_test }}/chomped_block_test.txt" + register: chomptest_file + +- name: Ensure chomptest results are correct + assert: + that: + - chomptest1 is changed + - chomptest2 is not changed + - chomptest_file.stat.checksum == '50d49f528a5f7147c7029ed6220c326b1ee2c4ae' diff --git a/test/integration/targets/blockinfile/tasks/create_file.yml b/test/integration/targets/blockinfile/tasks/create_file.yml new file mode 100644 index 00000000..94e47203 --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/create_file.yml @@ -0,0 +1,32 @@ +- name: Create a file with blockinfile + blockinfile: + path: "{{ output_dir_test }}/empty.txt" + block: | + Hey + there + state: present + create: yes + register: empty_test_1 + +- name: Run a task that results in an empty file + blockinfile: + path: "{{ output_dir_test }}/empty.txt" + block: | + Hey + there + state: absent + create: yes + register: empty_test_2 + +- stat: + path: "{{ output_dir_test }}/empty.txt" + register: empty_test_stat + +- name: Ensure empty file was created + assert: + that: + - empty_test_1 is changed + - "'File created' in empty_test_1.msg" + - empty_test_2 is changed + - "'Block removed' in empty_test_2.msg" + - empty_test_stat.stat.size == 0 diff --git a/test/integration/targets/blockinfile/tasks/diff.yml b/test/integration/targets/blockinfile/tasks/diff.yml new file mode 100644 index 00000000..4a2f9454 --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/diff.yml @@ -0,0 +1,18 @@ +- name: Create a test file + copy: + content: diff test + dest: "{{ output_dir_test }}/diff.txt" + +- name: Add block to file with diff + blockinfile: + path: "{{ output_dir_test }}/diff.txt" + block: | + line 1 + line 2 + register: difftest + diff: yes + +- name: Ensure diff was shown + assert: + that: + - difftest.diff | length > 0 diff --git a/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml b/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml new file mode 100644 index 00000000..fe4e2abc --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml @@ -0,0 +1,36 @@ +- name: Create file without trailing newline + copy: + content: '# File with no newline' + dest: "{{ output_dir_test }}/no_newline_at_end.txt" + register: no_newline + + +- name: Add block to file that does not have a newline at the end + blockinfile: + path: "{{ output_dir_test }}/no_newline_at_end.txt" + content: | + one + two + three + register: no_newline_test1 + +- name: Add block to file that does not have a newline at the end again + blockinfile: + path: "{{ output_dir_test }}/no_newline_at_end.txt" + content: | + one + two + three + register: no_newline_test2 + +- name: Stat the file + stat: + path: "{{ output_dir_test }}/no_newline_at_end.txt" + register: no_newline_file + +- name: Ensure block was correctly written to file with no newline at end + assert: + that: + - no_newline_test1 is changed + - no_newline_test2 is not changed + - no_newline_file.stat.checksum == 'dab16f864025e59125e74d1498ffb2bb048224e6' diff --git a/test/integration/targets/blockinfile/tasks/insertafter.yml b/test/integration/targets/blockinfile/tasks/insertafter.yml new file mode 100644 index 00000000..daf7bcf1 --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/insertafter.yml @@ -0,0 +1,37 @@ +- name: Create insertafter test file + copy: + dest: "{{ output_dir }}/after.txt" + content: | + line1 + line2 + line3 + +- name: Add block using 
insertafter
+  blockinfile:
+    path: "{{ output_dir }}/after.txt"
+    insertafter: line2
+    block: |
+      block1
+      block2
+  register: after1
+
+- name: Add block using insertafter again
+  blockinfile:
+    path: "{{ output_dir }}/after.txt"
+    insertafter: line2
+    block: |
+      block1
+      block2
+  register: after2
+
+- name: Stat the after.txt file
+  stat:
+    path: "{{ output_dir }}/after.txt"
+  register: after_file
+
+- name: Ensure insertafter worked correctly
+  assert:
+    that:
+      - after1 is changed
+      - after2 is not changed
+      - after_file.stat.checksum == 'a8adeb971358230a28ce554f3b8fdd1ef65fdf1c'
diff --git a/test/integration/targets/blockinfile/tasks/insertbefore.yml b/test/integration/targets/blockinfile/tasks/insertbefore.yml
new file mode 100644
index 00000000..6089af15
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/insertbefore.yml
@@ -0,0 +1,39 @@
+- name: Create insertbefore test file
+  copy:
+    dest: "{{ output_dir }}/before.txt"
+    content: |
+      line1
+      line2
+      line3
+
+- name: Add block using insertbefore
+  blockinfile:
+    path: "{{ output_dir }}/before.txt"
+    insertbefore: line2
+    block: |
+      block1
+      block2
+  register: after1
+
+- name: Add block using insertbefore again
+  blockinfile:
+    path: "{{ output_dir }}/before.txt"
+    insertbefore: line2
+    block: |
+      block1
+      block2
+  register: after2
+
+- name: Stat the before.txt file
+  stat:
+    path: "{{ output_dir }}/before.txt"
+  register: after_file
+
+- command: cat {{ output_dir }}/before.txt
+
+- name: Ensure insertbefore worked correctly
+  assert:
+    that:
+      - after1 is changed
+      - after2 is not changed
+      - after_file.stat.checksum == '16681d1d7f29d173243bb951d6afb9c0824d7bf4'
diff --git a/test/integration/targets/blockinfile/tasks/main.yml b/test/integration/targets/blockinfile/tasks/main.yml
new file mode 100644
index 00000000..4bc0b8d1
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/main.yml
@@ -0,0 +1,40 @@
+# Test code for the blockinfile module.
+# (c) 2017, James Tanner
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
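+
+# The scenario files imported below all write into a dedicated scratch
+# directory, which is removed and recreated first so each run starts clean.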
+ +- set_fact: + output_dir_test: "{{ output_dir }}/test_blockinfile" + +- name: make sure our testing sub-directory does not exist + file: + path: "{{ output_dir_test }}" + state: absent + +- name: create our testing sub-directory + file: + path: "{{ output_dir_test }}" + state: directory + +- import_tasks: add_block_to_existing_file.yml +- import_tasks: create_file.yml +- import_tasks: preserve_line_endings.yml +- import_tasks: block_without_trailing_newline.yml +- import_tasks: file_without_trailing_newline.yml +- import_tasks: diff.yml +- import_tasks: validate.yml +- import_tasks: insertafter.yml +- import_tasks: insertbefore.yml diff --git a/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml b/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml new file mode 100644 index 00000000..bb2dee29 --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml @@ -0,0 +1,24 @@ +- name: create line_endings_test.txt in the test dir + copy: + dest: "{{ output_dir_test }}/line_endings_test.txt" + # generating the content like this instead of copying a fixture file + # prevents sanity checks from warning about mixed line endings + content: "unix\nunix\nunix\n\ndos\r\ndos\r\ndos\r\n\nunix\nunix\n# BEGIN ANSIBLE MANAGED BLOCK\ndos\r\n# END ANSIBLE MANAGED BLOCK\nunix\nunix\nunix\nunix\n" + +- name: insert/update "dos" configuration block in line_endings_test.txt + blockinfile: + path: "{{ output_dir_test }}/line_endings_test.txt" + block: "dos\r\ndos\r\ndos\r\n" + register: blockinfile_test2 + +- name: check content + # using the more precise `grep -Pc "^dos\\r$" ...` fails on BSD/macOS + shell: 'grep -c "^dos.$" {{ output_dir_test }}/line_endings_test.txt' + register: blockinfile_test2_grep + +- name: validate line_endings_test.txt results + assert: + that: + - 'blockinfile_test2 is changed' + - 'blockinfile_test2.msg == "Block inserted"' + - 'blockinfile_test2_grep.stdout == "6"' diff --git a/test/integration/targets/blockinfile/tasks/validate.yml b/test/integration/targets/blockinfile/tasks/validate.yml new file mode 100644 index 00000000..105bca53 --- /dev/null +++ b/test/integration/targets/blockinfile/tasks/validate.yml @@ -0,0 +1,28 @@ +- name: EXPECTED FAILURE test improper validate + blockinfile: + path: "{{ output_dir }}/validate.txt" + block: | + line1 + line2 + create: yes + validate: grep + ignore_errors: yes + +- name: EXPECTED FAILURE test failure to validate + blockinfile: + path: "{{ output_dir }}/validate.txt" + block: | + line1 + line2 + create: yes + validate: grep line47 %s + ignore_errors: yes + +- name: Test proper validate + blockinfile: + path: "{{ output_dir }}/validate.txt" + block: | + line1 + line2 + create: yes + validate: grep line1 %s diff --git a/test/integration/targets/blocks/aliases b/test/integration/targets/blocks/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/blocks/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/blocks/always_failure_no_rescue_rc.yml b/test/integration/targets/blocks/always_failure_no_rescue_rc.yml new file mode 100644 index 00000000..924643ce --- /dev/null +++ b/test/integration/targets/blocks/always_failure_no_rescue_rc.yml @@ -0,0 +1,13 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - name: EXPECTED FAILURE + fail: + msg: Failure in block + always: + - name: EXPECTED FAILURE + fail: + msg: Failure in always + - debug: + msg: DID NOT RUN diff --git 
a/test/integration/targets/blocks/always_failure_with_rescue_rc.yml b/test/integration/targets/blocks/always_failure_with_rescue_rc.yml new file mode 100644 index 00000000..f3029cbc --- /dev/null +++ b/test/integration/targets/blocks/always_failure_with_rescue_rc.yml @@ -0,0 +1,16 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - name: EXPECTED FAILURE + fail: + msg: Failure in block + rescue: + - debug: + msg: Rescue + always: + - name: EXPECTED FAILURE + fail: + msg: Failure in always + - debug: + msg: DID NOT RUN diff --git a/test/integration/targets/blocks/always_no_rescue_rc.yml b/test/integration/targets/blocks/always_no_rescue_rc.yml new file mode 100644 index 00000000..a4e86416 --- /dev/null +++ b/test/integration/targets/blocks/always_no_rescue_rc.yml @@ -0,0 +1,12 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - name: EXPECTED FAILURE + fail: + msg: Failure in block + always: + - debug: + msg: Always + - debug: + msg: DID NOT RUN diff --git a/test/integration/targets/blocks/block_fail.yml b/test/integration/targets/blocks/block_fail.yml new file mode 100644 index 00000000..6b84d056 --- /dev/null +++ b/test/integration/targets/blocks/block_fail.yml @@ -0,0 +1,5 @@ +--- +- name: Include tasks that have a failure in a block + hosts: localhost + tasks: + - include_tasks: block_fail_tasks.yml diff --git a/test/integration/targets/blocks/block_fail_tasks.yml b/test/integration/targets/blocks/block_fail_tasks.yml new file mode 100644 index 00000000..6e70dc23 --- /dev/null +++ b/test/integration/targets/blocks/block_fail_tasks.yml @@ -0,0 +1,9 @@ +- block: + - name: EXPECTED FAILURE + fail: + msg: failure + + always: + - name: run always task + debug: + msg: TEST COMPLETE diff --git a/test/integration/targets/blocks/block_in_rescue.yml b/test/integration/targets/blocks/block_in_rescue.yml new file mode 100644 index 00000000..15360304 --- /dev/null +++ b/test/integration/targets/blocks/block_in_rescue.yml @@ -0,0 +1,33 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - name: "EXPECTED FAILURE" + fail: + msg: "fail to test single level block in rescue" + rescue: + - block: + - debug: + msg: Rescued! + + - block: + - name: "EXPECTED FAILURE" + fail: + msg: "fail to test multi-level block in rescue" + rescue: + - block: + - block: + - debug: + msg: Rescued! 
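+  # A rescue section may itself contain further nested blocks, as above;
+  # runme.sh expects this playbook to finish with rescued=3 and failed=0.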
+
+  - name: "Outer block"
+    block:
+      - name: "Inner block"
+        block:
+          - name: "EXPECTED FAILURE"
+            fail:
+              msg: "fail to test multi-level block"
+        rescue:
+          - name: "Rescue block"
+            block:
+              - debug: msg="Inner block rescue"
diff --git a/test/integration/targets/blocks/block_rescue_vars.yml b/test/integration/targets/blocks/block_rescue_vars.yml
new file mode 100644
index 00000000..404f7a37
--- /dev/null
+++ b/test/integration/targets/blocks/block_rescue_vars.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+  gather_facts: no
+  tasks:
+    - block:
+        - name: EXPECTED FAILURE
+          fail:
+      rescue:
+        - name: Assert that ansible_failed_task is defined
+          assert:
+            that:
+              - ansible_failed_task is defined
+
+        - name: Assert that ansible_failed_result is defined
+          assert:
+            that:
+              - ansible_failed_result is defined
diff --git a/test/integration/targets/blocks/fail.yml b/test/integration/targets/blocks/fail.yml
new file mode 100644
index 00000000..ae946551
--- /dev/null
+++ b/test/integration/targets/blocks/fail.yml
@@ -0,0 +1,2 @@
+- name: EXPECTED FAILURE
+  fail: msg="{{msg}}"
diff --git a/test/integration/targets/blocks/finalized_task.yml b/test/integration/targets/blocks/finalized_task.yml
new file mode 100644
index 00000000..300401b5
--- /dev/null
+++ b/test/integration/targets/blocks/finalized_task.yml
@@ -0,0 +1,17 @@
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - block:
+        - include_role:
+            name: '{{ item }}'
+          loop:
+            - fail
+      rescue:
+        - debug:
+            msg: "{{ ansible_failed_task.name }}"
+
+        - assert:
+            that:
+              - ansible_failed_task.name == "Fail"
+              - ansible_failed_task.action == "fail"
+              - ansible_failed_task.parent is not defined
diff --git a/test/integration/targets/blocks/issue29047.yml b/test/integration/targets/blocks/issue29047.yml
new file mode 100644
index 00000000..9743773c
--- /dev/null
+++ b/test/integration/targets/blocks/issue29047.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+  gather_facts: no
+  tasks:
+    - include_tasks: issue29047_tasks.yml
diff --git a/test/integration/targets/blocks/issue29047_tasks.yml b/test/integration/targets/blocks/issue29047_tasks.yml
new file mode 100644
index 00000000..3470d867
--- /dev/null
+++ b/test/integration/targets/blocks/issue29047_tasks.yml
@@ -0,0 +1,13 @@
+---
+- name: "EXPECTED FAILURE"
+  block:
+    - fail:
+        msg: "EXPECTED FAILURE"
+  rescue:
+    - name: Assert that ansible_failed_task is defined
+      assert:
+        that: ansible_failed_task is defined
+
+    - name: Assert that ansible_failed_result is defined
+      assert:
+        that: ansible_failed_result is defined
diff --git a/test/integration/targets/blocks/issue71306.yml b/test/integration/targets/blocks/issue71306.yml
new file mode 100644
index 00000000..9762f6ee
--- /dev/null
+++ b/test/integration/targets/blocks/issue71306.yml
@@ -0,0 +1,16 @@
+- hosts: all
+  gather_facts: no
+  tasks:
+    - block:
+        - block:
+            - block:
+                - name: EXPECTED FAILURE
+                  fail:
+                  when: ansible_host == "host1"
+
+                - debug:
+                    msg: "I am successful!"
+                  run_once: true
+      rescue:
+        - debug:
+            msg: "Attempt 1 failed!"
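+# Regression test for https://github.com/ansible/ansible/issues/71306:
+# the failure on host1 deep inside the nested blocks must not prevent the
+# run_once debug on the surviving host, and runme.sh asserts that the
+# play still exits with rc 0.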
diff --git a/test/integration/targets/blocks/main.yml b/test/integration/targets/blocks/main.yml new file mode 100644 index 00000000..012d5ab2 --- /dev/null +++ b/test/integration/targets/blocks/main.yml @@ -0,0 +1,128 @@ +- name: simple block test + hosts: testhost + gather_facts: yes + strategy: "{{test_strategy|default('linear')}}" + vars: + block_tasks_run: false + block_rescue_run: false + block_always_run: false + nested_block_always_run: false + tasks_run_after_failure: false + rescue_run_after_failure: false + always_run_after_failure: false + nested_block_fail_always: false + tasks: + - block: + - name: set block tasks run flag + set_fact: + block_tasks_run: true + - name: EXPECTED FAILURE fail in tasks + fail: + - name: tasks flag should not be set after failure + set_fact: + tasks_run_after_failure: true + rescue: + - name: set block rescue run flag + set_fact: + block_rescue_run: true + - name: EXPECTED FAILURE fail in rescue + fail: + - name: tasks flag should not be set after failure in rescue + set_fact: + rescue_run_after_failure: true + always: + - name: set block always run flag + set_fact: + block_always_run: true + #- block: + # - meta: noop + # always: + # - name: set nested block always run flag + # set_fact: + # nested_block_always_run: true + # - name: fail in always + # fail: + # - name: tasks flag should not be set after failure in always + # set_fact: + # always_run_after_failure: true + - meta: clear_host_errors + + # https://github.com/ansible/ansible/issues/35148 + - block: + - block: + - name: EXPECTED FAILURE test triggering always by failing in nested block with run_once set + fail: + run_once: true + always: + - name: set block fail always run flag + set_fact: + nested_block_fail_always: true + - meta: clear_host_errors + + - block: + - block: + - name: EXPECTED FAILURE test triggering always by failing in nested block with any_errors_fatal set + fail: + any_errors_fatal: true + always: + - name: set block fail always run flag + set_fact: + nested_block_fail_always: true + - meta: clear_host_errors + + post_tasks: + - assert: + that: + - block_tasks_run + - block_rescue_run + - block_always_run + #- nested_block_always_run + - not tasks_run_after_failure + - not rescue_run_after_failure + - not always_run_after_failure + - nested_block_fail_always + - debug: msg="TEST COMPLETE" + +- name: block with includes + hosts: testhost + gather_facts: yes + strategy: "{{test_strategy|default('linear')}}" + vars: + rescue_run_after_include_fail: false + always_run_after_include_fail_in_rescue: false + tasks_run_after_failure: false + rescue_run_after_failure: false + always_run_after_failure: false + tasks: + - block: + - name: include fail.yml in tasks + include: fail.yml + args: + msg: "failed from tasks" + - name: tasks flag should not be set after failure + set_fact: + tasks_run_after_failure: true + rescue: + - set_fact: + rescue_run_after_include_fail: true + - name: include fail.yml in rescue + include: fail.yml + args: + msg: "failed from rescue" + - name: flag should not be set after failure in rescue + set_fact: + rescue_run_after_failure: true + always: + - set_fact: + always_run_after_include_fail_in_rescue: true + - meta: clear_host_errors + + post_tasks: + - assert: + that: + - rescue_run_after_include_fail + - always_run_after_include_fail_in_rescue + - not tasks_run_after_failure + - not rescue_run_after_failure + - not always_run_after_failure + - debug: msg="TEST COMPLETE" diff --git a/test/integration/targets/blocks/nested_fail.yml 
b/test/integration/targets/blocks/nested_fail.yml
new file mode 100644
index 00000000..31ae870e
--- /dev/null
+++ b/test/integration/targets/blocks/nested_fail.yml
@@ -0,0 +1,3 @@
+- include: fail.yml
+  args:
+    msg: "nested {{msg}}"
diff --git a/test/integration/targets/blocks/nested_nested_fail.yml b/test/integration/targets/blocks/nested_nested_fail.yml
new file mode 100644
index 00000000..e9a050fb
--- /dev/null
+++ b/test/integration/targets/blocks/nested_nested_fail.yml
@@ -0,0 +1,3 @@
+- include: nested_fail.yml
+  args:
+    msg: "nested {{msg}}"
diff --git a/test/integration/targets/blocks/roles/fail/tasks/main.yml b/test/integration/targets/blocks/roles/fail/tasks/main.yml
new file mode 100644
index 00000000..176fe542
--- /dev/null
+++ b/test/integration/targets/blocks/roles/fail/tasks/main.yml
@@ -0,0 +1,3 @@
+- name: Fail
+  fail:
+    msg: fail
diff --git a/test/integration/targets/blocks/runme.sh b/test/integration/targets/blocks/runme.sh
new file mode 100755
index 00000000..4f3db1db
--- /dev/null
+++ b/test/integration/targets/blocks/runme.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# This test does not use "$@" to avoid further increasing the verbosity beyond what is required for the test.
+# Increasing verbosity from -vv to -vvv can increase the line count from ~400 to ~9K on our centos6 test container.
+
+# remove old output log
+rm -f block_test.out
+# run the test and check to make sure the right number of completions was logged
+ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
+env python -c \
+    'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+    <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+# cleanup the output log again, to make sure the test is clean
+rm -f block_test.out block_test_wo_colors.out
+# run test with free strategy and again count the completions
+ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
+env python -c \
+    'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+    <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+# cleanup the output log again, to make sure the test is clean
+rm -f block_test.out block_test_wo_colors.out
+# run test with host_pinned strategy and again count the completions
+ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
+env python -c \
+    'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+    <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+
+# run test that includes tasks that fail inside a block with always
+rm -f block_test.out block_test_wo_colors.out
+ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
+env python -c \
+    'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+    <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+
+ansible-playbook -vv block_rescue_vars.yml
+
+# https://github.com/ansible/ansible/issues/70000
+set +e
+exit_code=0
+ansible-playbook -vv always_failure_with_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Rescue' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+set +e
+exit_code=0
+ansible-playbook -vv always_no_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+set +e
+exit_code=0
+ansible-playbook -vv always_failure_no_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+# https://github.com/ansible/ansible/issues/71306
+set +e
+exit_code=0
+ansible-playbook -i host1,host2 -vv issue71306.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 0 ]
+rm -f rc_test.out
+
+# https://github.com/ansible/ansible/issues/29047
+ansible-playbook -vv issue29047.yml -i ../../inventory
+
+# https://github.com/ansible/ansible/issues/61253
+ansible-playbook -vv block_in_rescue.yml -i ../../inventory > rc_test.out
+cat rc_test.out
+[ "$(grep -c 'rescued=3' rc_test.out)" -eq 1 ]
+[ "$(grep -c 'failed=0' rc_test.out)" -eq 1 ]
+rm -f rc_test.out
diff --git a/test/integration/targets/builtin_vars_prompt/aliases b/test/integration/targets/builtin_vars_prompt/aliases
new file mode 100644
index 00000000..4317d112
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/aliases
@@ -0,0 +1,3 @@
+setup/always/setup_passlib
+setup/always/setup_pexpect
+shippable/posix/group4
diff --git a/test/integration/targets/builtin_vars_prompt/runme.sh b/test/integration/targets/builtin_vars_prompt/runme.sh
new file mode 100755
index 00000000..af555794
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Interactively test vars_prompt
+python test-vars_prompt.py -i ../../inventory "$@"
diff --git a/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py b/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py
new file mode 100644
index 00000000..6c805fdd
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+import os
+import pexpect
+import sys
+
+from ansible.module_utils.six import PY2
+
+if PY2:
+    log_buffer = sys.stdout
+else:
+    log_buffer = sys.stdout.buffer
+
+env_vars = {
+    'ANSIBLE_ROLES_PATH': './roles',
+    'ANSIBLE_NOCOLOR': 'True',
+    'ANSIBLE_RETRY_FILES_ENABLED': 'False',
+}
+
+
+def run_test(playbook, test_spec, args=None, timeout=10, env=None):
+
+    if not env:
+        env = os.environ.copy()
+    env.update(env_vars)
+
+    if not args:
+        args = sys.argv[1:]
+
+    vars_prompt_test = pexpect.spawn(
+        'ansible-playbook',
+        args=[playbook] + args,
+        timeout=timeout,
+        env=env,
+    )
+
+    vars_prompt_test.logfile = log_buffer
+    for item in test_spec[0]:
+        vars_prompt_test.expect(item[0])
+        if item[1]:
+            vars_prompt_test.send(item[1])
+    vars_prompt_test.expect(test_spec[1])
+    vars_prompt_test.expect(pexpect.EOF)
+    vars_prompt_test.close()
+
+
+# These are the tests to run. Each test is a playbook and a test_spec.
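+# run_test() drives ansible-playbook under pexpect: for each (pattern,
+# response) pair it waits for the prompt pattern and sends the response,
+# then requires the final expected string before EOF.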
+# +# The test_spec is a list with two elements. +# +# The first element is a list of two element tuples. The first is the regexp to look +# for in the output, the second is the line to send. +# +# The last element is the last string of text to look for in the output. +# +tests = [ + # Basic vars_prompt + {'playbook': 'vars_prompt-1.yml', + 'test_spec': [ + [('input:', 'some input\r')], + '"input": "some input"']}, + + # Custom prompt + {'playbook': 'vars_prompt-2.yml', + 'test_spec': [ + [('Enter some input:', 'some more input\r')], + '"input": "some more input"']}, + + # Test confirm, both correct and incorrect + {'playbook': 'vars_prompt-3.yml', + 'test_spec': [ + [('input:', 'confirm me\r'), + ('confirm input:', 'confirm me\r')], + '"input": "confirm me"']}, + + {'playbook': 'vars_prompt-3.yml', + 'test_spec': [ + [('input:', 'confirm me\r'), + ('confirm input:', 'incorrect\r'), + (r'\*\*\*\*\* VALUES ENTERED DO NOT MATCH \*\*\*\*', ''), + ('input:', 'confirm me\r'), + ('confirm input:', 'confirm me\r')], + '"input": "confirm me"']}, + + # Test private + {'playbook': 'vars_prompt-4.yml', + 'test_spec': [ + [('not_secret', 'this is displayed\r'), + ('this is displayed', '')], + '"not_secret": "this is displayed"']}, + + # Test hashing + {'playbook': 'vars_prompt-5.yml', + 'test_spec': [ + [('password', 'Scenic-Improving-Payphone\r'), + ('confirm password', 'Scenic-Improving-Payphone\r')], + r'"password": "\$6\$']}, + + # Test variables in prompt field + # https://github.com/ansible/ansible/issues/32723 + {'playbook': 'vars_prompt-6.yml', + 'test_spec': [ + [('prompt from variable:', 'input\r')], + '']}, + + # Test play vars coming from vars_prompt + # https://github.com/ansible/ansible/issues/37984 + {'playbook': 'vars_prompt-7.yml', + 'test_spec': [ + [('prompting for host:', 'testhost\r')], + r'testhost.*ok=1']}, + + # Test play unsafe toggle + {'playbook': 'unsafe.yml', + 'test_spec': [ + [('prompting for variable:', '{{whole}}\r')], + r'testhost.*ok=2']}, + + # Test unsupported keys + {'playbook': 'unsupported.yml', + 'test_spec': [ + [], + "Invalid vars_prompt data structure, found unsupported key 'when'"]}, +] + +for t in tests: + run_test(playbook=t['playbook'], test_spec=t['test_spec']) diff --git a/test/integration/targets/builtin_vars_prompt/unsafe.yml b/test/integration/targets/builtin_vars_prompt/unsafe.yml new file mode 100644 index 00000000..348ce152 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/unsafe.yml @@ -0,0 +1,20 @@ +- name: Test vars_prompt unsafe + hosts: testhost + become: no + gather_facts: no + vars: + whole: INVALID + vars_prompt: + - name: input + prompt: prompting for variable + unsafe: true + + tasks: + - name: + assert: + that: + - input != whole + - input != 'INVALID' + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/unsupported.yml b/test/integration/targets/builtin_vars_prompt/unsupported.yml new file mode 100644 index 00000000..eab02fd6 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/unsupported.yml @@ -0,0 +1,18 @@ +- name: Test vars_prompt unsupported key + hosts: testhost + become: no + gather_facts: no + vars_prompt: + - name: input + prompt: prompting for variable + # Unsupported key for vars_prompt + when: foo is defined + + tasks: + - name: + assert: + that: + - input is not defined + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml new file mode 100644 index 
00000000..727c60e7 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml @@ -0,0 +1,15 @@ +- name: Basic vars_prompt test + hosts: testhost + become: no + gather_facts: no + + vars_prompt: + - name: input + + tasks: + - assert: + that: + - input == 'some input' + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml new file mode 100644 index 00000000..d8f20db8 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml @@ -0,0 +1,16 @@ +- name: Test vars_prompt custom prompt + hosts: testhost + become: no + gather_facts: no + + vars_prompt: + - name: input + prompt: "Enter some input" + + tasks: + - assert: + that: + - input == 'some more input' + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml new file mode 100644 index 00000000..f8148182 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml @@ -0,0 +1,17 @@ +- name: Test vars_prompt confirm + hosts: testhost + become: no + gather_facts: no + + vars_prompt: + - name: input + confirm: yes + + tasks: + - name: + assert: + that: + - input == 'confirm me' + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml new file mode 100644 index 00000000..d33cc902 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml @@ -0,0 +1,16 @@ +- name: Test vars_prompt not private + hosts: testhost + become: no + gather_facts: no + + vars_prompt: + - name: not_secret + private: no + + tasks: + - assert: + that: + - not_secret == 'this is displayed' + + - debug: + var: not_secret diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml new file mode 100644 index 00000000..62c8ad8e --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml @@ -0,0 +1,14 @@ +- name: Test vars_prompt hashing + hosts: testhost + become: no + gather_facts: no + + vars_prompt: + - name: password + confirm: yes + encrypt: sha512_crypt + salt: 'jESIyad4F08hP3Ta' + + tasks: + - debug: + var: password diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml new file mode 100644 index 00000000..ea3fe620 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml @@ -0,0 +1,20 @@ +- name: Test vars_prompt custom variables in prompt + hosts: testhost + become: no + gather_facts: no + + vars: + prompt_var: prompt from variable + + vars_prompt: + - name: input + prompt: "{{ prompt_var }}" + + tasks: + - name: + assert: + that: + - input == 'input' + + - debug: + var: input diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml new file mode 100644 index 00000000..a6b086d0 --- /dev/null +++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml @@ -0,0 +1,12 @@ +- name: Test vars_prompt play vars + hosts: "{{ target_hosts }}" + become: no + gather_facts: no + + vars_prompt: + - name: target_hosts + prompt: prompting for host + private: no + + tasks: + - ping: diff --git a/test/integration/targets/callback_default/aliases 
b/test/integration/targets/callback_default/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/callback_default/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr new file mode 100644 index 00000000..431a0200 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory --check test_dryrun.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout new file mode 100644 index 00000000..8a349097 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout @@ -0,0 +1,78 @@ + +DRY RUN ************************************************************************ + +PLAY [A common play] [CHECK MODE] ********************************************** + +TASK [debug] [CHECK MODE] ****************************************************** +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] [CHECK MODE] **************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with check_mode: true (runs always in check_mode)] [CHECK MODE] ***** + +TASK [debug] [CHECK MODE] ****************************************************** +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] [CHECK MODE] **************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with check_mode: false (runs always in wet mode)] ******************* + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with a block with check_mode: true] [CHECK MODE] ******************** + +TASK [Command] [CHECK MODE] **************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with a block with check_mode: false] [CHECK MODE] ******************* + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY RECAP 
********************************************************************* +testhost : ok=10 changed=7 unreachable=0 failed=0 skipped=8 rescued=0 ignored=0 + + +DRY RUN ************************************************************************ diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr new file mode 100644 index 00000000..e4309428 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test_dryrun.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout new file mode 100644 index 00000000..f5f45105 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout @@ -0,0 +1,74 @@ + +PLAY [A common play] *********************************************************** + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with check_mode: true (runs always in check_mode)] [CHECK MODE] ***** + +TASK [debug] [CHECK MODE] ****************************************************** +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] [CHECK MODE] **************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with check_mode: false (runs always in wet mode)] ******************* + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with a block with check_mode: true] ********************************* + +TASK [Command] [CHECK MODE] **************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY [Play with a block with check_mode: false] ******************************** + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] [CHECK MODE] ****************************** +skipping: [testhost] + +PLAY RECAP ********************************************************************* +testhost : 
ok=11 changed=8 unreachable=0 failed=0 skipped=7 rescued=0 ignored=0 + diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr new file mode 100644 index 00000000..431a0200 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory --check test_dryrun.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout new file mode 100644 index 00000000..e984d499 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout @@ -0,0 +1,74 @@ + +PLAY [A common play] *********************************************************** + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] ***************************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with check_mode: true (runs always in check_mode)] ****************** + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] ***************************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with check_mode: false (runs always in wet mode)] ******************* + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: True" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with a block with check_mode: true] ********************************* + +TASK [Command] ***************************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with a block with check_mode: false] ******************************** + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=10 changed=7 unreachable=0 failed=0 skipped=8 rescued=0 ignored=0 + diff --git 
a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr new file mode 100644 index 00000000..e4309428 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test_dryrun.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout new file mode 100644 index 00000000..2b331bb8 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout @@ -0,0 +1,74 @@ + +PLAY [A common play] *********************************************************** + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with check_mode: true (runs always in check_mode)] ****************** + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] ***************************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with check_mode: false (runs always in wet mode)] ******************* + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "msg": "ansible_check_mode: False" +} + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with a block with check_mode: true] ********************************* + +TASK [Command] ***************************************************************** +skipping: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY [Play with a block with check_mode: false] ******************************** + +TASK [Command] ***************************************************************** +changed: [testhost] + +TASK [Command with check_mode: false] ****************************************** +changed: [testhost] + +TASK [Command with check_mode: true] ******************************************* +skipping: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=11 changed=8 unreachable=0 failed=0 skipped=7 rescued=0 ignored=0 + diff --git a/test/integration/targets/callback_default/callback_default.out.default.stderr 
b/test/integration/targets/callback_default/callback_default.out.default.stderr new file mode 100644 index 00000000..d3e07d47 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.default.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.default.stdout b/test/integration/targets/callback_default/callback_default.out.default.stdout new file mode 100644 index 00000000..05f90beb --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.default.stdout @@ -0,0 +1,72 @@ + +PLAY [testhost] **************************************************************** + +TASK [Changed task] ************************************************************ +changed: [testhost] + +TASK [Ok task] ***************************************************************** +ok: [testhost] + +TASK [Failed task] ************************************************************* +fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"} +...ignoring + +TASK [Skipped task] ************************************************************ +skipping: [testhost] + +TASK [Task with var in name (foo bar)] ***************************************** +changed: [testhost] + +TASK [Loop task] *************************************************************** +changed: [testhost] => (item=foo-1) +changed: [testhost] => (item=foo-2) +changed: [testhost] => (item=foo-3) + +TASK [debug loop] ************************************************************** +changed: [testhost] => (item=debug-1) => { + "msg": "debug-1" +} +failed: [testhost] (item=debug-2) => { + "msg": "debug-2" +} +ok: [testhost] => (item=debug-3) => { + "msg": "debug-3" +} +skipping: [testhost] => (item=debug-4) +fatal: [testhost]: FAILED! => {"msg": "All items completed"} +...ignoring + +TASK [EXPECTED FAILURE Failed task to be rescued] ****************************** +fatal: [testhost]: FAILED! 
=> {"changed": false, "msg": "Failed as requested from task"} + +TASK [Rescue task] ************************************************************* +changed: [testhost] + +TASK [include_tasks] *********************************************************** +included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1) + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "item": 1 +} + +RUNNING HANDLER [Test handler 1] *********************************************** +changed: [testhost] + +RUNNING HANDLER [Test handler 2] *********************************************** +ok: [testhost] + +RUNNING HANDLER [Test handler 3] *********************************************** +changed: [testhost] + +PLAY [testhost] **************************************************************** + +TASK [First free task] ********************************************************* +changed: [testhost] + +TASK [Second free task] ******************************************************** +changed: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2 + diff --git a/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr new file mode 100644 index 00000000..932a2e4f --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr @@ -0,0 +1,5 @@ ++ ansible-playbook -i inventory test.yml +++ set +x +fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"} +fatal: [testhost]: FAILED! => {"msg": "All items completed"} +fatal: [testhost]: FAILED! 
=> {"changed": false, "msg": "Failed as requested from task"} diff --git a/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout new file mode 100644 index 00000000..fe990d42 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout @@ -0,0 +1,69 @@ + +PLAY [testhost] **************************************************************** + +TASK [Changed task] ************************************************************ +changed: [testhost] + +TASK [Ok task] ***************************************************************** +ok: [testhost] + +TASK [Failed task] ************************************************************* +...ignoring + +TASK [Skipped task] ************************************************************ +skipping: [testhost] + +TASK [Task with var in name (foo bar)] ***************************************** +changed: [testhost] + +TASK [Loop task] *************************************************************** +changed: [testhost] => (item=foo-1) +changed: [testhost] => (item=foo-2) +changed: [testhost] => (item=foo-3) + +TASK [debug loop] ************************************************************** +changed: [testhost] => (item=debug-1) => { + "msg": "debug-1" +} +failed: [testhost] (item=debug-2) => { + "msg": "debug-2" +} +ok: [testhost] => (item=debug-3) => { + "msg": "debug-3" +} +skipping: [testhost] => (item=debug-4) +...ignoring + +TASK [EXPECTED FAILURE Failed task to be rescued] ****************************** + +TASK [Rescue task] ************************************************************* +changed: [testhost] + +TASK [include_tasks] *********************************************************** +included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1) + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "item": 1 +} + +RUNNING HANDLER [Test handler 1] *********************************************** +changed: [testhost] + +RUNNING HANDLER [Test handler 2] *********************************************** +ok: [testhost] + +RUNNING HANDLER [Test handler 3] *********************************************** +changed: [testhost] + +PLAY [testhost] **************************************************************** + +TASK [First free task] ********************************************************* +changed: [testhost] + +TASK [Second free task] ******************************************************** +changed: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2 + diff --git a/test/integration/targets/callback_default/callback_default.out.free.stdout b/test/integration/targets/callback_default/callback_default.out.free.stdout new file mode 100644 index 00000000..0ec04479 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.free.stdout @@ -0,0 +1,35 @@ + +PLAY [nonlockstep] ************************************************************* + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] 
***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost12] + +TASK [command] ***************************************************************** +changed: [testhost12] + +TASK [command] ***************************************************************** +changed: [testhost12] + +PLAY RECAP ********************************************************************* +testhost10 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +testhost11 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +testhost12 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + diff --git a/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr b/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr new file mode 100644 index 00000000..d3e07d47 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout b/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout new file mode 100644 index 00000000..c1e1846b --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout @@ -0,0 +1,56 @@ + +PLAY [testhost] **************************************************************** + +TASK [Changed task] ************************************************************ +changed: [testhost] + +TASK [Failed task] ************************************************************* +fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"} +...ignoring + +TASK [Skipped task] ************************************************************ +skipping: [testhost] + +TASK [Task with var in name (foo bar)] ***************************************** +changed: [testhost] + +TASK [Loop task] *************************************************************** +changed: [testhost] => (item=foo-1) +changed: [testhost] => (item=foo-2) +changed: [testhost] => (item=foo-3) + +TASK [debug loop] ************************************************************** +changed: [testhost] => (item=debug-1) => { + "msg": "debug-1" +} +failed: [testhost] (item=debug-2) => { + "msg": "debug-2" +} +skipping: [testhost] => (item=debug-4) +fatal: [testhost]: FAILED! => {"msg": "All items completed"} +...ignoring + +TASK [EXPECTED FAILURE Failed task to be rescued] ****************************** +fatal: [testhost]: FAILED! 
=> {"changed": false, "msg": "Failed as requested from task"} + +TASK [Rescue task] ************************************************************* +changed: [testhost] +included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1) + +RUNNING HANDLER [Test handler 1] *********************************************** +changed: [testhost] + +RUNNING HANDLER [Test handler 3] *********************************************** +changed: [testhost] + +PLAY [testhost] **************************************************************** + +TASK [First free task] ********************************************************* +changed: [testhost] + +TASK [Second free task] ******************************************************** +changed: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2 + diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr new file mode 100644 index 00000000..d3e07d47 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout new file mode 100644 index 00000000..660c7285 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout @@ -0,0 +1,66 @@ + +PLAY [testhost] **************************************************************** + +TASK [Changed task] ************************************************************ +changed: [testhost] + +TASK [Ok task] ***************************************************************** +ok: [testhost] + +TASK [Failed task] ************************************************************* +fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"} +...ignoring + +TASK [Task with var in name (foo bar)] ***************************************** +changed: [testhost] + +TASK [Loop task] *************************************************************** +changed: [testhost] => (item=foo-1) +changed: [testhost] => (item=foo-2) +changed: [testhost] => (item=foo-3) + +TASK [debug loop] ************************************************************** +changed: [testhost] => (item=debug-1) => { + "msg": "debug-1" +} +failed: [testhost] (item=debug-2) => { + "msg": "debug-2" +} +ok: [testhost] => (item=debug-3) => { + "msg": "debug-3" +} +fatal: [testhost]: FAILED! => {"msg": "All items completed"} +...ignoring + +TASK [EXPECTED FAILURE Failed task to be rescued] ****************************** +fatal: [testhost]: FAILED! 
=> {"changed": false, "msg": "Failed as requested from task"} + +TASK [Rescue task] ************************************************************* +changed: [testhost] +included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1) + +TASK [debug] ******************************************************************* +ok: [testhost] => { + "item": 1 +} + +RUNNING HANDLER [Test handler 1] *********************************************** +changed: [testhost] + +RUNNING HANDLER [Test handler 2] *********************************************** +ok: [testhost] + +RUNNING HANDLER [Test handler 3] *********************************************** +changed: [testhost] + +PLAY [testhost] **************************************************************** + +TASK [First free task] ********************************************************* +changed: [testhost] + +TASK [Second free task] ******************************************************** +changed: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2 + diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr new file mode 100644 index 00000000..d3e07d47 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr @@ -0,0 +1,2 @@ ++ ansible-playbook -i inventory test.yml +++ set +x diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout new file mode 100644 index 00000000..13948b9f --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout @@ -0,0 +1,52 @@ + +PLAY [testhost] **************************************************************** + +TASK [Changed task] ************************************************************ +changed: [testhost] + +TASK [Failed task] ************************************************************* +fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"} +...ignoring + +TASK [Task with var in name (foo bar)] ***************************************** +changed: [testhost] + +TASK [Loop task] *************************************************************** +changed: [testhost] => (item=foo-1) +changed: [testhost] => (item=foo-2) +changed: [testhost] => (item=foo-3) + +TASK [debug loop] ************************************************************** +changed: [testhost] => (item=debug-1) => { + "msg": "debug-1" +} +failed: [testhost] (item=debug-2) => { + "msg": "debug-2" +} +fatal: [testhost]: FAILED! => {"msg": "All items completed"} +...ignoring + +TASK [EXPECTED FAILURE Failed task to be rescued] ****************************** +fatal: [testhost]: FAILED! 
=> {"changed": false, "msg": "Failed as requested from task"} + +TASK [Rescue task] ************************************************************* +changed: [testhost] +included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1) + +RUNNING HANDLER [Test handler 1] *********************************************** +changed: [testhost] + +RUNNING HANDLER [Test handler 3] *********************************************** +changed: [testhost] + +PLAY [testhost] **************************************************************** + +TASK [First free task] ********************************************************* +changed: [testhost] + +TASK [Second free task] ******************************************************** +changed: [testhost] + +PLAY RECAP ********************************************************************* +testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2 + diff --git a/test/integration/targets/callback_default/callback_default.out.host_pinned.stdout b/test/integration/targets/callback_default/callback_default.out.host_pinned.stdout new file mode 100644 index 00000000..0ec04479 --- /dev/null +++ b/test/integration/targets/callback_default/callback_default.out.host_pinned.stdout @@ -0,0 +1,35 @@ + +PLAY [nonlockstep] ************************************************************* + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] ***************************************************************** +changed: [testhost10] + +TASK [command] ***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost11] + +TASK [command] ***************************************************************** +changed: [testhost12] + +TASK [command] ***************************************************************** +changed: [testhost12] + +TASK [command] ***************************************************************** +changed: [testhost12] + +PLAY RECAP ********************************************************************* +testhost10 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +testhost11 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +testhost12 : ok=3 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + diff --git a/test/integration/targets/callback_default/include_me.yml b/test/integration/targets/callback_default/include_me.yml new file mode 100644 index 00000000..51470f3c --- /dev/null +++ b/test/integration/targets/callback_default/include_me.yml @@ -0,0 +1,2 @@ +- debug: + var: item diff --git a/test/integration/targets/callback_default/inventory b/test/integration/targets/callback_default/inventory new file mode 100644 index 00000000..6d9b3028 --- /dev/null +++ b/test/integration/targets/callback_default/inventory @@ -0,0 +1,10 @@ +[local] +testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" + +[nonexistent] +testhost5 ansible_host=169.254.199.200 # no connection is ever established with this host + +[nonlockstep] +testhost10 i=0.5 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +testhost11 i=3.0 
ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+testhost12 i=12.0 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/callback_default/runme.sh b/test/integration/targets/callback_default/runme.sh
new file mode 100755
index 00000000..a8033d7d
--- /dev/null
+++ b/test/integration/targets/callback_default/runme.sh
@@ -0,0 +1,190 @@
+#!/usr/bin/env bash
+
+# This test compares "known good" output with various settings against output
+# with the current code. It's brittle by nature, but this is probably the
+# "best" approach possible.
+#
+# Notes:
+# * options passed to this script (such as -v) are ignored, as they would change
+#   the output and break the test
+# * the number of asterisks after a "banner" differs depending on the number of
+#   columns on the TTY, so we must adjust the columns for the current session
+#   for consistency
+
+set -eux
+
+run_test() {
+    local testname=$1
+
+    # output was recorded w/o cowsay, ensure we reproduce the same
+    export ANSIBLE_NOCOWS=1
+
+    # The shenanigans with redirection and 'tee' are to capture STDOUT and
+    # STDERR separately while still displaying both to the console
+    { ansible-playbook -i inventory test.yml \
+        > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
+        2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
+    # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
+    sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
+    sed -i -e 's/included: .*\/test\/integration/included: ...\/test\/integration/g' "${OUTFILE}.${testname}.stdout"
+
+    diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
+    diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
+}
+
+run_test_dryrun() {
+    local testname=$1
+    # optional, pass --check to run a dry run
+    local chk=${2:-}
+
+    # output was recorded w/o cowsay, ensure we reproduce the same
+    export ANSIBLE_NOCOWS=1
+
+    # This is needed to satisfy shellcheck, which cannot accept an unquoted variable
+    cmd="ansible-playbook -i inventory ${chk} test_dryrun.yml"
+
+    # The shenanigans with redirection and 'tee' are to capture STDOUT and
+    # STDERR separately while still displaying both to the console
+    { $cmd \
+        > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
+        2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
+    # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
+    sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
+
+    diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
+    diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
+}
+
+diff_failure() {
+    if [[ $INIT = 0 ]]; then
+        echo "FAILURE...diff mismatch!"
+        exit 1
+    fi
+}
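+
+# A minimal standalone sketch of the capture pattern used by run_test and
+# run_test_dryrun above, for reference only: demo_capture is never invoked
+# and the demo.* file names are illustrative. Process substitution gives
+# each stream its own 'tee', so stdout and stderr are recorded in separate
+# files while both still reach the console.
+demo_capture() {
+    { echo demo-out > >(tee demo.stdout); } 2> >(tee demo.stderr >&2)
+}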
+
+cleanup() {
+    if [[ $INIT = 0 ]]; then
+        rm -rf "${OUTFILE}".*
+    fi
+
+    if [[ -f "${BASEFILE}.unreachable.stdout" ]]; then
+        rm -rf "${BASEFILE}.unreachable.stdout"
+    fi
+
+    if [[ -f "${BASEFILE}.unreachable.stderr" ]]; then
+        rm -rf "${BASEFILE}.unreachable.stderr"
+    fi
+
+    # Restore TTY cols
+    if [[ -n ${TTY_COLS:-} ]]; then
+        stty cols "${TTY_COLS}"
+    fi
+}
+
+adjust_tty_cols() {
+    if [[ -t 1 ]]; then
+        # Preserve existing TTY cols
+        TTY_COLS=$( stty -a | grep -Eo '; columns [0-9]+;' | cut -d';' -f2 | cut -d' ' -f3 )
+        # Override TTY cols to make comparing ansible-playbook output easier
+        # This value matches the default in the code when there is no TTY
+        stty cols 79
+    fi
+}
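+
+# For reference: the default callback pads each banner ("PLAY [...]", "TASK
+# [...]") with asterisks up to the terminal width, so pinning the width at 79
+# keeps the asterisk runs in fresh output identical to the recorded fixtures.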
+
+BASEFILE=callback_default.out
+
+ORIGFILE="${BASEFILE}"
+OUTFILE="${BASEFILE}.new"
+
+trap 'cleanup' EXIT
+
+# The --init flag will (re)generate the "good" output files used by the tests
+INIT=0
+if [[ ${1:-} == "--init" ]]; then
+    shift
+    OUTFILE=$ORIGFILE
+    INIT=1
+fi
+
+adjust_tty_cols
+
+# Force the 'default' callback plugin, since that's what we're testing
+export ANSIBLE_STDOUT_CALLBACK=default
+# Disable color in output for consistency
+export ANSIBLE_FORCE_COLOR=0
+export ANSIBLE_NOCOLOR=1
+
+# Default settings
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=0
+export ANSIBLE_CHECK_MODE_MARKERS=0
+
+run_test default
+
+# Hide skipped
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
+
+run_test hide_skipped
+
+# Hide skipped/ok
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
+export ANSIBLE_DISPLAY_OK_HOSTS=0
+
+run_test hide_skipped_ok
+
+# Hide ok
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=0
+
+run_test hide_ok
+
+# Failed to stderr
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+
+run_test failed_to_stderr
+
+# Default settings with unreachable tasks
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+export ANSIBLE_TIMEOUT=1
+
+# Check if UNREACHABLE is available in stderr
+set +e
+ansible-playbook -i inventory test_2.yml > >(set +x; tee "${BASEFILE}.unreachable.stdout";) 2> >(set +x; tee "${BASEFILE}.unreachable.stderr" >&2) || true
+set -e
+if test "$(grep -c 'UNREACHABLE' "${BASEFILE}.unreachable.stderr")" -ne 1; then
+    echo "Test failed"
+    exit 1
+fi
+
+## DRY RUN tests
+#
+# Default settings with dry run tasks
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+# Enable Check mode markers
+export ANSIBLE_CHECK_MODE_MARKERS=1
+
+# Test the wet run with check markers
+run_test_dryrun check_markers_wet
+
+# Test the dry run with check markers
+run_test_dryrun check_markers_dry --check
+
+# Disable Check mode markers
+export ANSIBLE_CHECK_MODE_MARKERS=0
+
+# Test the wet run without check markers
+run_test_dryrun check_nomarkers_wet
+
+# Test the dry run without check markers
+run_test_dryrun check_nomarkers_dry --check
+
+# Ensure free/host_pinned non-lockstep strategies display correctly
+diff -u callback_default.out.free.stdout <(ANSIBLE_STRATEGY=free ansible-playbook -i inventory test_non_lockstep.yml 2>/dev/null)
+diff -u callback_default.out.host_pinned.stdout <(ANSIBLE_STRATEGY=host_pinned ansible-playbook -i inventory test_non_lockstep.yml 2>/dev/null)
diff --git a/test/integration/targets/callback_default/test.yml b/test/integration/targets/callback_default/test.yml
new file mode 100644
index 00000000..b31787bf
--- /dev/null
+++ b/test/integration/targets/callback_default/test.yml
@@ -0,0 +1,88 @@
+---
+- hosts: testhost
+  gather_facts: no
+  vars:
+    foo: foo bar
+  tasks:
+    - name: Changed task
+      command: echo foo
+      changed_when: true
+      notify: test handlers
+
+    - name: Ok task
+      command: echo foo
+      changed_when: false
+
+    - name: Failed task
+      fail:
+        msg: no reason
+      ignore_errors: yes
+
+    - name: Skipped task
+      command: echo foo
+      when: false
+
+    - name: Task with var in name ({{ foo }})
+      command: echo foo
+
+    - name: Loop task
+      command: echo foo
+      loop:
+        - 1
+        - 2
+        - 3
+      loop_control:
+        label: foo-{{ item }}
+
+    # detect "changed" debug tasks being hidden with display_ok_hosts=false
+    - name: debug loop
+      debug:
+        msg: debug-{{ item }}
+      changed_when: item == 1
+      failed_when: item == 2
+      when: item != 4
+      ignore_errors: yes
+      loop:
+        - 1
+        - 2
+        - 3
+        - 4
+      loop_control:
+        label: debug-{{ item }}
+
+    - block:
+        - name: EXPECTED FAILURE Failed task to be rescued
+          fail:
+      rescue:
+        - name: Rescue task
+          command: echo rescued
+
+    - include_tasks: include_me.yml
+      loop:
+        - 1
+
+  handlers:
+    - name: Test handler 1
+      command: echo foo
+      listen: test handlers
+
+    - name: Test handler 2
+      command: echo foo
+      changed_when: false
+      listen: test handlers
+
+    - name: Test handler 3
+      command: echo foo
+      listen: test handlers
+
+# An issue was found previously for tasks in a play using strategy 'free' after
+# a non-'free' play in the same playbook, so we protect against a regression.
+- hosts: testhost
+  gather_facts: no
+  strategy: free
+  tasks:
+    - name: First free task
+      command: echo foo
+
+    - name: Second free task
+      command: echo foo
diff --git a/test/integration/targets/callback_default/test_2.yml b/test/integration/targets/callback_default/test_2.yml
new file mode 100644
index 00000000..2daded71
--- /dev/null
+++ b/test/integration/targets/callback_default/test_2.yml
@@ -0,0 +1,6 @@
+- hosts: nonexistent
+  gather_facts: no
+  tasks:
+    - name: Test task for unreachable host
+      command: echo foo
+      ignore_errors: True
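+
+# testhost5 resolves to a link-local address, so the task above always ends
+# UNREACHABLE; runme.sh captures this run and greps its stderr for exactly
+# one UNREACHABLE line.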
diff --git a/test/integration/targets/callback_default/test_dryrun.yml b/test/integration/targets/callback_default/test_dryrun.yml
new file mode 100644
index 00000000..26cf0831
--- /dev/null
+++ b/test/integration/targets/callback_default/test_dryrun.yml
@@ -0,0 +1,93 @@
+---
+- name: A common play
+  hosts: testhost
+  gather_facts: no
+  tasks:
+    - debug:
+        msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+    - name: Command
+      command: ls -l
+
+    - name: "Command with check_mode: false"
+      command: ls -l
+      check_mode: false
+
+    - name: "Command with check_mode: true"
+      command: ls -l
+      check_mode: true
+
+
+- name: "Play with check_mode: true (runs always in check_mode)"
+  hosts: testhost
+  gather_facts: no
+  check_mode: true
+  tasks:
+    - debug:
+        msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+    - name: Command
+      command: ls -l
+
+    - name: "Command with check_mode: false"
+      command: ls -l
+      check_mode: false
+
+    - name: "Command with check_mode: true"
+      command: ls -l
+      check_mode: true
+
+
+- name: "Play with check_mode: false (runs always in wet mode)"
+  hosts: testhost
+  gather_facts: no
+  check_mode: false
+  tasks:
+    - debug:
+        msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+    - name: Command
+      command: ls -l
+
+    - name: "Command with check_mode: false"
+      command: ls -l
+      check_mode: false
+
+    - name: "Command with check_mode: true"
+      command: ls -l
+      check_mode: true
+
+
+- name: "Play with a block with check_mode: true"
+  hosts: testhost
+  gather_facts: no
+  tasks:
+    - block:
+        - name: Command
+          command: ls -l
+
+        - name: "Command with check_mode: false"
+          command: ls -l
+          check_mode: false
+
+        - name: "Command with check_mode: true"
+          command: ls -l
+          check_mode: true
+      check_mode: true
+
+- name: "Play with a block with check_mode: false"
+  hosts: testhost
+  gather_facts: no
+  tasks:
+    - block:
+        - name: Command
+          command: ls -l
+
+        - name: "Command with check_mode: false"
+          command: ls -l
+          check_mode: false
+
+        - name: "Command with check_mode: true"
+          command: ls -l
+          check_mode: true
+      check_mode: false
diff --git a/test/integration/targets/callback_default/test_non_lockstep.yml b/test/integration/targets/callback_default/test_non_lockstep.yml
new file mode 100644
index 00000000..b656ee91
--- /dev/null
+++ b/test/integration/targets/callback_default/test_non_lockstep.yml
@@ -0,0 +1,7 @@
+---
+- hosts: nonlockstep
+  gather_facts: false
+  tasks:
+    - command: sleep {{ i }}
+    - command: sleep {{ i }}
+    - command: sleep {{ i }}
diff --git a/test/integration/targets/changed_when/aliases b/test/integration/targets/changed_when/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/changed_when/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/changed_when/meta/main.yml b/test/integration/targets/changed_when/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/changed_when/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - prepare_tests
diff --git a/test/integration/targets/changed_when/tasks/main.yml b/test/integration/targets/changed_when/tasks/main.yml
new file mode 100644
index 00000000..7b997189
--- /dev/null
+++ b/test/integration/targets/changed_when/tasks/main.yml
@@ -0,0 +1,61 @@
+# test code for the changed_when parameter
+# (c) 2014, James Tanner
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
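+
+# Pattern under test: changed_when overrides the module's own changed result,
+# so e.g. `changed_when: False` pins an otherwise-changed task to "ok".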
+
+- name: ensure shell is always changed
+  shell: ls -al /tmp
+  register: shell_result
+
+- debug: var=shell_result
+
+- name: changed should always be true for shell
+  assert:
+    that:
+      - "shell_result.changed"
+
+- name: test changed_when override for shell
+  shell: ls -al /tmp
+  changed_when: False
+  register: shell_result
+
+- debug: var=shell_result
+
+- name: changed should be false
+  assert:
+    that:
+      - "not shell_result.changed"
+
+- name: Add hosts to test group and ensure it appears as changed
+  group_by:
+    key: "cw_test1_{{ inventory_hostname }}"
+  register: groupby
+
+- name: verify it's changed
+  assert:
+    that:
+      - groupby is changed
+
+- name: Add hosts to test group and ensure it does NOT appear as changed
+  group_by:
+    key: "cw_test2_{{ inventory_hostname }}"
+  changed_when: False
+  register: groupby
+
+- name: verify it's not changed
+  assert:
+    that:
+      - groupby is not changed
diff --git a/test/integration/targets/check_mode/aliases b/test/integration/targets/check_mode/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/check_mode/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/check_mode/check_mode-not-on-cli.yml b/test/integration/targets/check_mode/check_mode-not-on-cli.yml
new file mode 100644
index 00000000..1b0c734b
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode-not-on-cli.yml
@@ -0,0 +1,37 @@
+---
+# Run without --check
+- hosts: testhost
+  gather_facts: False
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+
+    - debug: var=command_out
+    - name: check that this did not run in check mode
+      assert:
+        that:
+          - '"ran" in command_out["stdout"]'
+
+- hosts: testhost
+  gather_facts: False
+  check_mode: True
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+
+    - name: check that play level check_mode overrode the cli
+      assert:
+        that:
+          - '"check mode" in command_out["msg"]'
+
+- hosts: testhost
+  gather_facts: False
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+      check_mode: True
+
+    - name: check that task level check_mode overrode the cli
+      assert:
+        that:
+          - '"check mode" in command_out["msg"]'
diff --git a/test/integration/targets/check_mode/check_mode-on-cli.yml b/test/integration/targets/check_mode/check_mode-on-cli.yml
new file mode 100644
index 00000000..0af34b83
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode-on-cli.yml
@@ -0,0 +1,36 @@
+---
+# Run with --check
+- hosts: testhost
+  gather_facts: False
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+
+    - name: check that this ran in check mode
+      assert:
+        that:
+          - '"check mode" in command_out["msg"]'
+
+- hosts: testhost
+  gather_facts: False
+  check_mode: False
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+
+    - name: check that play level check_mode overrode the cli
+      assert:
+        that:
+          - '"ran" in command_out["stdout"]'
+
+- hosts: testhost
+  gather_facts: False
+  tasks:
+    - command: 'echo ran'
+      register: command_out
+      check_mode: False
+
+    - name: check that task level check_mode overrode the cli
+      assert:
+        that:
+          - '"ran" in command_out["stdout"]'
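+
+# Taken together with check_mode-not-on-cli.yml, this demonstrates the
+# precedence order: task-level check_mode beats play-level check_mode,
+# which beats the --check CLI flag.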
diff --git a/test/integration/targets/check_mode/check_mode.yml b/test/integration/targets/check_mode/check_mode.yml
new file mode 100644
index 00000000..a5777506
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode.yml
@@ -0,0 +1,7 @@
+- name: Test that check works with check_mode specified in roles
+  hosts: testhost
+  vars:
+    - output_dir: .
+  roles:
+    - { role: test_always_run, tags: test_always_run }
+    - { role: test_check_mode, tags: test_check_mode }
diff --git a/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml b/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml
new file mode 100644
index 00000000..d06fd48c
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml
@@ -0,0 +1,17 @@
+# test code for the check_mode: no option
+# (c) 2014, James Cammarata
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
diff --git a/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml b/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml
new file mode 100644
index 00000000..59bfb1d6
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml
@@ -0,0 +1,29 @@
+# test code for the check_mode: no option
+# (c) 2014, James Cammarata
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: run a command while in check mode
+  shell: echo "running"
+  check_mode: no
+  register: result
+
+- name: assert that the command was run
+  assert:
+    that:
+      - "result.changed == true"
+      - "result.stdout == 'running'"
+      - "result.rc == 0"
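+
+# Note: check_mode: no (the successor to the old always_run keyword, hence
+# the role name) forces the task to execute for real even under --check.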
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt b/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt
new file mode 100644
index 00000000..3e96db9b
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml b/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml
new file mode 100644
index 00000000..f926d144
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml
@@ -0,0 +1,50 @@
+# test code for the template module in check mode
+# (c) 2014, Michael DeHaan
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: fill in a basic template in check mode
+  template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated mode=0644
+  register: template_result
+
+- name: check whether file exists
+  stat: path={{output_dir}}/checkmode_foo.templated
+  register: foo
+
+- name: verify that the file was marked as changed in check mode
+  assert:
+    that:
+      - "template_result is changed"
+      - "not foo.stat.exists"
+
+- name: Actually create the file, disable check mode
+  template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated2 mode=0644
+  check_mode: no
+  register: checkmode_disabled
+
+- name: fill in template with new content
+  template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated2 mode=0644
+  register: template_result2
+
+- name: remove templated file
+  file: path={{output_dir}}/checkmode_foo.templated2 state=absent
+  check_mode: no
+
+- name: verify that the file was not changed
+  assert:
+    that:
+      - "checkmode_disabled is changed"
+      - "template_result2 is not changed"
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2 b/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2
new file mode 100644
index 00000000..55aab8f1
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2
@@ -0,0 +1 @@
+{{ templated_var }}
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml b/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml
new file mode 100644
index 00000000..1e8f64cc
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml
@@ -0,0 +1 @@
+templated_var: templated_var_loaded
diff --git a/test/integration/targets/check_mode/runme.sh b/test/integration/targets/check_mode/runme.sh
new file mode 100755
index 00000000..954ac6ff
--- /dev/null
+++ b/test/integration/targets/check_mode/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook check_mode.yml -i ../../inventory -v --check "$@"
+ansible-playbook check_mode-on-cli.yml -i ../../inventory -v --check "$@"
+ansible-playbook check_mode-not-on-cli.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/cli/aliases b/test/integration/targets/cli/aliases
new file mode 100644
index 00000000..a8816e11
--- /dev/null
+++ b/test/integration/targets/cli/aliases
@@ -0,0 +1,5 @@
+destructive
+needs/root
+needs/ssh
+needs/target/setup_pexpect
+shippable/posix/group3
diff --git a/test/integration/targets/cli/runme.sh b/test/integration/targets/cli/runme.sh
new file mode 100755
index 00000000..d9e84625
--- /dev/null
+++ b/test/integration/targets/cli/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook setup.yml
+
+python test-cli.py
diff --git a/test/integration/targets/cli/setup.yml b/test/integration/targets/cli/setup.yml
new file mode 100644
index 00000000..901cfd14
--- /dev/null
+++ b/test/integration/targets/cli/setup.yml
@@ -0,0 +1,42 @@
+- hosts: localhost
+  gather_facts: yes
+  roles:
+    - setup_pexpect
+
+  tasks:
+    - name: Test ansible-playbook and ansible with -K
+      block:
+        - name: Create user to connect as
+          user:
+            name: cliuser1
shell: /bin/bash + groups: wheel + append: yes + password: "{{ 'secretpassword' | password_hash('sha512', 'mysecretsalt') }}" + - name: Create user to become + user: + name: cliuser2 + shell: /bin/bash + password: "{{ 'secretpassword' | password_hash('sha512', 'mysecretsalt') }}" + # Sometimes this file doesn't get removed, and we need it gone to ssh + - name: Remove /run/nologin + file: + path: /run/nologin + state: absent + # Make Ansible run Python to run Ansible + - name: Run the test + shell: python test_k_and_K.py {{ ansible_python_interpreter }} + always: + - name: Remove users + user: + name: "{{ item }}" + state: absent + with_items: + - cliuser1 + - cliuser2 + # For now, we don't test this everywhere, because `user` works differently + # on some platforms, as does sudo/sudoers. On Fedora, we can just add + # the user to 'wheel' and things magically work. + # TODO: In theory, we should test this with all the different 'become' + # plugins in base. + when: ansible_distribution == 'Fedora' diff --git a/test/integration/targets/cli/test-cli.py b/test/integration/targets/cli/test-cli.py new file mode 100644 index 00000000..9893d665 --- /dev/null +++ b/test/integration/targets/cli/test-cli.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# Copyright (c) 2019 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +import pexpect + +os.environ['ANSIBLE_NOCOLOR'] = '1' +out = pexpect.run( + 'ansible localhost -m debug -a msg="{{ ansible_password }}" -k', + events={ + 'SSH password:': '{{ 1 + 2 }}\n' + } +) + +assert b'{{ 1 + 2 }}' in out diff --git a/test/integration/targets/cli/test_k_and_K.py b/test/integration/targets/cli/test_k_and_K.py new file mode 100644 index 00000000..f7077fba --- /dev/null +++ b/test/integration/targets/cli/test_k_and_K.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + +import pexpect + +os.environ['ANSIBLE_NOCOLOR'] = '1' + +out = pexpect.run( + 'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} ' + '-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]), + events={ + 'SSH password:': 'secretpassword\n', + 'BECOME password': 'secretpassword\n', + }, + timeout=10 +) + +print(out) + +assert b'cliuser2' in out diff --git a/test/integration/targets/collections/a.statichost.yml b/test/integration/targets/collections/a.statichost.yml new file mode 100644 index 00000000..683878aa --- /dev/null +++ b/test/integration/targets/collections/a.statichost.yml @@ -0,0 +1,3 @@ +# use a plugin defined in a content-adjacent collection to ensure we added it properly +plugin: testns.content_adj.statichost +hostname: dynamic_host_a diff --git a/test/integration/targets/collections/aliases b/test/integration/targets/collections/aliases new file mode 100644 index 00000000..1a9cc499 --- /dev/null +++ b/test/integration/targets/collections/aliases @@ -0,0 +1,4 @@ +posix +shippable/posix/group4 +shippable/windows/group1 +windows diff --git a/test/integration/targets/collections/cache.statichost.yml b/test/integration/targets/collections/cache.statichost.yml new file mode 100644 index 00000000..b2adcfa6 --- /dev/null +++ 
b/test/integration/targets/collections/cache.statichost.yml @@ -0,0 +1,7 @@ +# use inventory and cache plugins defined in a content-adjacent collection +plugin: testns.content_adj.statichost +hostname: cache_host_a +cache_plugin: testns.content_adj.custom_jsonfile +cache: yes +cache_connection: inventory_cache +cache_prefix: 'prefix_' diff --git a/test/integration/targets/collections/check_populated_inventory.yml b/test/integration/targets/collections/check_populated_inventory.yml new file mode 100644 index 00000000..ab33081a --- /dev/null +++ b/test/integration/targets/collections/check_populated_inventory.yml @@ -0,0 +1,11 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - assert: + that: + - "groups.all | length == 2" + - "groups.ungrouped == groups.all" + - "'cache_host_a' in groups.all" + - "'dynamic_host_a' in groups.all" diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py new file mode 100644 index 00000000..cba38120 --- /dev/null +++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='sys'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py new file mode 100644 index 00000000..e3db81be --- /dev/null +++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, failed=True, msg='this collection should be masked by testcoll in the user content root'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py new file mode 100644 index 00000000..cba38120 --- /dev/null +++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='sys'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml new file mode 100644 index 00000000..21fe324a --- /dev/null +++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml @@ -0,0 +1,2 @@ 
+- fail: + msg: this role should never be visible or runnable diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py new file mode 100644 index 00000000..07476709 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='overridden ansible.builtin (should not be possible)'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py new file mode 100644 index 00000000..5ea354e7 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='user_ansible_bullcoll'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py new file mode 100644 index 00000000..aa5c3eed --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py @@ -0,0 +1 @@ +thing = "hello from testns.othercoll.formerly_testcoll_pkg.thing" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py new file mode 100644 index 00000000..eb49a163 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py @@ -0,0 +1 @@ +thing = "hello from formerly_testcoll_pkg.submod.thing" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py new file mode 100644 index 00000000..51fe8524 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class FilterModule(object): + + def filters(self): + return { + 'broken': lambda x: 'broken', + } + + +raise Exception('This is a broken filter plugin.') diff --git 
a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml
new file mode 100644
index 00000000..f5b617d9
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml
@@ -0,0 +1,52 @@
+plugin_routing:
+  action:
+    uses_redirected_action:
+      redirect: testns.testcoll.subclassed_normal
+  callback:
+    removedcallback:
+      tombstone:
+        removal_date: '2020-01-01'
+  connection:
+    redirected_local:
+      redirect: ansible.builtin.local
+  modules:
+    multilevel1:
+      redirect: testns.testcoll.multilevel2
+    multilevel2:
+      redirect: testns.testcoll.multilevel3
+    multilevel3:
+      redirect: testns.testcoll.ping
+    uses_redirected_action:
+      redirect: ansible.builtin.ping
+    setup.ps1: ansible.windows.setup
+    looped_ping:
+      redirect: testns.testcoll.looped_ping2
+    looped_ping2:
+      redirect: testns.testcoll.looped_ping
+    bogus_redirect:
+      redirect: bogus.collection.shouldbomb
+    deprecated_ping:
+      deprecation:
+        removal_date: 2020-12-31
+        warning_text: old_ping will be removed in a future release of this collection. Use new_ping instead.
+    foobar_facts:
+      redirect: foobar_info
+    aliased_ping:
+      redirect: ansible.builtin.ping
+    dead_ping:
+      tombstone:
+        removal_date: 2019-12-31
+        warning_text: dead_ping has been removed
+  module_utils:
+    moved_out_root:
+      redirect: testns.content_adj.sub1.foomodule
+    formerly_testcoll_pkg:
+      redirect: ansible_collections.testns.othercoll.plugins.module_utils.formerly_testcoll_pkg
+    formerly_testcoll_pkg.submod:
+      redirect: ansible_collections.testns.othercoll.plugins.module_utils.formerly_testcoll_pkg.submod
+    missing_redirect_target_collection:
+      redirect: bogusns.boguscoll.bogusmu
+    missing_redirect_target_module:
+      redirect: testns.othercoll.bogusmu
+
+requires_ansible: '>=2.11'
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml
new file mode 100644
index 00000000..1d1aee7d
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml
@@ -0,0 +1,49 @@
+# verify default collection action/module lookup works
+# since we're running this playbook inside a collection, it will set that collection as the default search for all playbooks
+# and non-collection roles to allow for easy migration of old integration tests to collections
+- hosts: testhost
+  tasks:
+    - testmodule:
+
+- hosts: testhost
+  vars:
+    test_role_input: task static default collection
+  tasks:
+    - import_role:
+        name: testrole  # unqualified role lookup should work; inheriting from the containing collection
+    - assert:
+        that:
+          - test_role_output.msg == test_role_input
+    - vars:
+        test_role_input: task static legacy embedded default collection
+      block:
+        - import_role:
+            name: non_coll_role
+        - assert:
+            that:
+              - test_role_output.msg == test_role_input
+
+- hosts: testhost
+  vars:
+    test_role_input: keyword static default collection
+  roles:
+    - testrole
+  tasks:
+    - debug: var=test_role_input
+    - debug: var=test_role_output
+    - assert:
+        that:
+          - test_role_output.msg == test_role_input
+
+- hosts: testhost
+  vars:
+    test_role_input: task dynamic default collection
tasks: + - include_role: + name: testrole # unqualified role lookup should work; inheriting from the containing collection + - include_role: + name: non_coll_role + - assert: + that: + - testmodule_out_from_non_coll_role is success + - embedded_module_out_from_non_coll_role is success diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py new file mode 100644 index 00000000..54402d12 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='collection_embedded_non_collection_role'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml new file mode 100644 index 00000000..d41ae90e --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml @@ -0,0 +1,29 @@ +- testmodule: + register: testmodule_out_from_non_coll_role + +- embedded_module: + register: embedded_module_out_from_non_coll_role + +- name: check collections list from role meta + plugin_lookup: + register: pluginlookup_out + +- debug: var=pluginlookup_out + +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- assert: + that: + - test_role_input is not defined or test_role_input == test_role_output.msg + +- vars: + test_role_input: include another non-coll role + block: + - include_role: + name: non_coll_role_to_call + + - assert: + that: + - test_role_output.msg == test_role_input diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml new file mode 100644 index 00000000..98445ce3 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml @@ -0,0 +1,7 @@ +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- assert: + that: + - test_role_input is not defined or test_role_input == test_role_output.msg diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py new file mode 100644 index 00000000..5af73342 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py @@ -0,0 +1,19 @@ +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset() + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(None, task_vars) + + result = dict(changed=False) + + return result diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py new file mode 100644 index 00000000..b15493d9 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py @@ -0,0 +1,17 @@ +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + BYPASS_HOST_LOOP = True + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + result['bypass_inventory_hostname'] = task_vars['inventory_hostname'] + return result diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py new file mode 100644 index 00000000..3fa41e8f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py @@ -0,0 +1,40 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins import loader + + +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(('type', 'name')) + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(None, task_vars) + + plugin_type = self._task.args.get('type') + name = self._task.args.get('name') + + result = dict(changed=False, collection_list=self._task.collections) + + if all([plugin_type, name]): + attr_name = '{0}_loader'.format(plugin_type) + + typed_loader = getattr(loader, attr_name, None) + + if not typed_loader: + return (dict(failed=True, msg='invalid plugin type {0}'.format(plugin_type))) + + context = typed_loader.find_plugin_with_context(name, collection_list=self._task.collections) + + if not context.resolved: + result['plugin_path'] = None + result['redirect_list'] = [] + else: + result['plugin_path'] = context.plugin_resolved_path + result['redirect_list'] = context.redirect_list + + return result diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py new file mode 100644 index 00000000..f0eff30b --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py @@ -0,0 +1,11 @@ +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action.normal import ActionModule as NormalAction + + +class ActionModule(NormalAction): + def run(self, *args, **kwargs): + result = super(ActionModule, self).run(*args, **kwargs) + result['hacked'] = 'I got run under a subclassed normal, yay' + return result diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py new file mode 100644 index 00000000..701d7b46 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py @@ -0,0 +1,20 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.module_utils.formerly_core import thingtocall + + +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset() + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(None, task_vars) + + result = dict(changed=False, ttc_res=thingtocall()) + + return result diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py new file mode 100644 index 00000000..c5b0f66a --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py @@ -0,0 +1,27 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.callback import CallbackBase + +DOCUMENTATION = ''' + callback: usercallback + callback_type: notification + short_description: does stuff + description: + - does some stuff +''' + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'usercallback' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + super(CallbackModule, self).__init__() + self._display.display("loaded usercallback from collection, yay") + + def v2_runner_on_ok(self, result): + self._display.display("usercallback says ok") diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py new file mode 100644 index 00000000..fc19a99d --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py @@ -0,0 +1,41 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils._text import to_native +from ansible.plugins.connection import ConnectionBase + +DOCUMENTATION = """ + connection: localconn + short_description: do stuff local + description: + - does stuff + options: + connectionvar: + description: + - something we set + default: the_default + vars: + - name: ansible_localconn_connectionvar +""" + + +class Connection(ConnectionBase): + transport = 'local' + has_pipelining = True + + def 
_connect(self): + return self + + def exec_command(self, cmd, in_data=None, sudoable=True): + stdout = 'localconn ran {0}'.format(to_native(cmd)) + stderr = 'connectionvar is {0}'.format(to_native(self.get_option('connectionvar'))) + return (0, stdout, stderr) + + def put_file(self, in_path, out_path): + raise NotImplementedError('just a test') + + def fetch_file(self, in_path, out_path): + raise NotImplementedError('just a test') + + def close(self): + self._connected = False diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py new file mode 100644 index 00000000..4549f2d6 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py @@ -0,0 +1,18 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + DOCUMENTATION = r''' +options: + normal_doc_frag: + description: + - an option +''' + + OTHER_DOCUMENTATION = r''' +options: + other_doc_frag: + description: + - another option +''' diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py new file mode 100644 index 00000000..a5498a43 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py @@ -0,0 +1,14 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def test_subdir_filter(data): + return "{0}_via_testfilter_from_subdir".format(data) + + +class FilterModule(object): + + def filters(self): + return { + 'test_subdir_filter': test_subdir_filter + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py new file mode 100644 index 00000000..0ce239e2 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py @@ -0,0 +1,14 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def testfilter(data): + return "{0}_via_testfilter_from_userdir".format(data) + + +class FilterModule(object): + + def filters(self): + return { + 'testfilter': testfilter + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py new file mode 100644 index 00000000..07239222 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py @@ -0,0 +1,14 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def testfilter2(data): + return "{0}_via_testfilter2_from_userdir".format(data) + + +class FilterModule(object): + + def filters(self): + return { + 'testfilter2': 
testfilter2 + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py new file mode 100644 index 00000000..dd9818c9 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py @@ -0,0 +1,11 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables, **kwargs): + + return ['subdir_lookup_from_user_dir'] diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py new file mode 100644 index 00000000..1cf3d28f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py @@ -0,0 +1,11 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables, **kwargs): + + return ['mylookup_from_user_dir'] diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py new file mode 100644 index 00000000..bda671f5 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py @@ -0,0 +1,12 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables, **kwargs): + + return ['mylookup2_from_user_dir'] diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs new file mode 100644 index 00000000..68d2bc7a --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs @@ -0,0 +1,12 @@ +using System; + +namespace ansible_collections.testns.testcoll.plugins.module_utils.AnotherCSMU +{ + public class AnotherThing + { + public static string CallMe() + { + return "Hello from nested user-collection-hosted AnotherCSMU"; + } + } +} diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs new file mode 100644 index 00000000..2b7843d7 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs @@ -0,0 +1,19 @@ +using System; + +using 
ansible_collections.testns.testcoll.plugins.module_utils.AnotherCSMU; +using ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs; + +//TypeAccelerator -Name MyCSMU -TypeName CustomThing + +namespace ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU +{ + public class CustomThing + { + public static string HelloWorld() + { + string res1 = AnotherThing.CallMe(); + string res2 = NestedUtil.HelloWorld(); + return String.Format("Hello from user_mu collection-hosted MyCSMU, also {0} and {1}", res1, res2); + } + } +} diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1 new file mode 100644 index 00000000..09da66d5 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1 @@ -0,0 +1,9 @@ +Function Invoke-FromUserPSMU { + <# + .SYNOPSIS + Test function + #> + return "from user_mu" +} + +Export-ModuleMember -Function Invoke-FromUserPSMU diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py new file mode 100644 index 00000000..0654d182 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py @@ -0,0 +1,12 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.testns.testcoll.plugins.module_utils import secondary +import ansible_collections.testns.testcoll.plugins.module_utils.secondary + + +def thingtocall(): + if secondary != ansible_collections.testns.testcoll.plugins.module_utils.secondary: + raise Exception() + + return "thingtocall in base called " + ansible_collections.testns.testcoll.plugins.module_utils.secondary.thingtocall() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py new file mode 100644 index 00000000..ad847105 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def thingtocall(): + return "thingtocall in leaf" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py new file mode 100644 index 00000000..77407564 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def nested_same(): + return 'hello from nested_same' diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py new file mode 100644 index 00000000..9a315686 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def thingtocall(): + return "thingtocall in secondary" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs new file mode 100644 index 00000000..ebeb8ce5 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs @@ -0,0 +1,13 @@ +using System; + +namespace ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs +{ + public class NestedUtil + { + public static string HelloWorld() + { + string res = "Hello from subpkg.subcs"; + return res; + } + } +} diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py new file mode 100644 index 00000000..3c24bc44 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def thingtocall(): + return "thingtocall in subpkg.submod" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1 new file mode 100644 index 00000000..1db0ab97 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1 @@ -0,0 +1,9 @@ +Function 
Invoke-SubUserPSMU { + <# + .SYNOPSIS + Test function + #> + return "from subpkg.subps.psm1" +} + +Export-ModuleMember -Function Invoke-SubUserPSMU diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py new file mode 100644 index 00000000..b48a717c --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py @@ -0,0 +1,11 @@ +# NB: this module should never be loaded, since we'll see the subpkg_with_init package dir first +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def thingtocall(): + raise Exception('this should never be called (loaded discrete module instead of package module)') + + +def anotherthingtocall(): + raise Exception('this should never be called (loaded discrete module instead of package module)') diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py new file mode 100644 index 00000000..d424796f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py @@ -0,0 +1,10 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# exercise relative imports in package init; they behave differently +from .mod_in_subpkg_with_init import thingtocall as submod_thingtocall +from ..subpkg.submod import thingtocall as cousin_submod_thingtocall # pylint: disable=relative-beyond-top-level + + +def thingtocall(): + return "thingtocall in subpkg_with_init" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py new file mode 100644 index 00000000..27747dae --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def thingtocall(): + return "thingtocall in mod_in_subpkg_with_init" diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py new file mode 100644 index 00000000..9698ba6f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='user', is_deprecated=True))) + + +if __name__ == '__main__': + main() diff --git 
a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py new file mode 100644 index 00000000..5a70174d --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py @@ -0,0 +1,14 @@ +#!/usr/bin/python + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='user'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py new file mode 100644 index 00000000..2ca079c6 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='user'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py new file mode 100644 index 00000000..e2efadae --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +DOCUMENTATION = r''' +module: testmodule +description: for testing +extends_documentation_fragment: + - testns.testcoll.frag + - testns.testcoll.frag.other_documentation +''' + + +def main(): + print(json.dumps(dict(changed=False, source='user'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py new file mode 100644 index 00000000..46ccb76c --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py @@ -0,0 +1,25 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +DOCUMENTATION = r''' +module: testmodule +description: for testing +extends_documentation_fragment: + - noncollbogusfrag + - noncollbogusfrag.bogusvar + - bogusns.testcoll.frag + - testns.boguscoll.frag + - testns.testcoll.bogusfrag + - testns.testcoll.frag.bogusvar +''' + + +def main(): + print(json.dumps(dict(changed=False, source='user'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py 
b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py new file mode 100644 index 00000000..4054e36f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils.base import thingtocall + + +def main(): + mu_result = thingtocall() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py new file mode 100644 index 00000000..b169fdea --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils.moved_out_root import importme +from ..module_utils.formerly_testcoll_pkg import thing as movedthing # pylint: disable=relative-beyond-top-level +from ..module_utils.formerly_testcoll_pkg.submod import thing as submodmovedthing # pylint: disable=relative-beyond-top-level + + +def main(): + mu_result = importme() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result, mu_result2=movedthing, mu_result3=submodmovedthing))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py new file mode 100644 index 00000000..28a07729 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible.module_utils.formerly_core import thingtocall + + +def main(): + mu_result = thingtocall() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak new file mode 100644 index 00000000..703f4548 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak @@ -0,0 +1,3 @@ +# Intentionally blank, and intentionally attempting to shadow +# uses_leaf_mu_flat_import.py. 
MODULE_IGNORE_EXTS should prevent this file +# from ever being loaded. diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py new file mode 100644 index 00000000..295d4329 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +import ansible_collections.testns.testcoll.plugins.module_utils.leaf + + +def main(): + mu_result = ansible_collections.testns.testcoll.plugins.module_utils.leaf.thingtocall() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml new file mode 100644 index 00000000..703f4548 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml @@ -0,0 +1,3 @@ +# Intentionally blank, and intentionally attempting to shadow +# uses_leaf_mu_flat_import.py. MODULE_IGNORE_EXTS should prevent this file +# from ever being loaded. diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py new file mode 100644 index 00000000..3794f496 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils.leaf import thingtocall as aliasedthing + + +def main(): + mu_result = aliasedthing() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py new file mode 100644 index 00000000..559e3e56 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py @@ -0,0 +1,31 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils import leaf, secondary +# FIXME: this one needs pkginit synthesis to work +# from ansible_collections.testns.testcoll.plugins.module_utils.subpkg import submod +from 
ansible_collections.testns.testcoll.plugins.module_utils.subpkg_with_init import (thingtocall as spwi_thingtocall, + submod_thingtocall as spwi_submod_thingtocall, + cousin_submod_thingtocall as spwi_cousin_submod_thingtocall) + + +def main(): + mu_result = leaf.thingtocall() + mu2_result = secondary.thingtocall() + mu3_result = "thingtocall in subpkg.submod" # FIXME: this one needs pkginit synthesis to work + # mu3_result = submod.thingtocall() + mu4_result = spwi_thingtocall() + mu5_result = spwi_submod_thingtocall() + mu6_result = spwi_cousin_submod_thingtocall() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result, mu2_result=mu2_result, + mu3_result=mu3_result, mu4_result=mu4_result, mu5_result=mu5_result, mu6_result=mu6_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py new file mode 100644 index 00000000..b945eb68 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ..module_utils import bogusmu # pylint: disable=relative-beyond-top-level + + +def main(): + raise Exception('should never get here') + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py new file mode 100644 index 00000000..59cb3c5e --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ..module_utils import missing_redirect_target_collection # pylint: disable=relative-beyond-top-level + + +def main(): + raise Exception('should never get here') + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py new file mode 100644 index 00000000..31ffd17c --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ..module_utils import missing_redirect_target_module # pylint: disable=relative-beyond-top-level + + +def main(): + raise Exception('should never get here') + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py 
b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py new file mode 100644 index 00000000..26fa53c0 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils.nested_same.nested_same.nested_same import nested_same + + +def main(): + mu_result = nested_same() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py new file mode 100644 index 00000000..e017c14f --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible_collections.testns.testcoll.plugins.module_utils.nested_same.nested_same import nested_same + + +def main(): + mu_result = nested_same.nested_same() + print(json.dumps(dict(changed=False, source='user', mu_result=mu_result))) + + sys.exit() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1 new file mode 100644 index 00000000..df175831 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1 @@ -0,0 +1,22 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic + +$spec = @{ + options = @{ + data = @{ type = "str"; default = "pong" } + } + supports_check_mode = $true +} +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) +$data = $module.Params.data + +if ($data -eq "crash") { + throw "boom" +} + +$module.Result.ping = $data +$module.Result.source = "user" +$module.ExitJson() \ No newline at end of file diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1 new file mode 100644 index 00000000..661bc0f6 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1 @@ -0,0 +1,9 @@ +#!powershell + +$res = @{ + changed = $false + source = "user" + msg = "hi from selfcontained.ps1" +} + +ConvertTo-Json $res \ No newline at end of file diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py 
b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py new file mode 100644 index 00000000..ce99bfa5 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py @@ -0,0 +1 @@ +# docs for Windows module would go here; just ensure we don't accidentally load this instead of the .ps1 diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1 new file mode 100644 index 00000000..af00627b --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1 @@ -0,0 +1,26 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -CSharpUtil ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU +#AnsibleRequires -CSharpUtil ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs + +$spec = @{ + options = @{ + data = @{ type = "str"; default = "called from $([ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU.CustomThing]::HelloWorld())" } + } + supports_check_mode = $true +} +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) +$data = $module.Params.data + +if ($data -eq "crash") { + throw "boom" +} + +$module.Result.ping = $data +$module.Result.source = "user" +$module.Result.subpkg = [ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs.NestedUtil]::HelloWorld() +$module.Result.type_accelerator = "called from $([MyCSMU]::HelloWorld())" +$module.ExitJson() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1 new file mode 100644 index 00000000..cbca7b70 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1 @@ -0,0 +1,25 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -Powershell ansible_collections.testns.testcoll.plugins.module_utils.MyPSMU +#AnsibleRequires -PowerShell ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subps + +$spec = @{ + options = @{ + data = @{ type = "str"; default = "called from $(Invoke-FromUserPSMU)" } + } + supports_check_mode = $true +} +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) +$data = $module.Params.data + +if ($data -eq "crash") { + throw "boom" +} + +$module.Result.ping = $data +$module.Result.source = "user" +$module.Result.subpkg = Invoke-SubUserPSMU +$module.ExitJson() diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py new file mode 100644 index 00000000..ba610fb2 --- /dev/null +++ 
b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def testtest(data): + return data == 'from_user' + + +class TestModule(object): + def tests(self): + return { + 'testtest': testtest + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py new file mode 100644 index 00000000..183944ff --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def testtest(data): + return data == 'from_user2' + + +class TestModule(object): + def tests(self): + return { + 'testtest2': testtest + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py new file mode 100644 index 00000000..98a8f893 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def subdir_test(data): + return data == 'subdir_from_user' + + +class TestModule(object): + def tests(self): + return { + 'subdir_test': subdir_test + } diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py new file mode 100644 index 00000000..c603d72e --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py @@ -0,0 +1,44 @@ +# Copyright 2019 RedHat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: custom_vars + version_added: "2.10" + short_description: load host and group vars + description: test loading host and group vars from a collection + options: + stage: + choices: ['all', 'inventory', 'task'] + type: str + ini: + - key: stage + section: custom_vars + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'collection': 'collection_root_user'} diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml new file mode 100644 index 00000000..f5dcc0fc --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml @@ -0,0 +1,6 @@ +- include_role: + name: standalone + +- assert: + that: + - standalone_role_var is defined diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml new file mode 100644 index 00000000..b3a88198 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - testrole # since testrole lives in this collection, we'll check there first diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml new file mode 100644 index 00000000..99297f70 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml @@ -0,0 +1,7 @@ +- debug: + msg: '{{ outer_role_input | default("(undefined)") }}' + register: outer_role_output + +- assert: + that: + - outer_role_input is not defined or outer_role_input == outer_role_output.msg diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml new file mode 100644 index 00000000..186368f5 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml @@ -0,0 +1,27 @@ +# This handler should only be called 1 time, if it's called more than once +# this task should fail on subsequent executions +- name: test_fqcn_handler + set_fact: + handler_counter: '{{ handler_counter|int + 1 }}' + failed_when: handler_counter|int > 1 + +# The following handler contains the 
role name and should be callable as: +# 'common_handlers test_fqcn_handler' +# 'common_handlers : common_handlers test_fqcn_handler' +# 'testns.testcoll.common_handlers : common_handlers test_fqcn_handler' +- name: common_handlers test_fqcn_handler + set_fact: + handler_counter: '{{ handler_counter|int + 1 }}' + failed_when: handler_counter|int > 2 + +# The following handler starts with 'role name : ' and should _not_ be listed as: +# 'common_handlers : common_handlers : test_fqcn_handler' +# 'testns.testcoll.common_handlers : common_handlers : test_fqcn_handler' +- name: 'common_handlers : test_fqcn_handler' + meta: noop + +# The following handler starts with 'fqcn : ' and should _not_ be listed as: +# 'common_handlers : testns.testcoll.common_handlers : test_fqcn_handler' +# 'testns.testcoll.common_handlers : testns.testcoll.common_handlers : test_fqcn_handler' +- name: 'testns.testcoll.common_handlers : test_fqcn_handler' + meta: noop diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml new file mode 100644 index 00000000..64f5242b --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml @@ -0,0 +1,10 @@ +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- set_fact: + testrole_source: collection + +- assert: + that: + - test_role_input is not defined or test_role_input == test_role_output.msg diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml new file mode 100644 index 00000000..9218f3d7 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - testns.testcoll.common_handlers diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml new file mode 100644 index 00000000..6eadb7c2 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml @@ -0,0 +1,16 @@ +- name: Fire fqcn handler 1 + debug: + msg: Fire fqcn handler + changed_when: true + notify: + - 'testns.testcoll.common_handlers : test_fqcn_handler' + - 'common_handlers : test_fqcn_handler' + - 'test_fqcn_handler' + +- debug: + msg: Fire fqcn handler with role name + changed_when: true + notify: + - 'testns.testcoll.common_handlers : common_handlers test_fqcn_handler' + - 'common_handlers : common_handlers test_fqcn_handler' + - 'common_handlers test_fqcn_handler' diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml new file mode 100644 index 00000000..8c22c1c6 ---
/dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml @@ -0,0 +1,4 @@ +collections: +- ansible.builtin +- testns.coll_in_sys +- bogus.fromrolemeta diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml new file mode 100644 index 00000000..7c05abb1 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml @@ -0,0 +1,39 @@ +# test using builtin module of multiple types in a role in a collection +# https://github.com/ansible/ansible/issues/65298 +- name: Run setup module because there is both setup.ps1 and setup.py + setup: + gather_subset: min + +- name: check collections list from role meta + plugin_lookup: + register: pluginlookup_out + +- name: call role-local ping module + ping: + register: ping_out + +- name: call unqualified module in another collection listed in role meta (testns.coll_in_sys) + systestmodule: + register: systestmodule_out + +# verify that pluginloader caching doesn't prevent us from explicitly calling a builtin plugin with the same name +- name: call builtin ping module explicitly + ansible.builtin.ping: + register: builtinping_out + +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- set_fact: + testrole_source: collection + +# FIXME: add tests to ensure that block/task level stuff in a collection-hosted role properly inherit role default/meta values + +- assert: + that: + - pluginlookup_out.collection_list == ['testns.testcoll', 'ansible.builtin', 'testns.coll_in_sys', 'bogus.fromrolemeta'] + - ping_out.source is defined and ping_out.source == 'user' + - systestmodule_out.source is defined and systestmodule_out.source == 'sys' + - builtinping_out.ping is defined and builtinping_out.ping == 'pong' + - test_role_input is not defined or test_role_input == test_role_output.msg diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml new file mode 100644 index 00000000..8c22c1c6 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml @@ -0,0 +1,4 @@ +collections: +- ansible.builtin +- testns.coll_in_sys +- bogus.fromrolemeta diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml new file mode 100644 index 00000000..31e3af5e --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml @@ -0,0 +1,33 @@ +- name: check collections list from role meta + plugin_lookup: + register: pluginlookup_out + +- name: call role-local ping module + ping: + register: ping_out + +- name: call unqualified module in another collection listed in role meta (testns.coll_in_sys) + systestmodule: + register: systestmodule_out + +# verify that 
pluginloader caching doesn't prevent us from explicitly calling a builtin plugin with the same name +- name: call builtin ping module explicitly + ansible.builtin.ping: + register: builtinping_out + +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- set_fact: + testrole_source: collection + +# FIXME: add tests to ensure that block/task level stuff in a collection-hosted role properly inherit role default/meta values + +- assert: + that: + - pluginlookup_out.collection_list == ['testns.testcoll', 'ansible.builtin', 'testns.coll_in_sys', 'bogus.fromrolemeta'] + - ping_out.source is defined and ping_out.source == 'user' + - systestmodule_out.source is defined and systestmodule_out.source == 'sys' + - builtinping_out.ping is defined and builtinping_out.ping == 'pong' + - test_role_input is not defined or test_role_input == test_role_output.msg diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testredirect/meta/runtime.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testredirect/meta/runtime.yml new file mode 100644 index 00000000..da8e4901 --- /dev/null +++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testredirect/meta/runtime.yml @@ -0,0 +1,4 @@ +plugin_routing: + modules: + ping: + redirect: testns.testcoll.ping diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py new file mode 100644 index 00000000..e9f97311 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py @@ -0,0 +1,29 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + ''' handler for file transfer operations ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + + if result.get('skipped'): + return result + + module_args = self._task.args.copy() + + result.update( + self._execute_module( + module_name='me.mycoll2.module1', + module_args=module_args, + task_vars=task_vars, + ) + ) + + return result diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py new file mode 100644 index 00000000..66bb5a41 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py @@ -0,0 +1,24 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: action1 +short_description: Action Test module +description: + - Action Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py new file mode 100644 index 00000000..00bb993b 
--- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py @@ -0,0 +1,43 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: module1 +short_description: module1 Test module +description: + - module1 Test module +author: + - Ansible Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + desc=dict(type='str'), + ), + ) + + results = dict(msg="you just ran me.mycoll2.module1", desc=module.params.get('desc')) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py new file mode 100644 index 00000000..7605dc41 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py @@ -0,0 +1,63 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: jsonfile + short_description: JSON formatted files. + description: + - This cache uses JSON formatted, per host, files saved to the filesystem. + version_added: "1.9" + author: Ansible Core (@ansible-core) + options: + _uri: + required: True + description: + - Path in which the cache plugin will save the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + description: Expiration timeout for the cache plugin data + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer +''' + +import codecs +import json + +from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder +from ansible.plugins.cache import BaseFileCacheModule + + +class CacheModule(BaseFileCacheModule): + """ + A caching module backed by json files. + """ + + def _load(self, filepath): + # Valid JSON is always UTF-8 encoded. 
+ with codecs.open(filepath, 'r', encoding='utf-8') as f: + return json.load(f, cls=AnsibleJSONDecoder) + + def _dump(self, value, filepath): + with codecs.open(filepath, 'w', encoding='utf-8') as f: + f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py new file mode 100644 index 00000000..ae6941f3 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py @@ -0,0 +1,68 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + inventory: statichost + short_description: Add a single host + description: Add a single host + extends_documentation_fragment: + - inventory_cache + options: + plugin: + description: plugin name (must be statichost) + required: true + hostname: + description: Hostname to add to the 'all' group + required: True +''' + +from ansible.errors import AnsibleParserError +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable + + +class InventoryModule(BaseInventoryPlugin, Cacheable): + + NAME = 'testns.content_adj.statichost' + + def __init__(self): + + super(InventoryModule, self).__init__() + + self._hosts = set() + + def verify_file(self, path): + ''' Verify if file is usable by this plugin, base does minimal accessibility check ''' + + if not path.endswith('.statichost.yml') and not path.endswith('.statichost.yaml'): + return False + return super(InventoryModule, self).verify_file(path) + + def parse(self, inventory, loader, path, cache=None): + + super(InventoryModule, self).parse(inventory, loader, path) + + # Initialize and validate options + self._read_config_data(path) + + # Exercise cache + cache_key = self.get_cache_key(path) + attempt_to_read_cache = self.get_option('cache') and cache + cache_needs_update = self.get_option('cache') and not cache + if attempt_to_read_cache: + try: + host_to_add = self._cache[cache_key] + except KeyError: + cache_needs_update = True + if not attempt_to_read_cache or cache_needs_update: + host_to_add = self.get_option('hostname') + + # this is where the magic happens + self.inventory.add_host(host_to_add, 'all') + self._cache[cache_key] = host_to_add + + # self.inventory.add_group()... + # self.inventory.add_child()... + # self.inventory.set_variable()..
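The parse() method above follows the usual read-or-update flow for Cacheable inventory plugins: read self._cache[cache_key] only when caching is enabled and the caller offered a usable cache, recompute on a miss or a forced refresh, and always write the fresh value back. A minimal standalone sketch of that flow in plain Python; the names get_host, cache_store and options are illustrative stand-ins, not Ansible APIs:

    # Sketch of the read-or-update cache pattern from InventoryModule.parse() above.
    def get_host(cache_store, cache_key, options, cache=True):
        attempt_read = options['cache'] and cache      # read only if enabled and offered
        needs_update = options['cache'] and not cache  # cache=False forces a refresh
        host = None
        if attempt_read:
            try:
                host = cache_store[cache_key]
            except KeyError:
                needs_update = True                    # miss: fall through and recompute
        if not attempt_read or needs_update:
            host = options['hostname']                 # recompute from plugin config
        cache_store[cache_key] = host                  # always store the current value
        return host

    store = {}
    opts = {'cache': True, 'hostname': 'dynamic_host_a'}
    assert get_host(store, 'k1', opts) == 'dynamic_host_a'  # first call populates the cache
    assert get_host(store, 'k1', opts) == 'dynamic_host_a'  # second call is served from it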
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py new file mode 100644 index 00000000..eeffe01e --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def importme(): + return "hello from {0}".format(__name__)
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py new file mode 100644 index 00000000..0fa98eb0 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='content_adj'))) + + +if __name__ == '__main__': + main()
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py new file mode 100644 index 00000000..0cd9a1d5 --- /dev/null +++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py @@ -0,0 +1,45 @@ +# Copyright 2019 RedHat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: custom_adj_vars + version_added: "2.10" + short_description: load host and group vars + description: test loading host and group vars from a collection + options: + stage: + default: all + choices: ['all', 'inventory', 'task'] + type: str + ini: + - key: stage + section: custom_adj_vars + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'collection': 'adjacent', 'adj_var': 'value'}
diff --git a/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py b/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py new file mode 100644 index 00000000..b5792d88 --- /dev/null +++ b/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py @@ -0,0 +1,37 @@ +# Copyright 2019 RedHat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: v1_vars_plugin + version_added: "2.10" + short_description: load host and group vars + description: + - 3rd party vars plugin to test loading host and group vars without requiring whitelisting and without a plugin-specific stage option + options: +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'collection': False, 'name': 'v1_vars_plugin', 'v1_vars_plugin': True}
diff --git a/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py b/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py new file mode 100644 index 00000000..fc140162 --- /dev/null +++ b/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py @@ -0,0 +1,45 @@ +# Copyright 2019 RedHat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: v2_vars_plugin + version_added: "2.10" + short_description: load host and group vars + description: + - 3rd party vars plugin to test loading host and group vars without requiring whitelisting and with a plugin-specific stage option + options: + stage: + choices: ['all', 'inventory', 'task'] + type: str + ini: + - key: stage + section: other_vars_plugin + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'collection': False, 'name': 'v2_vars_plugin', 'v2_vars_plugin': True}
diff --git a/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py b/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py new file mode 100644 index 00000000..0ab95273 --- /dev/null +++ b/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py @@ -0,0 +1,46 @@ +# Copyright 2019 RedHat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + vars: vars_req_whitelist + version_added: "2.10" + short_description: load host and group vars + description: test loading host and group vars from a collection + options: + stage: + choices: ['all', 'inventory', 'task'] + type: str + ini: + - key: stage + section: vars_req_whitelist + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE +''' + +from ansible.plugins.vars import BaseVarsPlugin + + +class VarsModule(BaseVarsPlugin): + + REQUIRES_WHITELIST = True + + def get_vars(self, loader, path, entities, cache=True): + super(VarsModule, self).get_vars(loader, path, entities) + return {'whitelisted': True, 'collection': False} diff --git a/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py b/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py new file mode 100644 index 00000000..600b1fd8 --- /dev/null +++ b/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def override_formerly_core_masked_filter(*args, **kwargs): + return 'hello from overridden formerly_core_masked_filter' + + +class FilterModule(object): + def filters(self): + return { + 'formerly_core_masked_filter': override_formerly_core_masked_filter + } diff --git a/test/integration/targets/collections/includeme.yml b/test/integration/targets/collections/includeme.yml new file mode 100644 index 00000000..219ee58f --- /dev/null +++ b/test/integration/targets/collections/includeme.yml @@ -0,0 +1,6 @@ +- testns.testcoll.plugin_lookup: + register: included_plugin_lookup_out + +- assert: + that: + - included_plugin_lookup_out.collection_list == ['bogus.bogus', 'ansible.legacy'] diff --git a/test/integration/targets/collections/inventory_test.yml b/test/integration/targets/collections/inventory_test.yml new file mode 100644 index 00000000..b5089278 --- /dev/null +++ b/test/integration/targets/collections/inventory_test.yml @@ -0,0 +1,26 @@ +- name: test a collection-hosted connection plugin against hosts from collection-hosted inventory plugins + hosts: dynamic_host_a, dynamic_host_redirected + gather_facts: no + vars: + ansible_connection: testns.testcoll.localconn + ansible_localconn_connectionvar: from_play + tasks: + - raw: echo 'hello world' + register: connection_out + + - assert: + that: + - connection_out.stdout == "localconn ran echo 'hello world'" + # ensure that the connection var we overrode above made it into the running config + - connection_out.stderr == "connectionvar is from_play" + + +- hosts: localhost + gather_facts: no + tasks: + - assert: + that: + - hostvars['dynamic_host_a'] is defined + - hostvars['dynamic_host_a'].connection_out.stdout == "localconn ran echo 'hello world'" + - hostvars['dynamic_host_redirected'] is defined + - hostvars['dynamic_host_redirected'].connection_out.stdout == "localconn ran echo 'hello world'" diff --git a/test/integration/targets/collections/invocation_tests.yml b/test/integration/targets/collections/invocation_tests.yml new file mode 100644 index 00000000..c80e1edc --- /dev/null +++ b/test/integration/targets/collections/invocation_tests.yml @@ -0,0 +1,5 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: run action that invokes module from another collection + me.mycoll1.action1: desc="this 
should run me.mycoll2.module1" diff --git a/test/integration/targets/collections/library/ping.py b/test/integration/targets/collections/library/ping.py new file mode 100644 index 00000000..7a416a64 --- /dev/null +++ b/test/integration/targets/collections/library/ping.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='legacy_library_dir'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/noop.yml b/test/integration/targets/collections/noop.yml new file mode 100644 index 00000000..81c6e473 --- /dev/null +++ b/test/integration/targets/collections/noop.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + tasks: + - debug: diff --git a/test/integration/targets/collections/posix.yml b/test/integration/targets/collections/posix.yml new file mode 100644 index 00000000..903fb4ff --- /dev/null +++ b/test/integration/targets/collections/posix.yml @@ -0,0 +1,443 @@ +- hosts: testhost + tasks: + # basic test of FQ module lookup and that we got the right one (user-dir hosted) + - name: exec FQ module in a user-dir testns collection + testns.testcoll.testmodule: + register: testmodule_out + + # verifies that distributed collection subpackages are visible under a multi-location namespace (testns exists in user and sys locations) + - name: exec FQ module in a sys-dir testns collection + testns.coll_in_sys.systestmodule: + register: systestmodule_out + + # verifies that content-adjacent collections were automatically added to the installed content roots + - name: exec FQ module from content-adjacent collection + testns.content_adj.contentadjmodule: + register: contentadjmodule_out + + # content should only be loaded from the first visible instance of a collection + - name: attempt to look up FQ module in a masked collection + testns.testcoll.plugin_lookup: + type: module + name: testns.testcoll.maskedmodule + register: maskedmodule_out + + # ensure the ansible ns can have real collections added to it + - name: call an external module in the ansible namespace + ansible.bullcoll.bullmodule: + register: bullmodule_out + + # ensure the ansible ns cannot override ansible.builtin externally + - name: call an external module in the ansible.builtin collection (should use the built in module) + ansible.builtin.ping: + register: builtin_ping_out + + # action in a collection subdir + - name: test subdir action FQ + testns.testcoll.action_subdir.subdir_ping_action: + register: subdir_ping_action_out + + # module in a collection subdir + - name: test subdir module FQ + testns.testcoll.module_subdir.subdir_ping_module: + register: subdir_ping_module_out + + # module with a granular module_utils import (from (this collection).module_utils.leaf import thingtocall) + - name: exec module with granular module utils import from this collection + testns.testcoll.uses_leaf_mu_granular_import: + register: granular_out + + # module with a granular nested module_utils import (from (this collection).module_utils.base import thingtocall, + # where base imports secondary from the same collection's module_utils) + - name: exec module with nested module utils from this collection + testns.testcoll.uses_base_mu_granular_nested_import: + register: granular_nested_out + + # module with a flat module_utils import (import (this collection).module_utils.leaf) + - name: exec module with flat module_utils import from this collection + 
testns.testcoll.uses_leaf_mu_flat_import: + register: flat_out + + # module with a full-module module_utils import using 'from' (from (this collection).module_utils import leaf) + - name: exec module with full-module module_utils import using 'from' from this collection + testns.testcoll.uses_leaf_mu_module_import_from: + register: from_out + + # module with multiple levels of the same nested package name and imported as a function + - name: exec module with multiple levels of the same nested package name imported as a function + testns.testcoll.uses_nested_same_as_func: + register: from_nested_func + + # module with multiple levels of the same nested package name and imported as a module + - name: exec module with multiple levels of the same nested package name imported as a module + testns.testcoll.uses_nested_same_as_module: + register: from_nested_module + + # module using a bunch of collection-level redirected module_utils + - name: exec module using a bunch of collection-level redirected module_utils + testns.testcoll.uses_collection_redirected_mu: + register: from_redirected_mu + + # module with bogus MU + - name: exec module with bogus MU + testns.testcoll.uses_mu_missing: + ignore_errors: true + register: from_missing_mu + + # module with redirected MU, redirect collection not found + - name: exec module with a missing redirect target collection + testns.testcoll.uses_mu_missing_redirect_collection: + ignore_errors: true + register: from_missing_redir_collection + + # module with redirected MU, redirect module not found + - name: exec module with a missing redirect target module + testns.testcoll.uses_mu_missing_redirect_module: + ignore_errors: true + register: from_missing_redir_module + + - assert: + that: + - testmodule_out.source == 'user' + - systestmodule_out.source == 'sys' + - contentadjmodule_out.source == 'content_adj' + - not maskedmodule_out.plugin_path + - bullmodule_out.source == 'user_ansible_bullcoll' + - builtin_ping_out.source is not defined + - builtin_ping_out.ping == 'pong' + - subdir_ping_action_out is not changed + - subdir_ping_module_out is not changed + - granular_out.mu_result == 'thingtocall in leaf' + - granular_nested_out.mu_result == 'thingtocall in base called thingtocall in secondary' + - flat_out.mu_result == 'thingtocall in leaf' + - from_out.mu_result == 'thingtocall in leaf' + - from_out.mu2_result == 'thingtocall in secondary' + - from_out.mu3_result == 'thingtocall in subpkg.submod' + - from_out.mu4_result == 'thingtocall in subpkg_with_init' + - from_out.mu5_result == 'thingtocall in mod_in_subpkg_with_init' + - from_out.mu6_result == 'thingtocall in subpkg.submod' + - from_nested_func.mu_result == 'hello from nested_same' + - from_nested_module.mu_result == 'hello from nested_same' + - from_redirected_mu.mu_result == 'hello from ansible_collections.testns.content_adj.plugins.module_utils.sub1.foomodule' + - from_redirected_mu.mu_result2 == 'hello from testns.othercoll.formerly_testcoll_pkg.thing' + - from_redirected_mu.mu_result3 == 'hello from formerly_testcoll_pkg.submod.thing' + - from_missing_mu is failed + - "'Could not find imported module support' in from_missing_mu.msg" + - from_missing_redir_collection is failed + - "'unable to locate collection bogusns.boguscoll' in from_missing_redir_collection.msg" + - from_missing_redir_module is failed + - "'Could not find imported module support code for ansible_collections.testns.testcoll.plugins.modules.uses_mu_missing_redirect_module' in from_missing_redir_module.msg" + + +- hosts: 
testhost + tasks: + - name: exercise filters/tests/lookups + assert: + that: + - "'data' | testns.testcoll.testfilter == 'data_via_testfilter_from_userdir'" + - "'data' | testns.testcoll.testfilter2 == 'data_via_testfilter2_from_userdir'" + - "'data' | testns.testcoll.filter_subdir.test_subdir_filter == 'data_via_testfilter_from_subdir'" + - "'from_user' is testns.testcoll.testtest" + - "'from_user2' is testns.testcoll.testtest2" + - "'subdir_from_user' is testns.testcoll.test_subdir.subdir_test" + - lookup('testns.testcoll.mylookup') == 'mylookup_from_user_dir' + - lookup('testns.testcoll.mylookup2') == 'mylookup2_from_user_dir' + - lookup('testns.testcoll.lookup_subdir.my_subdir_lookup') == 'subdir_lookup_from_user_dir' + + - debug: + msg: "{{ 'foo'|testns.testbroken.broken }}" + register: result + ignore_errors: true + + - assert: + that: + - | + 'This is a broken filter plugin.' in result.msg + + - debug: + msg: "{{ 'foo'|missing.collection.filter }}" + register: result + ignore_errors: true + + - assert: + that: + - result is failed + +# ensure that the synthetic ansible.builtin collection limits to builtin plugins, that ansible.legacy loads overrides +# from legacy plugin dirs, and that a same-named plugin loaded from a real collection is not masked by the others +- hosts: testhost + tasks: + - name: test unqualified ping from library dir + ping: + register: unqualified_ping_out + + - name: test legacy-qualified ping from library dir + ansible.legacy.ping: + register: legacy_ping_out + + - name: test builtin ping + ansible.builtin.ping: + register: builtin_ping_out + + - name: test collection-based ping + testns.testcoll.ping: + register: collection_ping_out + + - assert: + that: + - unqualified_ping_out.source == 'legacy_library_dir' + - legacy_ping_out.source == 'legacy_library_dir' + - builtin_ping_out.ping == 'pong' + - collection_ping_out.source == 'user' + +# verify the default value for the collections list is empty +- hosts: testhost + tasks: + - name: sample default collections value + testns.testcoll.plugin_lookup: + register: coll_default_out + + - assert: + that: + # in original release, collections defaults to empty, which is mostly equivalent to ansible.legacy + - not coll_default_out.collection_list + + +# ensure that inheritance/masking works as expected, that the proper default values are injected when missing, +# and that the order is preserved if one of the magic values is explicitly specified +- name: verify collections keyword play/block/task inheritance and magic values + hosts: testhost + collections: + - bogus.fromplay + tasks: + - name: sample play collections value + testns.testcoll.plugin_lookup: + register: coll_play_out + + - name: collections override block-level + collections: + - bogus.fromblock + block: + - name: sample block collections value + testns.testcoll.plugin_lookup: + register: coll_block_out + + - name: sample task collections value + collections: + - bogus.fromtask + testns.testcoll.plugin_lookup: + register: coll_task_out + + - name: sample task with explicit core + collections: + - ansible.builtin + - bogus.fromtaskexplicitcore + testns.testcoll.plugin_lookup: + register: coll_task_core + + - name: sample task with explicit legacy + collections: + - ansible.legacy + - bogus.fromtaskexplicitlegacy + testns.testcoll.plugin_lookup: + register: coll_task_legacy + + - assert: + that: + # ensure that parent value inheritance is masked properly by explicit setting + - coll_play_out.collection_list == ['bogus.fromplay', 'ansible.legacy'] + - 
coll_block_out.collection_list == ['bogus.fromblock', 'ansible.legacy'] + - coll_task_out.collection_list == ['bogus.fromtask', 'ansible.legacy'] + - coll_task_core.collection_list == ['ansible.builtin', 'bogus.fromtaskexplicitcore'] + - coll_task_legacy.collection_list == ['ansible.legacy', 'bogus.fromtaskexplicitlegacy'] + +- name: verify unqualified plugin resolution behavior + hosts: testhost + collections: + - testns.testcoll + - testns.coll_in_sys + - testns.contentadj + tasks: + # basic test of unqualified module lookup and that we got the right one (user-dir hosted, there's another copy of + # this one in the same-named collection in sys dir that should be masked + - name: exec unqualified module in a user-dir testns collection + testmodule: + register: testmodule_out + + # use another collection to verify that we're looking in all collections listed on the play + - name: exec unqualified module in a sys-dir testns collection + systestmodule: + register: systestmodule_out + + - assert: + that: + - testmodule_out.source == 'user' + - systestmodule_out.source == 'sys' + +# test keyword-static execution of a FQ collection-backed role with "tasks/main.yaml" +- name: verify collection-backed role execution (keyword static) + hosts: testhost + collections: + # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config + - ansible.builtin + vars: + test_role_input: keyword static + roles: + - role: testns.testcoll.testrole_main_yaml + tasks: + - name: ensure role executed + assert: + that: + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + + +# test dynamic execution of a FQ collection-backed role +- name: verify collection-backed role execution (dynamic) + hosts: testhost + collections: + # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config + - ansible.builtin + vars: + test_role_input: dynamic + tasks: + - include_role: + name: testns.testcoll.testrole + - name: ensure role executed + assert: + that: + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + +# test task-static execution of a FQ collection-backed role +- name: verify collection-backed role execution (task static) + hosts: testhost + collections: + - ansible.builtin + vars: + test_role_input: task static + tasks: + - import_role: + name: testns.testcoll.testrole + - name: ensure role executed + assert: + that: + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + + +# test a legacy playbook-adjacent role, ensure that play collections config is not inherited +- name: verify legacy playbook-adjacent role behavior + hosts: testhost + collections: + - bogus.bogus + vars: + test_role_input: legacy playbook-adjacent + roles: + - testrole +# FIXME: this should technically work to look up a playbook-adjacent role +# - ansible.legacy.testrole + tasks: + - name: ensure role executed + assert: + that: + - test_role_output.msg == test_role_input + - testrole_source == 'legacy roles dir' + + +# test dynamic execution of a FQ collection-backed role +- name: verify collection-backed role execution in subdir (include) + hosts: testhost + vars: + test_role_input: dynamic (subdir) + tasks: + - include_role: + name: testns.testcoll.role_subdir.subdir_testrole + - name: ensure role executed + assert: + that: + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + + +# test collection-relative role deps (keyword 
static) +- name: verify collection-relative role deps + hosts: testhost + vars: + outer_role_input: keyword static outer + test_role_input: keyword static inner + roles: + - testns.testcoll.calls_intra_collection_dep_role_unqualified + tasks: + - assert: + that: + - outer_role_output.msg == outer_role_input + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + +# test collection-relative role deps (task static) +- name: verify collection-relative role deps + hosts: testhost + vars: + outer_role_input: task static outer + test_role_input: task static inner + tasks: + - import_role: + name: testns.testcoll.calls_intra_collection_dep_role_unqualified + - assert: + that: + - outer_role_output.msg == outer_role_input + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + +# test collection-relative role deps (task dynamic) +- name: verify collection-relative role deps + hosts: testhost + vars: + outer_role_input: task dynamic outer + test_role_input: task dynamic inner + tasks: + - include_role: + name: testns.testcoll.calls_intra_collection_dep_role_unqualified + - assert: + that: + - outer_role_output.msg == outer_role_input + - test_role_output.msg == test_role_input + - testrole_source == 'collection' + + +- name: validate static task include behavior + hosts: testhost + collections: + - bogus.bogus + tasks: + - import_tasks: includeme.yml + + +- name: validate dynamic task include behavior + hosts: testhost + collections: + - bogus.bogus + tasks: + - include_tasks: includeme.yml + + +- import_playbook: test_collection_meta.yml +- name: Test FQCN handlers + hosts: testhost + vars: + handler_counter: 0 + roles: + - testns.testcoll.test_fqcn_handlers + +- name: Ensure a collection role can call a standalone role + hosts: testhost + roles: + - testns.testcoll.call_standalone + +# Issue https://github.com/ansible/ansible/issues/69054 +- name: Test collection as string + hosts: testhost + collections: foo + tasks: + - debug: msg="Test" diff --git a/test/integration/targets/collections/redirected.statichost.yml b/test/integration/targets/collections/redirected.statichost.yml new file mode 100644 index 00000000..9fd2c2d8 --- /dev/null +++ b/test/integration/targets/collections/redirected.statichost.yml @@ -0,0 +1,3 @@ +# use a plugin redirected by core to a collection to ensure inventory redirection and redirected config names are working +plugin: formerly_core_inventory # this is defined in the ansible-base runtime.yml routing to point at testns.content_adj.statichost +hostname: dynamic_host_redirected diff --git a/test/integration/targets/collections/roles/standalone/tasks/main.yml b/test/integration/targets/collections/roles/standalone/tasks/main.yml new file mode 100644 index 00000000..b4dd23db --- /dev/null +++ b/test/integration/targets/collections/roles/standalone/tasks/main.yml @@ -0,0 +1,2 @@ +- set_fact: + standalone_role_var: True diff --git a/test/integration/targets/collections/roles/testrole/tasks/main.yml b/test/integration/targets/collections/roles/testrole/tasks/main.yml new file mode 100644 index 00000000..cbf6b8e7 --- /dev/null +++ b/test/integration/targets/collections/roles/testrole/tasks/main.yml @@ -0,0 +1,28 @@ +- debug: + msg: executing testrole from legacy playbook-adjacent roles dir + +- name: exec a FQ module from a legacy role + testns.testcoll.testmodule: + register: coll_module_out + +- name: exec a legacy playbook-adjacent module from a legacy role + ping: + register: ping_out + +- name: sample collections list 
inside a legacy role (should be empty) + testns.testcoll.plugin_lookup: + register: plugin_lookup_out + +- debug: + msg: '{{ test_role_input | default("(undefined)") }}' + register: test_role_output + +- set_fact: + testrole_source: legacy roles dir + +- assert: + that: + - coll_module_out.source == 'user' + # ensure we used the library/ ping override, not the builtin or one from another collection + - ping_out.source == 'legacy_library_dir' + - not plugin_lookup_out.collection_list diff --git a/test/integration/targets/collections/runme.sh b/test/integration/targets/collections/runme.sh new file mode 100755 index 00000000..f3e886a5 --- /dev/null +++ b/test/integration/targets/collections/runme.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_user:$PWD/collection_root_sys +export ANSIBLE_GATHERING=explicit +export ANSIBLE_GATHER_SUBSET=minimal +export ANSIBLE_HOST_PATTERN_MISMATCH=error + +# FUTURE: just use INVENTORY_PATH as-is once ansible-test sets the right dir +ipath=../../$(basename "${INVENTORY_PATH:-../../inventory}") +export INVENTORY_PATH="$ipath" + +echo "--- validating callbacks" +# validate FQ callbacks in ansible-playbook +ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback ansible-playbook noop.yml | grep "usercallback says ok" +# use adhoc for the rest of these tests, must force it to load other callbacks +export ANSIBLE_LOAD_CALLBACK_PLUGINS=1 +# validate redirected callback +ANSIBLE_CALLBACK_WHITELIST=formerly_core_callback ansible localhost -m debug 2>&1 | grep -- "usercallback says ok" +## validate missing redirected callback +ANSIBLE_CALLBACK_WHITELIST=formerly_core_missing_callback ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'formerly_core_missing_callback'" +## validate redirected + removed callback (fatal) +ANSIBLE_CALLBACK_WHITELIST=formerly_core_removed_callback ansible localhost -m debug 2>&1 | grep -- "testns.testcoll.removedcallback has been removed" +# validate avoiding duplicate loading of callback, even if using diff names +[ "$(ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback,formerly_core_callback ansible localhost -m debug 2>&1 | grep -c 'usercallback says ok')" = "1" ] +# ensure non existing callback does not crash ansible +ANSIBLE_CALLBACK_WHITELIST=charlie.gomez.notme ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'charlie.gomez.notme'" +unset ANSIBLE_LOAD_CALLBACK_PLUGINS +# adhoc normally shouldn't load non-default plugins- let's be sure +output=$(ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback ansible localhost -m debug) +if [[ "${output}" =~ "usercallback says ok" ]]; then echo fail; exit 1; fi + +echo "--- validating docs" +# test documentation +ansible-doc testns.testcoll.testmodule -vvv | grep -- "- normal_doc_frag" +# same with symlink +ln -s "${PWD}/testcoll2" ./collection_root_sys/ansible_collections/testns/testcoll2 +ansible-doc testns.testcoll2.testmodule2 -vvv | grep "Test module" +# now test we can list with symlink +ansible-doc -l -vvv| grep "testns.testcoll2.testmodule2" + +echo "testing bad doc_fragments (expected ERROR message follows)" +# test documentation failure +ansible-doc testns.testcoll.testmodule_bad_docfrags -vvv 2>&1 | grep -- "unknown doc_fragment" + +echo "--- validating default collection" +# test adhoc default collection resolution (use unqualified collection module with playbook dir under its collection) + +echo "testing adhoc default collection support with explicit playbook dir" 
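+# With ANSIBLE_PLAYBOOK_DIR pointing inside the testns.testcoll collection tree,
+# that collection becomes the default collection, so the unqualified 'testmodule'
+# below is expected to resolve to testns.testcoll.testmodule.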
+ANSIBLE_PLAYBOOK_DIR=./collection_root_user/ansible_collections/testns/testcoll ansible localhost -m testmodule + +# we need multiple plays, and conditional import_playbook is noisy and causes problems, so choose here which one to use... +if [[ ${INVENTORY_PATH} == *.winrm ]]; then + export TEST_PLAYBOOK=windows.yml +else + export TEST_PLAYBOOK=posix.yml + + echo "testing default collection support" + ansible-playbook -i "${INVENTORY_PATH}" collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml "$@" +fi + +echo "--- validating collections support in playbooks/roles" +# run test playbooks +ansible-playbook -i "${INVENTORY_PATH}" -v "${TEST_PLAYBOOK}" "$@" + +if [[ ${INVENTORY_PATH} != *.winrm ]]; then + ansible-playbook -i "${INVENTORY_PATH}" -v invocation_tests.yml "$@" +fi + +echo "--- validating bypass_host_loop with collection search" +ansible-playbook -i host1,host2, -v test_bypass_host_loop.yml "$@" + +echo "--- validating inventory" +# test collection inventories +ansible-playbook inventory_test.yml -i a.statichost.yml -i redirected.statichost.yml "$@" + +# test plugin loader redirect_list +ansible-playbook test_redirect_list.yml -v "$@" + +# test adjacent with --playbook-dir +export ANSIBLE_COLLECTIONS_PATH='' +ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=1 ansible-inventory --list --export --playbook-dir=. -v "$@" + +# use an inventory source with caching enabled +ansible-playbook -i a.statichost.yml -i ./cache.statichost.yml -v check_populated_inventory.yml + +# Check that the inventory source with caching enabled was stored +if [[ "$(find ./inventory_cache -type f ! -path "./inventory_cache/.keep" | wc -l)" -ne "1" ]]; then + echo "Failed to find the expected single cache" + exit 1 +fi + +CACHEFILE="$(find ./inventory_cache -type f ! 
-path './inventory_cache/.keep')" + +if [[ $CACHEFILE != ./inventory_cache/prefix_* ]]; then + echo "Unexpected cache file" + exit 1 +fi + +# Check the cache for the expected hosts + +if [[ "$(grep -wc "cache_host_a" "$CACHEFILE")" -ne "1" ]]; then + echo "Failed to cache host as expected" + exit 1 +fi + +if [[ "$(grep -wc "dynamic_host_a" "$CACHEFILE")" -ne "0" ]]; then + echo "Cached an incorrect source" + exit 1 +fi + +./vars_plugin_tests.sh + diff --git a/test/integration/targets/collections/test_bypass_host_loop.yml b/test/integration/targets/collections/test_bypass_host_loop.yml new file mode 100644 index 00000000..e95262b8 --- /dev/null +++ b/test/integration/targets/collections/test_bypass_host_loop.yml @@ -0,0 +1,22 @@ +- name: Test collection lookup bypass host list + hosts: all + connection: local + gather_facts: false + collections: + - testns.testcoll + tasks: + - meta: end_host + when: lookup('pipe', ansible_playbook_python ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '<') + + - bypass_host_loop: + register: bypass + + - run_once: true + vars: + bypass_hosts: '{{ hostvars|dictsort|map(attribute="1.bypass.bypass_inventory_hostname")|select("defined")|unique }}' + block: + - debug: + var: bypass_hosts + + - assert: + that: bypass_hosts|length == 1 diff --git a/test/integration/targets/collections/test_collection_meta.yml b/test/integration/targets/collections/test_collection_meta.yml new file mode 100644 index 00000000..22a00b21 --- /dev/null +++ b/test/integration/targets/collections/test_collection_meta.yml @@ -0,0 +1,46 @@ +- hosts: localhost + gather_facts: no + collections: + - testns.testcoll + vars: + # redirect connection + ansible_connection: testns.testcoll.redirected_local + tasks: + - assert: + that: ('data' | testns.testcoll.testfilter) == 'data_via_testfilter_from_userdir' + + # redirect module (multiple levels) + - multilevel1: + # redirect action + - uses_redirected_action: + # redirect import (consumed via action) + - uses_redirected_import: + # redirect lookup + - assert: + that: lookup('formerly_core_lookup') == 'mylookup_from_user_dir' + # redirect filter + - assert: + that: ('yes' | formerly_core_filter) == True + # legacy filter should mask redirected + - assert: + that: ('' | formerly_core_masked_filter) == 'hello from overridden formerly_core_masked_filter' + # redirect test + - assert: + that: + - "'stuff' is formerly_core_test('tuf')" + - "'hello override' is formerly_core_masked_test" + # redirect module (formerly internal) + - formerly_core_ping: + # redirect module from collection (with subdir) + - testns.testcoll.module_subdir.subdir_ping_module: + # redirect module_utils plugin (consumed via module) + - uses_core_redirected_mu: + # deprecated module (issues warning) + - deprecated_ping: + # redirect module (internal alias) + - aliased_ping: + # redirect module (cycle detection, fatal) +# - looped_ping: + + # removed module (fatal) +# - dead_ping: diff --git a/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py b/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py new file mode 100644 index 00000000..11c7f7a7 --- /dev/null +++ b/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py @@ -0,0 +1,16 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def override_formerly_core_masked_test(value, *args, **kwargs): + if value != 'hello override': + raise Exception('expected "hello 
override" only...') + + return True + + +class TestModule(object): + def tests(self): + return { + 'formerly_core_masked_test': override_formerly_core_masked_test + } diff --git a/test/integration/targets/collections/test_redirect_list.yml b/test/integration/targets/collections/test_redirect_list.yml new file mode 100644 index 00000000..8a24b960 --- /dev/null +++ b/test/integration/targets/collections/test_redirect_list.yml @@ -0,0 +1,86 @@ +--- +- hosts: localhost + gather_facts: no + module_defaults: + testns.testcoll.plugin_lookup: + type: module + tasks: + - name: test builtin + testns.testcoll.plugin_lookup: + name: dnf + register: result + failed_when: + - result['redirect_list'] != ['dnf'] or result['plugin_path'].endswith('library/dnf.py') + + - name: test builtin with collections kw + testns.testcoll.plugin_lookup: + name: dnf + register: result + failed_when: + - result['redirect_list'] != ['dnf'] or result['plugin_path'].endswith('library/dnf.py') + collections: + - testns.unrelatedcoll + + - name: test redirected builtin + testns.testcoll.plugin_lookup: + name: formerly_core_ping + register: result + failed_when: result['redirect_list'] != expected_redirect_list + vars: + expected_redirect_list: + - formerly_core_ping + - ansible.builtin.formerly_core_ping + - testns.testcoll.ping + + - name: test redirected builtin with collections kw + testns.testcoll.plugin_lookup: + name: formerly_core_ping + register: result + failed_when: result['redirect_list'] != expected_redirect_list + vars: + expected_redirect_list: + - formerly_core_ping + - ansible.builtin.formerly_core_ping + - testns.testcoll.ping + collections: + - testns.unrelatedcoll + - testns.testcoll + + - name: test collection module with collections kw + testns.testcoll.plugin_lookup: + name: ping + register: result + failed_when: result['redirect_list'] != expected_redirect_list + vars: + expected_redirect_list: + - ping + - testns.testcoll.ping + collections: + - testns.unrelatedcoll + - testns.testcoll + + - name: test redirected collection module with collections kw + testns.testcoll.plugin_lookup: + name: ping + register: result + failed_when: result['redirect_list'] != expected_redirect_list + vars: + expected_redirect_list: + - ping + - testns.testredirect.ping + - testns.testcoll.ping + collections: + - testns.unrelatedcoll + - testns.testredirect + + - name: test legacy module with collections kw + testns.testcoll.plugin_lookup: + name: ping + register: result + failed_when: + - result['redirect_list'] != expected_redirect_list or not result['plugin_path'].endswith('library/ping.py') + vars: + expected_redirect_list: + - ping + collections: + - testns.unrelatedcoll diff --git a/test/integration/targets/collections/testcoll2/MANIFEST.json b/test/integration/targets/collections/testcoll2/MANIFEST.json new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py b/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py new file mode 100644 index 00000000..7f6eb024 --- /dev/null +++ b/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py @@ -0,0 +1,33 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: testmodule2 +short_description: Test module +description: + - Test module +author: + - Ansible 
Core Team +''' + +EXAMPLES = ''' +''' + +RETURN = ''' +''' + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='sys'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections/vars_plugin_tests.sh b/test/integration/targets/collections/vars_plugin_tests.sh new file mode 100755 index 00000000..2118af6d --- /dev/null +++ b/test/integration/targets/collections/vars_plugin_tests.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +set -eux + +# Collections vars plugins must be whitelisted with FQCN because PluginLoader.all() does not search collections + +# Let vars plugins run for inventory by using the global setting +export ANSIBLE_RUN_VARS_PLUGINS=start + +# Test vars plugin in a playbook-adjacent collection +export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars + +ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep '"collection": "adjacent"' out.txt +grep '"adj_var": "value"' out.txt + +# Test vars plugin in a collection path +export ANSIBLE_VARS_ENABLED=testns.testcoll.custom_vars +export ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_user:$PWD/collection_root_sys + +ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep '"collection": "collection_root_user"' out.txt +grep -v '"adj_var": "value"' out.txt + +# Test enabled vars plugins order reflects the order in which variables are merged +export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars,testns.testcoll.custom_vars + +ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep '"collection": "collection_root_user"' out.txt +grep '"adj_var": "value"' out.txt +grep -v '"collection": "adjacent"' out.txt + +# Test that 3rd party plugins in plugin_path do not need to require whitelisting by default +# Plugins shipped with Ansible and in the custom plugin dir should be used first +export ANSIBLE_VARS_PLUGINS=./custom_vars_plugins + +ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep '"name": "v2_vars_plugin"' out.txt +grep '"collection": "collection_root_user"' out.txt +grep '"adj_var": "value"' out.txt +grep -v '"whitelisted": true' out.txt + +# Test plugins in plugin paths that opt-in to require whitelisting +unset ANSIBLE_VARS_ENABLED +unset ANSIBLE_COLLECTIONS_PATH + +ANSIBLE_VARS_ENABLED=vars_req_whitelist ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep '"whitelisted": true' out.txt + +# Test vars plugins that support the stage setting don't run for inventory when stage is set to 'task' +# and that the vars plugins that don't support the stage setting don't run for inventory when the global setting is 'demand' +ANSIBLE_VARS_PLUGIN_STAGE=task ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep -v '"v1_vars_plugin": true' out.txt +grep -v '"v2_vars_plugin": true' out.txt +grep -v '"vars_req_whitelist": true' out.txt +grep -v '"collection": "adjacent"' out.txt +grep -v '"collection": "collection_root_user"' out.txt +grep -v '"adj_var": "value"' out.txt + +# Test that the global setting allows v1 and v2 plugins to run after importing inventory +ANSIBLE_RUN_VARS_PLUGINS=start ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt + +grep -v '"vars_req_whitelist": true' out.txt +grep '"v1_vars_plugin": true' out.txt +grep '"v2_vars_plugin": true' out.txt +grep '"name": "v2_vars_plugin"' out.txt + +# Test that vars plugins in collections and in the vars plugin 
path are available for tasks +cat << EOF > "test_task_vars.yml" +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - debug: msg="{{ name }}" + - debug: msg="{{ collection }}" + - debug: msg="{{ adj_var }}" +EOF + +export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars + +ANSIBLE_VARS_PLUGIN_STAGE=task ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3" +ANSIBLE_RUN_VARS_PLUGINS=start ANSIBLE_VARS_PLUGIN_STAGE=inventory ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3" +ANSIBLE_RUN_VARS_PLUGINS=demand ANSIBLE_VARS_PLUGIN_STAGE=inventory ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3" +ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3" diff --git a/test/integration/targets/collections/windows.yml b/test/integration/targets/collections/windows.yml new file mode 100644 index 00000000..4bdfb0ed --- /dev/null +++ b/test/integration/targets/collections/windows.yml @@ -0,0 +1,28 @@ +- hosts: windows + tasks: + - testns.testcoll.win_selfcontained: + register: selfcontained_out + + - testns.testcoll.win_csbasic_only: + register: csbasic_only_out + + - testns.testcoll.win_uses_coll_psmu: + register: uses_coll_psmu + + - testns.testcoll.win_uses_coll_csmu: + register: uses_coll_csmu + + - assert: + that: + - selfcontained_out.source == 'user' + - csbasic_only_out.source == 'user' + # win_uses_coll_psmu + - uses_coll_psmu.source == 'user' + - "'user_mu' in uses_coll_psmu.ping" + - uses_coll_psmu.subpkg == 'from subpkg.subps.psm1' + # win_uses_coll_csmu + - uses_coll_csmu.source == 'user' + - "'user_mu' in uses_coll_csmu.ping" + - "'Hello from subpkg.subcs' in uses_coll_csmu.ping" + - uses_coll_csmu.subpkg == 'Hello from subpkg.subcs' + - uses_coll_csmu.type_accelerator == uses_coll_csmu.ping diff --git a/test/integration/targets/collections_plugin_namespace/aliases b/test/integration/targets/collections_plugin_namespace/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py new file mode 100644 index 00000000..dca094be --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py @@ -0,0 +1,15 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def filter_name(a): + return __name__ + + +class FilterModule(object): + def filters(self): + filters = { + 'filter_name': filter_name, + } + + return filters diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py new file mode 100644 index 00000000..d0af703b --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py @@ -0,0 +1,9 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + 
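+# This lookup returns its own __name__ so the test role can assert that it was
+# loaded under the synthetic ansible_collections.my_ns.my_col.plugins.lookup package.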
+from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + return [__name__] diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py new file mode 100644 index 00000000..79e80f62 --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py @@ -0,0 +1,10 @@ +# do not add future boilerplate to this plugin +# specifically, do not add absolute_import, as the purpose of this plugin is to test implicit relative imports on Python 2.x +__metaclass__ = type + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + return [__name__] diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py new file mode 100644 index 00000000..1739072f --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py @@ -0,0 +1,13 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def test_name_ok(value): + return __name__ == 'ansible_collections.my_ns.my_col.plugins.test.test_test' + + +class TestModule: + def tests(self): + return { + 'test_name_ok': test_name_ok, + } diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml new file mode 100644 index 00000000..d80f5470 --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml @@ -0,0 +1,12 @@ +- set_fact: + filter_name: "{{ 1 | my_ns.my_col.filter_name }}" + lookup_name: "{{ lookup('my_ns.my_col.lookup_name') }}" + lookup_no_future_boilerplate: "{{ lookup('my_ns.my_col.lookup_no_future_boilerplate') }}" + test_name_ok: "{{ 1 is my_ns.my_col.test_name_ok }}" + +- assert: + that: + - filter_name == 'ansible_collections.my_ns.my_col.plugins.filter.test_filter' + - lookup_name == 'ansible_collections.my_ns.my_col.plugins.lookup.lookup_name' + - lookup_no_future_boilerplate == 'ansible_collections.my_ns.my_col.plugins.lookup.lookup_no_future_boilerplate' + - test_name_ok diff --git a/test/integration/targets/collections_plugin_namespace/runme.sh b/test/integration/targets/collections_plugin_namespace/runme.sh new file mode 100755 index 00000000..96e83d36 --- /dev/null +++ b/test/integration/targets/collections_plugin_namespace/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_COLLECTIONS_PATH="${PWD}/collection_root" ansible-playbook test.yml -i ../../inventory "$@" diff --git a/test/integration/targets/collections_plugin_namespace/test.yml b/test/integration/targets/collections_plugin_namespace/test.yml new file mode 100644 index 00000000..d1c3f1b7 --- /dev/null +++ 
b/test/integration/targets/collections_plugin_namespace/test.yml @@ -0,0 +1,3 @@ +- hosts: testhost + roles: + - my_ns.my_col.test diff --git a/test/integration/targets/collections_relative_imports/aliases b/test/integration/targets/collections_relative_imports/aliases new file mode 100644 index 00000000..996481b4 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/aliases @@ -0,0 +1,4 @@ +posix +shippable/posix/group1 +shippable/windows/group1 +windows diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1 new file mode 100644 index 00000000..bf812643 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1 @@ -0,0 +1,11 @@ +#AnsibleRequires -PowerShell .sub_pkg.PSRel2 + +Function Invoke-FromPSRel1 { + <# + .SYNOPSIS + Test function + #> + return "$(Invoke-FromPSRel2) -> Invoke-FromPSRel1" +} + +Export-ModuleMember -Function Invoke-FromPSRel1 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py new file mode 100644 index 00000000..196b4abf --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def one(): + return 1 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py new file mode 100644 index 00000000..0d985bf3 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from .my_util1 import one + + +def two(): + return one() * 2 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py new file mode 100644 index 00000000..1529d7b2 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from . 
import my_util2 + + +def three(): + return my_util2.two() + 1 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1 new file mode 100644 index 00000000..d0aa3686 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1 @@ -0,0 +1,11 @@ +#AnsibleRequires -PowerShell ansible_collections.my_ns.my_col2.plugins.module_utils.PSRel3 + +Function Invoke-FromPSRel2 { + <# + .SYNOPSIS + Test function + #> + return "$(Invoke-FromPSRel3) -> Invoke-FromPSRel2" +} + +Export-ModuleMember -Function Invoke-FromPSRel2 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py new file mode 100644 index 00000000..0cdf5008 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py @@ -0,0 +1,24 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ..module_utils.my_util2 import two +from ..module_utils import my_util3 + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + supports_check_mode=True + ) + + result = dict( + two=two(), + three=my_util3.three(), + ) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1 new file mode 100644 index 00000000..383df0a3 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1 @@ -0,0 +1,10 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -PowerShell ..module_utils.PSRel1 + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +$module.Result.data = Invoke-FromPSRel1 + +$module.ExitJson() diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml new file mode 100644 index 00000000..9ba0f7ed --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml @@ -0,0 +1,4 @@ +- name: fully qualified module usage with relative imports + my_ns.my_col.my_module: +- name: collection relative module usage with relative imports + my_module: diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1 new file mode 100644 index 00000000..46edd5a9 
--- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1 @@ -0,0 +1,11 @@ +#AnsibleRequires -CSharpUtil .sub_pkg.CSRel4 + +Function Invoke-FromPSRel3 { + <# + .SYNOPSIS + Test function + #> + return "$([CSRel4]::Invoke()) -> Invoke-FromPSRel3" +} + +Export-ModuleMember -Function Invoke-FromPSRel3 diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs new file mode 100644 index 00000000..c50024b6 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs @@ -0,0 +1,14 @@ +using System; + +//TypeAccelerator -Name CSRel4 -TypeName TestClass + +namespace ansible_collections.my_ns.my_col.plugins.module_utils.sub_pkg.CSRel4 +{ + public class TestClass + { + public static string Invoke() + { + return "CSRel4.Invoke()"; + } + } +} diff --git a/test/integration/targets/collections_relative_imports/runme.sh b/test/integration/targets/collections_relative_imports/runme.sh new file mode 100755 index 00000000..754efafe --- /dev/null +++ b/test/integration/targets/collections_relative_imports/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eux + +# we need multiple plays, and conditional import_playbook is noisy and causes problems, so choose here which one to use... +if [[ ${INVENTORY_PATH} == *.winrm ]]; then + export TEST_PLAYBOOK=windows.yml +else + export TEST_PLAYBOOK=test.yml + +fi + +ANSIBLE_COLLECTIONS_PATH="${PWD}/collection_root" ansible-playbook "${TEST_PLAYBOOK}" -i "${INVENTORY_PATH}" "$@" diff --git a/test/integration/targets/collections_relative_imports/test.yml b/test/integration/targets/collections_relative_imports/test.yml new file mode 100644 index 00000000..d1c3f1b7 --- /dev/null +++ b/test/integration/targets/collections_relative_imports/test.yml @@ -0,0 +1,3 @@ +- hosts: testhost + roles: + - my_ns.my_col.test diff --git a/test/integration/targets/collections_relative_imports/windows.yml b/test/integration/targets/collections_relative_imports/windows.yml new file mode 100644 index 00000000..aa6badfa --- /dev/null +++ b/test/integration/targets/collections_relative_imports/windows.yml @@ -0,0 +1,11 @@ +- hosts: windows + gather_facts: no + tasks: + - name: test out relative imports on Windows modules + my_ns.my_col.win_relative: + register: win_relative + + - name: assert relative imports on Windows modules + assert: + that: + - win_relative.data == 'CSRel4.Invoke() -> Invoke-FromPSRel3 -> Invoke-FromPSRel2 -> Invoke-FromPSRel1' diff --git a/test/integration/targets/collections_runtime_pythonpath/aliases b/test/integration/targets/collections_runtime_pythonpath/aliases new file mode 100644 index 00000000..0a772ad7 --- /dev/null +++ b/test/integration/targets/collections_runtime_pythonpath/aliases @@ -0,0 +1,3 @@ +shippable/posix/group4 +skip/python2.6 +skip/aix diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py new file mode 100644 index 00000000..a2313b12 --- /dev/null +++ 
b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py @@ -0,0 +1,28 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Say hello.""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec={ + 'name': {'default': 'world'}, + }, + ) + name = module.params['name'] + + module.exit_json( + msg='Greeting {name} completed.'. + format(name=name.title()), + greeting='Hello, {name}!'.format(name=name), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml new file mode 100644 index 00000000..feec734a --- /dev/null +++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml @@ -0,0 +1,6 @@ +[build-system] +requires = [ + "setuptools >= 44", + "wheel", +] +build-backend = "setuptools.build_meta" diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg new file mode 100644 index 00000000..d25ebb0f --- /dev/null +++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg @@ -0,0 +1,15 @@ +[metadata] +name = ansible-collections.python.dist +version = 1.0.0rc2.post3.dev4 + +[options] +package_dir = + = . +packages = + ansible_collections + ansible_collections.python + ansible_collections.python.dist + ansible_collections.python.dist.plugins + ansible_collections.python.dist.plugins.modules +zip_safe = True +include_package_data = True diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py new file mode 100644 index 00000000..1ef03330 --- /dev/null +++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py @@ -0,0 +1,28 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Say hello in Ukrainian.""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec={ + 'name': {'default': 'світ'}, + }, + ) + name = module.params['name'] + + module.exit_json( + msg='Greeting {name} completed.'. 
+ format(name=name.title()), + greeting='Привіт, {name}!'.format(name=name), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/collections_runtime_pythonpath/runme.sh b/test/integration/targets/collections_runtime_pythonpath/runme.sh new file mode 100755 index 00000000..41236e8b --- /dev/null +++ b/test/integration/targets/collections_runtime_pythonpath/runme.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +set -eux -o pipefail + + +export PIP_DISABLE_PIP_VERSION_CHECK=1 + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh + + +>&2 echo \ + === Test that the module \ + gets picked up if discoverable \ + via PYTHONPATH env var === +PYTHONPATH="${PWD}/ansible-collection-python-dist-boo:$PYTHONPATH" \ +ansible \ + -m python.dist.boo \ + -a 'name=Bob' \ + -c local localhost \ + "$@" | grep -E '"greeting": "Hello, Bob!",' + + +>&2 echo \ + === Test that the module \ + gets picked up if installed \ + into site-packages === +python -m pip.__main__ install pep517 +( # Build a binary Python dist (a wheel) using PEP517: + cp -r ansible-collection-python-dist-boo "${OUTPUT_DIR}/" + cd "${OUTPUT_DIR}/ansible-collection-python-dist-boo" + python -m pep517.build --binary --out-dir dist . +) +# Install a pre-built dist with pip: +python -m pip.__main__ install \ + --no-index \ + -f "${OUTPUT_DIR}/ansible-collection-python-dist-boo/dist/" \ + --only-binary=ansible-collections.python.dist \ + ansible-collections.python.dist +python -m pip.__main__ show ansible-collections.python.dist +ansible \ + -m python.dist.boo \ + -a 'name=Frodo' \ + -c local localhost \ + "$@" | grep -E '"greeting": "Hello, Frodo!",' + + +>&2 echo \ + === Test that ansible_collections \ + root takes precedence over \ + PYTHONPATH/site-packages === +# This is done by injecting a module with the same FQCN +# into another collection root. 
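+# With correct precedence, the grep below should match the Ukrainian greeting from the collection root even though the English 'boo' dist is still first on PYTHONPATH.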
+ANSIBLE_COLLECTIONS_PATH="${PWD}/ansible-collection-python-dist-foo" \ +PYTHONPATH="${PWD}/ansible-collection-python-dist-boo:$PYTHONPATH" \ +ansible \ + -m python.dist.boo \ + -a 'name=Степан' \ + -c local localhost \ + "$@" | grep -E '"greeting": "Привіт, Степан!",' diff --git a/test/integration/targets/command_shell/aliases b/test/integration/targets/command_shell/aliases new file mode 100644 index 00000000..8dd7b884 --- /dev/null +++ b/test/integration/targets/command_shell/aliases @@ -0,0 +1,4 @@ +command +shippable/posix/group2 +shell +skip/aix diff --git a/test/integration/targets/command_shell/files/create_afile.sh b/test/integration/targets/command_shell/files/create_afile.sh new file mode 100755 index 00000000..e6fae448 --- /dev/null +++ b/test/integration/targets/command_shell/files/create_afile.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "win" > "$1" \ No newline at end of file diff --git a/test/integration/targets/command_shell/files/remove_afile.sh b/test/integration/targets/command_shell/files/remove_afile.sh new file mode 100755 index 00000000..4a7fea66 --- /dev/null +++ b/test/integration/targets/command_shell/files/remove_afile.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +rm "$1" \ No newline at end of file diff --git a/test/integration/targets/command_shell/files/test.sh b/test/integration/targets/command_shell/files/test.sh new file mode 100755 index 00000000..ade17e9b --- /dev/null +++ b/test/integration/targets/command_shell/files/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo -n "win" \ No newline at end of file diff --git a/test/integration/targets/command_shell/meta/main.yml b/test/integration/targets/command_shell/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/command_shell/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/command_shell/tasks/main.yml b/test/integration/targets/command_shell/tasks/main.yml new file mode 100644 index 00000000..1d614e49 --- /dev/null +++ b/test/integration/targets/command_shell/tasks/main.yml @@ -0,0 +1,446 @@ +# Test code for the command and shell modules. 
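+# The tasks below exercise command/shell option handling end to end: warnings, argv, chdir, creates, removes and stdin.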
+ +# Copyright: (c) 2014, Richard Isaacson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: use command to execute sudo + command: sudo -h + register: become + +- name: assert become warning was reported + assert: + that: + - "become.warnings | length() == 1" + - "'Consider using' in become.warnings[0]" + +- name: use command to execute sudo without warnings + command: sudo -h warn=no + register: become + +- name: assert become warning was not reported + assert: + that: + - "'warnings' not in become" + +- name: use command to execute tar + command: tar --help + register: tar + +- name: assert tar warning was reported + assert: + that: + - tar.warnings | length() == 1 + - '"Consider using the unarchive module rather than running ''tar''" in tar.warnings[0]' + +- name: use command to execute chown + command: chown -h + register: chown + ignore_errors: true + +- name: assert chown warning was reported + assert: + that: + - chown.warnings | length() == 1 + - '"Consider using the file module with owner rather than running ''chown''" in chown.warnings[0]' + +- name: use command with unsupported executable arg + command: ls /dev/null + args: + executable: /bogus + register: executable + +- name: assert executable warning was reported + assert: + that: + - executable.stdout == '/dev/null' + - executable.warnings | length() == 1 + - "'no longer supported' in executable.warnings[0]" + +# The warning isn't on the task since it comes from the action plugin. Not sure +# how to test for that. +# +# - name: Use command with reboot +# command: sleep 2 && /not/shutdown -r now +# ignore_errors: yes +# register: reboot + +# - name: Assert that reboot warning was issued +# assert: +# that: +# - '"Consider using the reboot module" in reboot.warnings[0]' + +- name: use command with no command + command: + args: + chdir: / + register: no_command + ignore_errors: true + +- name: assert executable fails with no command + assert: + that: + - no_command is failed + - no_command.msg == 'no command given' + - no_command.rc == 256 + +- name: use argv + command: + argv: + - echo + - testing + register: argv_command + ignore_errors: true + +- name: assert executable works with argv + assert: + that: + - "argv_command.stdout == 'testing'" + +- name: use argv and command string + command: echo testing + args: + argv: + - echo + - testing + register: argv_and_string_command + ignore_errors: true + +- name: assert executable fails with both argv and command string + assert: + that: + - argv_and_string_command is failed + - argv_and_string_command.msg == 'only command or argv can be given, not both' + - argv_and_string_command.rc == 256 + +- set_fact: + output_dir_test: "{{ output_dir }}/test_command_shell" + +- name: make sure our testing sub-directory does not exist + file: + path: "{{ output_dir_test }}" + state: absent + +- name: create our testing sub-directory + file: + path: "{{ output_dir_test }}" + state: directory + +- name: prep our test script + copy: + src: test.sh + dest: "{{ output_dir_test }}" + mode: '0755' + +- name: prep our test script + copy: + src: create_afile.sh + dest: "{{ output_dir_test }}" + mode: '0755' + +- name: prep our test script + copy: + src: remove_afile.sh + dest: "{{ output_dir_test }}" + mode: '0755' + +- name: locate bash + shell: which bash + register: bash + +## +## command +## + +- name: execute the test.sh script via command + command: "{{ output_dir_test }}/test.sh" + register: command_result0 + +- name: assert that the 
script executed correctly + assert: + that: + - command_result0.rc == 0 + - command_result0.stderr == '' + - command_result0.stdout == 'win' + +# executable + +# FIXME doesn't have the expected stdout. + +#- name: execute the test.sh script with executable via command +# command: "{{output_dir_test }}/test.sh executable={{ bash.stdout }}" +# register: command_result1 +# +#- name: assert that the script executed correctly with command +# assert: +# that: +# - "command_result1.rc == 0" +# - "command_result1.stderr == ''" +# - "command_result1.stdout == 'win'" + +# chdir + +- name: execute the test.sh script with chdir via command + command: ./test.sh + args: + chdir: "{{ output_dir_test }}" + register: command_result2 + +- name: assert that the script executed correctly with chdir + assert: + that: + - command_result2.rc == 0 + - command_result2.stderr == '' + - command_result2.stdout == 'win' + +# creates + +- name: verify that afile.txt is absent + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent + +- name: create afile.txt with create_afile.sh via command + command: "{{ output_dir_test }}/create_afile.sh {{output_dir_test }}/afile.txt" + args: + creates: "{{ output_dir_test }}/afile.txt" + +- name: verify that afile.txt is present + file: + path: "{{ output_dir_test }}/afile.txt" + state: file + +- name: re-run previous command using creates with globbing + command: "{{ output_dir_test }}/create_afile.sh {{ output_dir_test }}/afile.txt" + args: + creates: "{{ output_dir_test }}/afile.*" + register: command_result3 + +- name: assert that creates with globbing is working + assert: + that: + - command_result3 is not changed + +# removes + +- name: remove afile.txt with remove_afile.sh via command + command: "{{ output_dir_test }}/remove_afile.sh {{ output_dir_test }}/afile.txt" + args: + removes: "{{ output_dir_test }}/afile.txt" + +- name: verify that afile.txt is absent + file: path={{output_dir_test}}/afile.txt state=absent + +- name: re-run previous command using removes with globbing + command: "{{ output_dir_test }}/remove_afile.sh {{ output_dir_test }}/afile.txt" + args: + removes: "{{ output_dir_test }}/afile.*" + register: command_result4 + +- name: assert that removes with globbing is working + assert: + that: + - command_result4.changed != True + +- name: pass stdin to cat via command + command: cat + args: + stdin: 'foobar' + register: command_result5 + +- name: assert that stdin is passed + assert: + that: + - command_result5.stdout == 'foobar' + +- name: send to stdin literal multiline block + command: "{{ ansible_python.executable }} -c 'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, \"buffer\") else sys.stdin).read()).hexdigest())'" + args: + stdin: |- + this is the first line + this is the second line + + this line is after an empty line + this line is the last line + register: command_result6 + +- name: assert the multiline input was passed correctly + assert: + that: + - "command_result6.stdout == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'" + +## +## shell +## + +- name: Execute the test.sh script + shell: "{{ output_dir_test }}/test.sh" + register: shell_result0 + +- name: Assert that the script executed correctly + assert: + that: + - shell_result0 is changed + - shell_result0.cmd == '{{ output_dir_test }}/test.sh' + - shell_result0.rc == 0 + - shell_result0.stderr == '' + - shell_result0.stdout == 'win' + +# executable + +# FIXME doesn't pass the expected stdout + +#- name: execute the test.sh script +# shell:
"{{output_dir_test }}/test.sh executable={{ bash.stdout }}" +# register: shell_result1 +# +#- name: assert that the shell executed correctly +# assert: +# that: +# - "shell_result1.rc == 0" +# - "shell_result1.stderr == ''" +# - "shell_result1.stdout == 'win'" + +# chdir + +- name: Execute the test.sh script with chdir + shell: ./test.sh + args: + chdir: "{{ output_dir_test }}" + register: shell_result2 + +- name: Assert that the shell executed correctly with chdir + assert: + that: + - shell_result2 is changed + - shell_result2.cmd == './test.sh' + - shell_result2.rc == 0 + - shell_result2.stderr == '' + - shell_result2.stdout == 'win' + +# creates + +- name: Verify that afile.txt is absent + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent + +- name: Execute the test.sh script with chdir + shell: "{{ output_dir_test }}/test.sh > {{ output_dir_test }}/afile.txt" + args: + chdir: "{{ output_dir_test }}" + creates: "{{ output_dir_test }}/afile.txt" + +- name: Verify that afile.txt is present + file: + path: "{{ output_dir_test }}/afile.txt" + state: file + +# multiline + +- name: Remove test file previously created + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent + +- name: Execute a shell command using a literal multiline block + args: + executable: "{{ bash.stdout }}" + shell: | + echo this is a \ + "multiline echo" \ + "with a new line + in quotes" \ + | {{ ansible_python.executable }} -c 'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, "buffer") else sys.stdin).read()).hexdigest())' + echo "this is a second line" + register: shell_result5 + +- name: Assert the multiline shell command ran as expected + assert: + that: + - shell_result5 is changed + - shell_result5.rc == 0 + - shell_result5.cmd == 'echo this is a "multiline echo" "with a new line\nin quotes" | ' + ansible_python.executable + ' -c \'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, "buffer") else sys.stdin).read()).hexdigest())\'\necho "this is a second line"\n' + - shell_result5.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line' + +- name: Execute a shell command using a literal multiline block with arguments in it + shell: | + executable="{{ bash.stdout }}" + creates={{ output_dir_test }}/afile.txt + echo "test" + register: shell_result6 + +- name: Assert the multiline shell command with arguments in it ran as expected + assert: + that: + - shell_result6 is changed + - shell_result6.rc == 0 + - shell_result6.cmd == 'echo "test"\n' + - shell_result6.stdout == 'test' + +- name: Execute a shell command using a multiline block where whitespace matters + shell: | + cat > {{output_dir_test }}/afile.txt + args: + stdin: test + stdin_add_newline: no + +- name: make sure content matches expected + copy: + dest: "{{output_dir_test }}/afile.txt" + content: test + register: shell_result7 + failed_when: + - shell_result7 is failed or + shell_result7 is changed + +- name: execute a shell command with trailing newline to stdin + shell: cat > {{output_dir_test }}/afile.txt + args: + stdin: test + stdin_add_newline: yes + +- name: make sure content matches expected + copy: + dest: "{{output_dir_test }}/afile.txt" + content: | + test + register: shell_result8 + failed_when: + - shell_result8 is failed or + shell_result8 is changed + +- name: execute a shell command with trailing newline to stdin, default + shell: cat > {{output_dir_test }}/afile.txt + args: + stdin: test + +- name: make sure content matches expected +
copy: + dest: "{{output_dir_test }}/afile.txt" + content: | + test + register: shell_result9 + failed_when: + - shell_result9 is failed or + shell_result9 is changed + +- name: remove the previously created file + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent diff --git a/test/integration/targets/common_network/aliases b/test/integration/targets/common_network/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/common_network/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/common_network/tasks/main.yml b/test/integration/targets/common_network/tasks/main.yml new file mode 100644 index 00000000..97b3dd0d --- /dev/null +++ b/test/integration/targets/common_network/tasks/main.yml @@ -0,0 +1,4 @@ +- assert: + that: + - '"00:00:00:a1:2b:cc" is is_mac' + - '"foo" is not is_mac' diff --git a/test/integration/targets/common_network/test_plugins/is_mac.py b/test/integration/targets/common_network/test_plugins/is_mac.py new file mode 100644 index 00000000..6a4d4092 --- /dev/null +++ b/test/integration/targets/common_network/test_plugins/is_mac.py @@ -0,0 +1,14 @@ +# Copyright: (c) 2020, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.common.network import is_mac + + +class TestModule(object): + def tests(self): + return { + 'is_mac': is_mac, + } diff --git a/test/integration/targets/conditionals/aliases b/test/integration/targets/conditionals/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/conditionals/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/conditionals/play.yml b/test/integration/targets/conditionals/play.yml new file mode 100644 index 00000000..c6bb3815 --- /dev/null +++ b/test/integration/targets/conditionals/play.yml @@ -0,0 +1,551 @@ +# (c) 2014, James Cammarata +# (c) 2019, Ansible Project + +- hosts: testhost + gather_facts: false + vars_files: + - vars/main.yml + tasks: + - name: set conditional bare vars status + set_fact: + bare: "{{lookup('config', 'CONDITIONAL_BARE_VARS')|bool}}" + + - name: test conditional '==' + shell: echo 'testing' + when: 1 == 1 + register: result + + - name: assert conditional '==' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional '==' + shell: echo 'testing' + when: 0 == 1 + register: result + + - name: assert bad conditional '==' did NOT run + assert: + that: + - result is skipped + + - name: test conditional '!=' + shell: echo 'testing' + when: 0 != 1 + register: result + + - name: assert conditional '!=' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional '!=' + shell: echo 'testing' + when: 1 != 1 + register: result + + - name: assert bad conditional '!=' did NOT run + assert: + that: + - result is skipped + + - name: test conditional 'in' + shell: echo 'testing' + when: 1 in [1,2,3] + register: result + + - name: assert conditional 'in' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional 'in' + shell: echo 'testing' + when: 1 in [7,8,9] + register: result + + - name: assert bad conditional 'in' did NOT run + assert: + that: + - result is skipped + + - name: 
test conditional 'not in' + shell: echo 'testing' + when: 0 not in [1,2,3] + register: result + + - name: assert conditional 'not in' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional 'not in' + shell: echo 'testing' + when: 1 not in [1,2,3] + register: result + + - name: assert bad conditional 'not in' did NOT run + assert: + that: + - result is skipped + + - name: test conditional 'is defined' + shell: echo 'testing' + when: test_bare is defined + register: result + + - name: assert conditional 'is defined' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional 'is defined' + shell: echo 'testing' + when: foo_asdf_xyz is defined + register: result + + - name: assert bad conditional 'is defined' did NOT run + assert: + that: + - result is skipped + + - name: test conditional 'is not defined' + shell: echo 'testing' + when: foo_asdf_xyz is not defined + register: result + + - name: assert conditional 'is not defined' ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional 'is not defined' + shell: echo 'testing' + when: test_bare is not defined + register: result + + - name: assert bad conditional 'is not defined' did NOT run + assert: + that: + - result is skipped + + - name: test bad conditional 'is undefined' + shell: echo 'testing' + when: test_bare is undefined + register: result + + - name: assert bad conditional 'is undefined' did NOT run + assert: + that: + - result is skipped + + - name: test bare conditional + shell: echo 'testing' + when: test_bare + register: result + + - name: assert bare conditional ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test conditional using a variable + shell: echo 'testing' + when: test_bare_var == 123 + register: result + + - name: assert conditional using a variable ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test good conditional based on nested variables + shell: echo 'testing' + when: test_bare_nested_good + register: result + + - name: assert good conditional based on nested var ran + assert: + that: + - result is changed + - "result.stdout == 'testing'" + - "result.rc == 0" + + - name: test bad conditional based on nested variables + shell: echo 'testing' + when: test_bare_nested_bad + register: result + + - debug: var={{item}} + loop: + - bare + - result + - test_bare_nested_bad + + - name: assert that the bad nested conditional is skipped when bare vars are enabled, since the 'string' template resolves to 'false' + assert: + that: + - result is skipped + + when: bare|bool + + - name: assert that the bad nested conditional did run when bare vars are disabled, since the non-bare 'string' is untemplated but truthy + assert: + that: + - result is changed + when: not bare|bool + + - name: test bad conditional based on nested variables with bool filter + shell: echo 'testing' + when: test_bare_nested_bad|bool + register: result + + - name: assert that the bad nested conditional did NOT run as bool forces evaluation + assert: + that: + - result is skipped + + #----------------------------------------------------------------------- + # proper booleanification tests (issue #8629) + + - name: set fact to string 'false' + set_fact: bool_test1=false + + - name: set fact to string 'False' + set_fact: bool_test2=False + +
- name: set fact to a proper boolean using complex args + set_fact: + bool_test3: false + + - name: "test boolean value 'false' string using 'when: var'" + command: echo 'hi' + when: bool_test1 + register: result + + - name: assert that the task did not run for 'false' + assert: + that: + - result is skipped + + - name: "test boolean value 'false' string using 'when: not var'" + command: echo 'hi' + when: not bool_test1 + register: result + + - name: assert that the task DID run for not 'false' + assert: + that: + - result is changed + + - name: "test boolean value of 'False' string using 'when: var'" + command: echo 'hi' + when: bool_test2 + register: result + + - name: assert that the task did not run for 'False' + assert: + that: + - result is skipped + + - name: "test boolean value 'False' string using 'when: not var'" + command: echo 'hi' + when: not bool_test2 + register: result + + - name: assert that the task DID run for not 'False' + assert: + that: + - result is changed + + - name: "test proper boolean value of complex arg using 'when: var'" + command: echo 'hi' + when: bool_test3 + register: result + + - name: assert that the task did not run for proper boolean false + assert: + that: + - result is skipped + + - name: "test proper boolean value of complex arg using 'when: not var'" + command: echo 'hi' + when: not bool_test3 + register: result + + - name: assert that the task DID run for not false + assert: + that: + - result is changed + + - set_fact: skipped_bad_attribute=True + - block: + - name: test a with_items loop using a variable with a missing attribute + debug: var=item + with_items: "{{cond_bad_attribute.results | default('')}}" + register: result + - set_fact: skipped_bad_attribute=False + - name: assert the task was skipped + assert: + that: + - skipped_bad_attribute + when: cond_bad_attribute is defined and 'results' in cond_bad_attribute + + - name: test a with_items loop skipping a single item + debug: var=item + with_items: "{{cond_list_of_items.results}}" + when: item != 'b' + register: result + + - debug: var=result + + - name: assert only a single item was skipped + assert: + that: + - result.results|length == 3 + - result.results[1].skipped + + - name: test complex templated condition + debug: msg="it works" + when: vars_file_var in things1|union([vars_file_var]) + + - name: test dict with invalid key is undefined + vars: + mydict: + a: foo + b: bar + debug: var=mydict['c'] + register: result + when: mydict['c'] is undefined + + - name: assert the task did not fail + assert: + that: + - result is success + + - name: test dict with invalid key does not run with conditional is defined + vars: + mydict: + a: foo + b: bar + debug: var=mydict['c'] + when: mydict['c'] is defined + register: result + + - name: assert the task was skipped + assert: + that: + - result is skipped + + - name: test list with invalid element does not run with conditional is defined + vars: + mylist: [] + debug: var=mylist[0] + when: mylist[0] is defined + register: result + + - name: assert the task was skipped + assert: + that: + - result is skipped + + - name: test list with invalid element is undefined + vars: + mylist: [] + debug: var=mylist[0] + when: mylist[0] is undefined + register: result + + - name: assert the task did not fail + assert: + that: + - result is success + + + - name: Deal with multivar equality + tags: ['leveldiff'] + when: not bare|bool + vars: + toplevel_hash: + hash_var_one: justastring + hash_var_two: something.with.dots + hash_var_three: 
something:with:colons + hash_var_four: something/with/slashes + hash_var_five: something with spaces + hash_var_six: yes + hash_var_seven: no + toplevel_var_one: justastring + toplevel_var_two: something.with.dots + toplevel_var_three: something:with:colons + toplevel_var_four: something/with/slashes + toplevel_var_five: something with spaces + toplevel_var_six: yes + toplevel_var_seven: no + block: + + - name: var subkey simple string + debug: + var: toplevel_hash.hash_var_one + register: sub + when: toplevel_hash.hash_var_one + + - name: toplevel simple string + debug: + var: toplevel_var_one + when: toplevel_var_one + register: top + ignore_errors: yes + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + - top is not failed + - sub is not failed + + - name: var subkey string with dots + debug: + var: toplevel_hash.hash_var_two + register: sub + when: toplevel_hash.hash_var_two + + - debug: + var: toplevel_var_two + when: toplevel_var_two + register: top + ignore_errors: yes + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + - top is not failed + - sub is not failed + + - name: var subkey string with dots + debug: + var: toplevel_hash.hash_var_three + register: sub + when: toplevel_hash.hash_var_three + + - debug: + var: toplevel_var_three + when: toplevel_var_three + register: top + ignore_errors: yes + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + - top is not failed + - sub is not failed + + - name: var subkey string with colon + debug: + var: toplevel_hash.hash_var_four + register: sub + when: toplevel_hash.hash_var_four + + - debug: + var: toplevel_var_four + when: toplevel_var_four + register: top + ignore_errors: yes + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + - top is not failed + - sub is not failed + + - name: var subkey string with spaces + debug: + var: toplevel_hash.hash_var_five + register: sub + when: toplevel_hash.hash_var_five + + - debug: + var: toplevel_var_five + when: toplevel_var_five + register: top + ignore_errors: yes + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + - top is not failed + - sub is not failed + + - name: var subkey with 'yes' value + debug: + var: toplevel_hash.hash_var_six + register: sub + when: toplevel_hash.hash_var_six + + - debug: + var: toplevel_var_six + register: top + when: toplevel_var_six + + - name: ensure top and multi work same + assert: + that: + - top is not skipped + - sub is not skipped + + - name: var subkey with 'no' value + debug: + var: toplevel_hash.hash_var_seven + register: sub + when: toplevel_hash.hash_var_seven + + - debug: + var: toplevel_var_seven + register: top + when: toplevel_var_seven + + - name: ensure top and multi work same + assert: + that: + - top is skipped + - sub is skipped + + - name: test that 'comparison expression' item works with_items + assert: + that: + - item + with_items: + - 1 == 1 + + - name: test that 'comparison expression' item works in loop + assert: + that: + - item + loop: + - 1 == 1 diff --git a/test/integration/targets/conditionals/runme.sh b/test/integration/targets/conditionals/runme.sh new file mode 100755 index 00000000..934443a5 --- /dev/null +++ b/test/integration/targets/conditionals/runme.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_CONDITIONAL_BARE_VARS=1 
ansible-playbook -i ../../inventory play.yml "$@" +ANSIBLE_CONDITIONAL_BARE_VARS=0 ansible-playbook -i ../../inventory play.yml "$@" + +export ANSIBLE_CONDITIONAL_BARE_VARS=1 +export ANSIBLE_DEPRECATION_WARNINGS=True + +# No warnings for conditionals that are already type bool +test "$(ansible-playbook -i ../../inventory test_no_warnings.yml "$@" 2>&1 | grep -c '\[DEPRECATION WARNING\]')" = 0 + +# Warn for bare vars of other types since they may be interpreted differently when CONDITIONAL_BARE_VARS defaults to False +test "$(ansible-playbook -i ../../inventory test_warnings.yml "$@" 2>&1 | grep -c '\[DEPRECATION WARNING\]')" = 2 diff --git a/test/integration/targets/conditionals/test_no_warnings.yml b/test/integration/targets/conditionals/test_no_warnings.yml new file mode 100644 index 00000000..93280447 --- /dev/null +++ b/test/integration/targets/conditionals/test_no_warnings.yml @@ -0,0 +1,18 @@ +- hosts: testhost + gather_facts: false + vars: + boolean_var: false + nested: + bool_var: false + tasks: + - name: Run tasks that previously warned to use the bool filter, on vars that are already type bool + block: + - debug: + when: boolean_var + - debug: + when: nested.bool_var + - debug: + when: double_interpolated + vars: + double_interpolated: "{{ other }}" + other: false diff --git a/test/integration/targets/conditionals/test_warnings.yml b/test/integration/targets/conditionals/test_warnings.yml new file mode 100644 index 00000000..4186cd01 --- /dev/null +++ b/test/integration/targets/conditionals/test_warnings.yml @@ -0,0 +1,14 @@ +- hosts: testhost + gather_facts: false + vars: + str_boolean_var: 'false' + tasks: + - name: Run tasks with warnings for conditionals that will change in behavior depending on CONDITIONAL_BARE_VARS + block: + - debug: + when: str_boolean_var + - debug: + when: double_interpolated + vars: + double_interpolated: other + other: false diff --git a/test/integration/targets/conditionals/vars/main.yml b/test/integration/targets/conditionals/vars/main.yml new file mode 100644 index 00000000..d6221478 --- /dev/null +++ b/test/integration/targets/conditionals/vars/main.yml @@ -0,0 +1,22 @@ +--- +# cond_bad_attribute is a dictionary that will be used to check that +# a conditional passes a with_items loop on a variable +# with a missing attribute (ie.
cond_bad_attribute.results) +cond_bad_attribute: + bar: a + +cond_list_of_items: + results: + - a + - b + - c + +things1: + - 1 + - 2 +vars_file_var: 321 + +test_bare: true +test_bare_var: 123 +test_bare_nested_good: "test_bare_var == 123" +test_bare_nested_bad: "{{test_bare_var}} == 321" diff --git a/test/integration/targets/config/aliases b/test/integration/targets/config/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/config/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/config/inline_comment_ansible.cfg b/test/integration/targets/config/inline_comment_ansible.cfg new file mode 100644 index 00000000..afe9197d --- /dev/null +++ b/test/integration/targets/config/inline_comment_ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +cow_whitelist = ansibull ; BOOM diff --git a/test/integration/targets/config/runme.sh b/test/integration/targets/config/runme.sh new file mode 100755 index 00000000..ea3989b8 --- /dev/null +++ b/test/integration/targets/config/runme.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eux + +# ignore empty env var and use default +# shellcheck disable=SC1007 +ANSIBLE_TIMEOUT= ansible -m ping testhost -i ../../inventory "$@" + +# env var is wrong type, this should be a fatal error pointing at the setting +ANSIBLE_TIMEOUT='lola' ansible -m ping testhost -i ../../inventory "$@" 2>&1|grep 'Invalid type for configuration option setting: DEFAULT_TIMEOUT' + +# https://github.com/ansible/ansible/issues/69577 +ANSIBLE_REMOTE_TMP="$HOME/.ansible/directory_with_no_space" ansible -m ping testhost -i ../../inventory "$@" + +ANSIBLE_REMOTE_TMP="$HOME/.ansible/directory with space" ansible -m ping testhost -i ../../inventory "$@" + +ANSIBLE_CONFIG=nonexistent.cfg ansible-config dump --only-changed -v | grep 'No config file found; using defaults' + +# https://github.com/ansible/ansible/pull/73715 +ANSIBLE_CONFIG=inline_comment_ansible.cfg ansible-config dump --only-changed | grep "'ansibull'" diff --git a/test/integration/targets/connection/aliases b/test/integration/targets/connection/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/connection/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/connection/test.sh b/test/integration/targets/connection/test.sh new file mode 100755 index 00000000..ad672e23 --- /dev/null +++ b/test/integration/targets/connection/test.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -eux + +[ -f "${INVENTORY}" ] + +# Run connection tests with both the default and C locale.
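+# The second run below forces the C locale to catch encoding regressions against remotes that are not UTF-8.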
+ + ansible-playbook test_connection.yml -i "${INVENTORY}" "$@" +LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@" + +# Check that connection vars do not appear in the output +# https://github.com/ansible/ansible/pull/70853 +trap "rm out.txt" EXIT + +ansible all -i "${INVENTORY}" -m set_fact -a "testing=value" -v | tee out.txt +if grep 'ansible_host' out.txt +then + echo "FAILURE: Connection vars in output" + exit 1 +else + echo "SUCCESS: Connection vars not found" +fi + +ansible-playbook test_reset_connection.yml -i "${INVENTORY}" "$@" diff --git a/test/integration/targets/connection/test_connection.yml b/test/integration/targets/connection/test_connection.yml new file mode 100644 index 00000000..21699422 --- /dev/null +++ b/test/integration/targets/connection/test_connection.yml @@ -0,0 +1,43 @@ +- hosts: "{{ target_hosts }}" + gather_facts: no + serial: 1 + tasks: + + ### raw with unicode arg and output + + - name: raw with unicode arg and output + raw: echo 汉语 + register: command + - name: check output of raw with unicode arg and output + assert: + that: + - "'汉语' in command.stdout" + - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules + + ### copy local file with unicode filename and content + + - name: create local file with unicode filename and content + local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语 + - name: remove remote file with unicode filename and content + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent" + - name: create remote directory with unicode name + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory" + - name: copy local file with unicode filename and content + action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt" + + ### fetch remote file with unicode filename and content + + - name: remove local file with unicode filename and content + local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent + - name: fetch remote file with unicode filename and content + fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true + + ### remove local and remote temp files + + - name: remove local temp file + local_action: file path={{ local_tmp }}-汉语 state=absent + - name: remove remote temp file + action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent" + + ### test wait_for_connection plugin + - wait_for_connection: diff --git a/test/integration/targets/connection/test_reset_connection.yml b/test/integration/targets/connection/test_reset_connection.yml new file mode 100644 index 00000000..2f6cb8dc --- /dev/null +++ b/test/integration/targets/connection/test_reset_connection.yml @@ -0,0 +1,5 @@ +- hosts: "{{ target_hosts }}" + gather_facts: no + tasks: + # https://github.com/ansible/ansible/issues/65812 + - meta: reset_connection diff --git a/test/integration/targets/connection_delegation/action_plugins/delegation_action.py b/test/integration/targets/connection_delegation/action_plugins/delegation_action.py new file mode 100644 index 00000000..9d419e75 --- /dev/null +++ b/test/integration/targets/connection_delegation/action_plugins/delegation_action.py @@ -0,0 +1,12 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): 
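+ # Surface the resolved remote_password from the active connection so the test playbook can assert which password the delegated connection received.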
+ return { + 'remote_password': self._connection.get_option('remote_password'), + } diff --git a/test/integration/targets/connection_delegation/aliases b/test/integration/targets/connection_delegation/aliases new file mode 100644 index 00000000..87caabdf --- /dev/null +++ b/test/integration/targets/connection_delegation/aliases @@ -0,0 +1,5 @@ +shippable/posix/group1 +skip/freebsd # No sshpass +skip/osx # No sshpass +skip/macos # No sshpass +skip/rhel # No sshpass diff --git a/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py b/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py new file mode 100644 index 00000000..f61846cf --- /dev/null +++ b/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py @@ -0,0 +1,45 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +author: Ansible Core Team +connection: delegation_connection +short_description: Test connection for delegated host check +description: +- Some further description that you don't care about. +options: + remote_password: + description: The remote password + type: str + vars: + - name: ansible_password + # Tests that an aliased key gets the -k option which hardcodes the value to password + aliases: + - password +""" + +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + + transport = 'delegation_connection' + has_pipelining = True + + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + + def _connect(self): + super(Connection, self)._connect() + + def exec_command(self, cmd, in_data=None, sudoable=True): + super(Connection, self).exec_command(cmd, in_data, sudoable) + + def put_file(self, in_path, out_path): + super(Connection, self).put_file(in_path, out_path) + + def fetch_file(self, in_path, out_path): + super(Connection, self).fetch_file(in_path, out_path) + + def close(self): + super(Connection, self).close() diff --git a/test/integration/targets/connection_delegation/inventory.ini b/test/integration/targets/connection_delegation/inventory.ini new file mode 100644 index 00000000..e7f846d3 --- /dev/null +++ b/test/integration/targets/connection_delegation/inventory.ini @@ -0,0 +1 @@ +my_host ansible_host=127.0.0.1 ansible_connection=delegation_connection diff --git a/test/integration/targets/connection_delegation/runme.sh b/test/integration/targets/connection_delegation/runme.sh new file mode 100755 index 00000000..eb26f7c5 --- /dev/null +++ b/test/integration/targets/connection_delegation/runme.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -ux + +echo "Checking if sshpass is present" +which sshpass 2>&1 || exit 0 +echo "sshpass is present, continuing with test" + +sshpass -p my_password ansible-playbook -i inventory.ini test.yml -k "$@" diff --git a/test/integration/targets/connection_delegation/test.yml b/test/integration/targets/connection_delegation/test.yml new file mode 100644 index 00000000..678bef51 --- /dev/null +++ b/test/integration/targets/connection_delegation/test.yml @@ -0,0 +1,23 @@ +--- +- hosts: localhost + gather_facts: no + tasks: + - name: test connection receives -k from play_context when delegating + delegation_action: + delegate_to: my_host + register: result + + - assert: + that: + - result.remote_password == 'my_password' + + - name: ensure vars set for that host take precedence over -k + delegation_action: + delegate_to: my_host + vars: + 
ansible_password: other_password + register: result + + - assert: + that: + - result.remote_password == 'other_password' diff --git a/test/integration/targets/connection_local/aliases b/test/integration/targets/connection_local/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/connection_local/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/connection_local/runme.sh b/test/integration/targets/connection_local/runme.sh new file mode 120000 index 00000000..70aa5dbd --- /dev/null +++ b/test/integration/targets/connection_local/runme.sh @@ -0,0 +1 @@ +../connection_posix/test.sh \ No newline at end of file diff --git a/test/integration/targets/connection_local/test_connection.inventory b/test/integration/targets/connection_local/test_connection.inventory new file mode 100644 index 00000000..64a27227 --- /dev/null +++ b/test/integration/targets/connection_local/test_connection.inventory @@ -0,0 +1,7 @@ +[local] +local-pipelining ansible_ssh_pipelining=true +local-no-pipelining ansible_ssh_pipelining=false +[local:vars] +ansible_host=localhost +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/connection_paramiko_ssh/aliases b/test/integration/targets/connection_paramiko_ssh/aliases new file mode 100644 index 00000000..ad44392e --- /dev/null +++ b/test/integration/targets/connection_paramiko_ssh/aliases @@ -0,0 +1,5 @@ +needs/ssh +shippable/posix/group3 +needs/target/setup_paramiko +destructive # potentially installs/uninstalls OS packages via setup_paramiko +skip/aix diff --git a/test/integration/targets/connection_paramiko_ssh/runme.sh b/test/integration/targets/connection_paramiko_ssh/runme.sh new file mode 100755 index 00000000..123f6e23 --- /dev/null +++ b/test/integration/targets/connection_paramiko_ssh/runme.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -eux + +source ../setup_paramiko/setup.sh + +./test.sh diff --git a/test/integration/targets/connection_paramiko_ssh/test.sh b/test/integration/targets/connection_paramiko_ssh/test.sh new file mode 120000 index 00000000..70aa5dbd --- /dev/null +++ b/test/integration/targets/connection_paramiko_ssh/test.sh @@ -0,0 +1 @@ +../connection_posix/test.sh \ No newline at end of file diff --git a/test/integration/targets/connection_paramiko_ssh/test_connection.inventory b/test/integration/targets/connection_paramiko_ssh/test_connection.inventory new file mode 100644 index 00000000..a3f34ab7 --- /dev/null +++ b/test/integration/targets/connection_paramiko_ssh/test_connection.inventory @@ -0,0 +1,7 @@ +[paramiko_ssh] +paramiko_ssh-pipelining ansible_ssh_pipelining=true +paramiko_ssh-no-pipelining ansible_ssh_pipelining=false +[paramiko_ssh:vars] +ansible_host=localhost +ansible_connection=paramiko_ssh +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/connection_posix/aliases b/test/integration/targets/connection_posix/aliases new file mode 100644 index 00000000..f5e09799 --- /dev/null +++ b/test/integration/targets/connection_posix/aliases @@ -0,0 +1,2 @@ +needs/target/connection +hidden diff --git a/test/integration/targets/connection_posix/test.sh b/test/integration/targets/connection_posix/test.sh new file mode 100755 index 00000000..d3976ff3 --- /dev/null +++ b/test/integration/targets/connection_posix/test.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# Connection tests for POSIX platforms use this script by linking to it from the 
appropriate 'connection_' target dir. +# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix. + +group=$(python -c \ + "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))") + +cd ../connection + +INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \ + -e target_hosts="${group}" \ + -e action_prefix= \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/test/integration/targets/connection_psrp/aliases b/test/integration/targets/connection_psrp/aliases new file mode 100644 index 00000000..b3e9b8bc --- /dev/null +++ b/test/integration/targets/connection_psrp/aliases @@ -0,0 +1,4 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest +needs/target/connection diff --git a/test/integration/targets/connection_psrp/files/empty.txt b/test/integration/targets/connection_psrp/files/empty.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/connection_psrp/runme.sh b/test/integration/targets/connection_psrp/runme.sh new file mode 100755 index 00000000..35984bba --- /dev/null +++ b/test/integration/targets/connection_psrp/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -eux + +# make sure hosts are using psrp connections +ansible -i ../../inventory.winrm localhost \ + -m template \ + -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \ + "$@" + +python.py -m pip install pypsrp +cd ../connection + +INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \ + -e target_hosts=windows \ + -e action_prefix=win_ \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=c:/windows/temp/ansible-remote \ + "$@" + +cd ../connection_psrp + +ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \ + "$@" diff --git a/test/integration/targets/connection_psrp/test_connection.inventory.j2 b/test/integration/targets/connection_psrp/test_connection.inventory.j2 new file mode 100644 index 00000000..d2d3a492 --- /dev/null +++ b/test/integration/targets/connection_psrp/test_connection.inventory.j2 @@ -0,0 +1,9 @@ +[windows] +{% for host in vars.groups.windows %} +{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_port={{ hostvars[host]['ansible_port'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_password={{ hostvars[host]['ansible_password'] }} +{% endfor %} + +[windows:vars] +ansible_connection=psrp +ansible_psrp_auth=negotiate +ansible_psrp_cert_validation=ignore diff --git a/test/integration/targets/connection_psrp/tests.yml b/test/integration/targets/connection_psrp/tests.yml new file mode 100644 index 00000000..dabbf407 --- /dev/null +++ b/test/integration/targets/connection_psrp/tests.yml @@ -0,0 +1,133 @@ +--- +# these are extra tests for psrp that aren't covered under test/integration/targets/connection/* +- name: test out psrp specific tests + hosts: windows + serial: 1 + gather_facts: no + + tasks: + - name: test complex objects in raw output + # until PyYAML is upgraded to 4.x we need to use the \U escape for a unicode codepoint + # and enclose it in quotes so that it translates the \U + raw: " + [PSCustomObject]@{string = 'string'}; + [PSCustomObject]@{unicode = 'poo - \U0001F4A9'}; + [PSCustomObject]@{integer = 1}; + [PSCustomObject]@{list = @(1, 2)}; + Get-Service -Name winrm; + Write-Output -InputObject 'string - \U0001F4A9';" + register: raw_out + + - name: assert complex objects in raw output + assert: +
that: + - raw_out.stdout_lines|count == 6 + - "raw_out.stdout_lines[0] == 'string: string'" + - "raw_out.stdout_lines[1] == 'unicode: poo - \U0001F4A9'" + - "raw_out.stdout_lines[2] == 'integer: 1'" + - "raw_out.stdout_lines[3] == \"list: [1, 2]\"" + - raw_out.stdout_lines[4] == "winrm" + - raw_out.stdout_lines[5] == "string - \U0001F4A9" + + # Become only works on Server 2008 when running with basic auth, skip this host for now as it is too complicated to + # override the auth protocol in the tests. + - name: check if we are running on Server 2008 + win_shell: '[System.Environment]::OSVersion.Version -ge [Version]"6.1"' + register: os_version + + - name: test out become with psrp + win_whoami: + when: os_version|bool + register: whoami_out + become: yes + become_method: runas + become_user: SYSTEM + + - name: assert test out become with psrp + assert: + that: + - whoami_out.account.sid == "S-1-5-18" + when: os_version|bool + + - name: test out async with psrp + win_shell: Start-Sleep -Seconds 2; Write-Output abc + async: 10 + poll: 1 + register: async_out + + - name: assert test out async with psrp + assert: + that: + - async_out.stdout_lines == ["abc"] + + - name: Output unicode characters from PowerShell using PSRP + win_command: "powershell.exe -ExecutionPolicy ByPass -Command \"Write-Host '\U0001F4A9'\"" + register: command_unicode_output + + - name: Assert unicode output + assert: + that: + - command_unicode_output is changed + - command_unicode_output.rc == 0 + - "command_unicode_output.stdout == '\U0001F4A9\n'" + - command_unicode_output.stderr == '' + + - name: Output unicode characters from PowerShell using PSRP + win_shell: "Write-Host '\U0001F4A9'" + register: shell_unicode_output + + - name: Assert unicode output + assert: + that: + - shell_unicode_output is changed + - shell_unicode_output.rc == 0 + - "shell_unicode_output.stdout == '\U0001F4A9\n'" + - shell_unicode_output.stderr == '' + + - name: copy empty file + win_copy: + src: empty.txt + dest: C:\Windows\TEMP\empty.txt + register: copy_empty + + - name: get result of copy empty file + win_stat: + path: C:\Windows\TEMP\empty.txt + get_checksum: yes + register: copy_empty_actual + + - name: assert copy empty file + assert: + that: + - copy_empty.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + - copy_empty_actual.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + - copy_empty_actual.stat.size == 0 + + - block: + - name: fetch empty file + fetch: + src: C:\Windows\TEMP\empty.txt + dest: /tmp/empty.txt + flat: yes + register: fetch_empty + + - name: get result of fetch empty file + stat: + path: /tmp/empty.txt + get_checksum: yes + register: fetch_empty_actual + delegate_to: localhost + + - name: assert fetch empty file + assert: + that: + - fetch_empty.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + - fetch_empty_actual.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + - fetch_empty_actual.stat.size == 0 + + always: + - name: remove tmp file + file: + path: /tmp/empty.txt + state: absent + delegate_to: localhost diff --git a/test/integration/targets/connection_ssh/aliases b/test/integration/targets/connection_ssh/aliases new file mode 100644 index 00000000..1d822b45 --- /dev/null +++ b/test/integration/targets/connection_ssh/aliases @@ -0,0 +1,3 @@ +needs/ssh +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/connection_ssh/posix.sh b/test/integration/targets/connection_ssh/posix.sh new file mode 120000 index 00000000..70aa5dbd --- /dev/null +++
b/test/integration/targets/connection_ssh/posix.sh @@ -0,0 +1 @@ +../connection_posix/test.sh \ No newline at end of file diff --git a/test/integration/targets/connection_ssh/runme.sh b/test/integration/targets/connection_ssh/runme.sh new file mode 100755 index 00000000..e7b2b21f --- /dev/null +++ b/test/integration/targets/connection_ssh/runme.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +set -ux + +# We skip this whole section if the test node doesn't have sshpass on it. +if command -v sshpass > /dev/null; then + # Check if our sshpass supports -P + sshpass -P foo > /dev/null + sshpass_supports_prompt=$? + if [[ $sshpass_supports_prompt -eq 0 ]]; then + # If the prompt is wrong, we'll end up hanging (due to sshpass hanging). + # We should probably do something better here, like timing out in Ansible, + # but this has been the behavior for a long time, before we supported custom + # password prompts. + # + # So we search for a custom password prompt that is clearly wrong and call + # ansible with timeout. If we time out, our custom prompt was successfully + # searched for. It's a weird way of doing things, but it does ensure + # that the flag gets passed to sshpass. + timeout 5 ansible -m ping \ + -e ansible_connection=ssh \ + -e ansible_sshpass_prompt=notThis: \ + -e ansible_password=foo \ + -e ansible_user=definitelynotroot \ + -i test_connection.inventory \ + ssh-pipelining + ret=$? + if [[ $ret -ne 124 ]]; then + echo "Expected to time out and we did not. Exiting with failure." + exit 1 + fi + else + ansible -m ping \ + -e ansible_connection=ssh \ + -e ansible_sshpass_prompt=notThis: \ + -e ansible_password=foo \ + -e ansible_user=definitelynotroot \ + -i test_connection.inventory \ + ssh-pipelining | grep 'customized password prompts' + ret=$? + [[ $ret -eq 0 ]] || exit $ret + fi +fi + +set -e + +# temporary work-around for issues due to new scp filename checking +# https://github.com/ansible/ansible/issues/52640 +if [[ "$(scp -T 2>&1)" == "usage: scp "* ]]; then + # scp supports the -T option + # work-around required + scp_args=("-e" "ansible_scp_extra_args=-T") +else + # scp does not support the -T option + # no work-around required + # however we need to put something in the array to keep older versions of bash happy + scp_args=("-e" "") +fi + +# sftp +./posix.sh "$@" +# scp +ANSIBLE_SCP_IF_SSH=true ./posix.sh "$@" "${scp_args[@]}" +# piped +ANSIBLE_SSH_TRANSFER_METHOD=piped ./posix.sh "$@" diff --git a/test/integration/targets/connection_ssh/test_connection.inventory b/test/integration/targets/connection_ssh/test_connection.inventory new file mode 100644 index 00000000..a1a4ff1e --- /dev/null +++ b/test/integration/targets/connection_ssh/test_connection.inventory @@ -0,0 +1,7 @@ +[ssh] +ssh-pipelining ansible_ssh_pipelining=true +ssh-no-pipelining ansible_ssh_pipelining=false +[ssh:vars] +ansible_host=localhost +ansible_connection=ssh +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/connection_windows_ssh/aliases b/test/integration/targets/connection_windows_ssh/aliases new file mode 100644 index 00000000..45a48f09 --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/aliases @@ -0,0 +1,6 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest +skip/windows/2008 # Windows Server 2008 does not support Win32-OpenSSH +needs/target/connection +needs/target/setup_remote_tmp_dir diff --git a/test/integration/targets/connection_windows_ssh/runme.sh b/test/integration/targets/connection_windows_ssh/runme.sh new file 
mode 100755 index 00000000..488bb7c5 --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/runme.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -eux + +# We need to run these tests with both the powershell and cmd shell types + +### cmd tests - no DefaultShell set ### +ansible -i ../../inventory.winrm localhost \ + -m template \ + -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \ + -e "test_shell_type=cmd" \ + "$@" + +# https://github.com/PowerShell/Win32-OpenSSH/wiki/DefaultShell +ansible -i ../../inventory.winrm windows \ + -m win_regedit \ + -a "path=HKLM:\\\\SOFTWARE\\\\OpenSSH name=DefaultShell state=absent" \ + "$@" + +# Need to flush the connection to ensure we get a new shell for the next tests +ansible -i "${OUTPUT_DIR}/test_connection.inventory" windows \ + -m meta -a "reset_connection" \ + "$@" + +# sftp +./windows.sh "$@" +# scp +ANSIBLE_SCP_IF_SSH=true ./windows.sh "$@" +# other tests not part of the generic connection test framework +ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \ + "$@" + +### powershell tests - explicit DefaultShell set ### +# we do this last as the default shell on our CI instances is set to PowerShell +ansible -i ../../inventory.winrm localhost \ + -m template \ + -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \ + -e "test_shell_type=powershell" \ + "$@" + +# ensure the default shell is set to PowerShell +ansible -i ../../inventory.winrm windows \ + -m win_regedit \ + -a "path=HKLM:\\\\SOFTWARE\\\\OpenSSH name=DefaultShell data=C:\\\\Windows\\\\System32\\\\WindowsPowerShell\\\\v1.0\\\\powershell.exe" \ + "$@" + +ansible -i "${OUTPUT_DIR}/test_connection.inventory" windows \ + -m meta -a "reset_connection" \ + "$@" + +./windows.sh "$@" +ANSIBLE_SCP_IF_SSH=true ./windows.sh "$@" +ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \ + "$@" diff --git a/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2 b/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2 new file mode 100644 index 00000000..5893eafe --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2 @@ -0,0 +1,12 @@ +[windows] +{% for host in vars.groups.windows %} +{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user={{ hostvars[host]['ansible_user'] }}{{ ' ansible_ssh_private_key_file=' ~ hostvars[host]['ansible_ssh_private_key_file'] if (hostvars[host]['ansible_ssh_private_key_file']|default()) else '' }} +{% endfor %} + +[windows:vars] +ansible_shell_type={{ test_shell_type }} +ansible_connection=ssh +ansible_port=22 +# used to preserve the existing environment and not touch existing files +ansible_ssh_extra_args="-o UserKnownHostsFile=/dev/null" +ansible_ssh_host_key_checking=False diff --git a/test/integration/targets/connection_windows_ssh/tests.yml b/test/integration/targets/connection_windows_ssh/tests.yml new file mode 100644 index 00000000..e9b538b4 --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/tests.yml @@ -0,0 +1,32 @@ +--- +- name: test out Windows SSH specific tests + hosts: windows + serial: 1 + gather_facts: no + + tasks: + - name: test out become with Windows SSH + win_whoami: + register: win_ssh_become + become: yes + become_method: runas + become_user: SYSTEM + + - name: assert test out become with Windows SSH + assert: + that: + - win_ssh_become.account.sid == "S-1-5-18" + + - name: test out async with Windows SSH +
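+ # the non-ASCII payload below also exercises output encoding through the async wrapper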
win_shell: Write-Host café + async: 20 + poll: 3 + register: win_ssh_async + + - name: assert test out async with Windows SSH + assert: + that: + - win_ssh_async is changed + - win_ssh_async.rc == 0 + - win_ssh_async.stdout == "café\n" + - win_ssh_async.stderr == "" diff --git a/test/integration/targets/connection_windows_ssh/tests_fetch.yml b/test/integration/targets/connection_windows_ssh/tests_fetch.yml new file mode 100644 index 00000000..0b4fe949 --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/tests_fetch.yml @@ -0,0 +1,41 @@ +# This must be a play as we need to invoke it with the ANSIBLE_SCP_IF_SSH env +# to control the mechanism used. Unfortunately while ansible_scp_if_ssh is +# documented, it isn't actually used, hence the separate invocation +--- +- name: further fetch tests with metacharacters in the filename + hosts: windows + force_handlers: yes + serial: 1 + gather_facts: no + + tasks: + - name: setup remote tmp dir + import_role: + name: ../../setup_remote_tmp_dir + + - name: create remote file with metachar in name + win_copy: + content: some content + dest: '{{ remote_tmp_dir }}\file ^with &whoami' + + - name: test fetch against a file with cmd metacharacters + block: + - name: fetch file with metachar in name + fetch: + src: '{{ remote_tmp_dir }}\file ^with &whoami' + dest: ansible-test.txt + flat: yes + register: fetch_res + + - name: assert fetch file with metachar in name + assert: + that: + - fetch_res is changed + - fetch_res.checksum == '94e66df8cd09d410c62d9e0dc59d3a884e458e05' + + always: + - name: remove local copy of file + file: + path: ansible-test.txt + state: absent + delegate_to: localhost diff --git a/test/integration/targets/connection_windows_ssh/windows.sh b/test/integration/targets/connection_windows_ssh/windows.sh new file mode 100755 index 00000000..d2db50f8 --- /dev/null +++ b/test/integration/targets/connection_windows_ssh/windows.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -eux + +cd ../connection + +# A recent patch to OpenSSH causes a validation error when running through Ansible. It seems like if the path is quoted +# then it will fail with 'protocol error: filename does not match request'. We currently work around this by setting +# 'ansible_scp_extra_args=-T' to skip this check, but this should be removed once that bug is fixed and our test +# container has been updated.
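+# (scp's -T option disables this client-side strict filename checking.)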
+# https://unix.stackexchange.com/questions/499958/why-does-scps-strict-filename-checking-reject-quoted-last-component-but-not-oth +# https://github.com/openssh/openssh-portable/commit/391ffc4b9d31fa1f4ad566499fef9176ff8a07dc +INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \ + -e target_hosts=windows \ + -e action_prefix=win_ \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=c:/windows/temp/ansible-remote \ + -e ansible_scp_extra_args=-T \ + "$@" + +cd ../connection_windows_ssh + +ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests_fetch.yml \ + -e ansible_scp_extra_args=-T \ + "$@" diff --git a/test/integration/targets/connection_winrm/aliases b/test/integration/targets/connection_winrm/aliases new file mode 100644 index 00000000..b3e9b8bc --- /dev/null +++ b/test/integration/targets/connection_winrm/aliases @@ -0,0 +1,4 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest +needs/target/connection diff --git a/test/integration/targets/connection_winrm/runme.sh b/test/integration/targets/connection_winrm/runme.sh new file mode 100755 index 00000000..e6772415 --- /dev/null +++ b/test/integration/targets/connection_winrm/runme.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# make sure hosts are using winrm connections +ansible -i ../../inventory.winrm localhost \ + -m template \ + -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \ + "$@" + +cd ../connection + +INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \ + -e target_hosts=windows \ + -e action_prefix=win_ \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=c:/windows/temp/ansible-remote \ + "$@" diff --git a/test/integration/targets/connection_winrm/test_connection.inventory.j2 b/test/integration/targets/connection_winrm/test_connection.inventory.j2 new file mode 100644 index 00000000..7c4f3dc9 --- /dev/null +++ b/test/integration/targets/connection_winrm/test_connection.inventory.j2 @@ -0,0 +1,10 @@ +[windows] +{% for host in vars.groups.windows %} +{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_port={{ hostvars[host]['ansible_port'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_password={{ hostvars[host]['ansible_password'] }} +{% endfor %} + +[windows:vars] +ansible_connection=winrm +# we don't know if we're using an encrypted connection or not, so we'll use message encryption +ansible_winrm_transport=ntlm +ansible_winrm_server_cert_validation=ignore diff --git a/test/integration/targets/copy/aliases b/test/integration/targets/copy/aliases new file mode 100644 index 00000000..db9bbd8c --- /dev/null +++ b/test/integration/targets/copy/aliases @@ -0,0 +1,4 @@ +needs/root +shippable/posix/group2 +destructive +skip/aix diff --git a/test/integration/targets/copy/defaults/main.yml b/test/integration/targets/copy/defaults/main.yml new file mode 100644 index 00000000..8e9a5836 --- /dev/null +++ b/test/integration/targets/copy/defaults/main.yml @@ -0,0 +1,2 @@ +--- +remote_unprivileged_user: tmp_ansible_test_user diff --git a/test/integration/targets/copy/files/foo.txt b/test/integration/targets/copy/files/foo.txt new file mode 100644 index 00000000..7c6ded14 --- /dev/null +++ b/test/integration/targets/copy/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/targets/copy/files/subdir/bar.txt b/test/integration/targets/copy/files/subdir/bar.txt new file mode 100644 index 00000000..76018072 --- /dev/null +++ b/test/integration/targets/copy/files/subdir/bar.txt @@ -0,0 +1 @@ +baz diff 
--git a/test/integration/targets/copy/files/subdir/subdir1/bar.txt b/test/integration/targets/copy/files/subdir/subdir1/bar.txt new file mode 120000 index 00000000..315e865d --- /dev/null +++ b/test/integration/targets/copy/files/subdir/subdir1/bar.txt @@ -0,0 +1 @@ +../bar.txt \ No newline at end of file diff --git a/test/integration/targets/copy/files/subdir/subdir2/baz.txt b/test/integration/targets/copy/files/subdir/subdir2/baz.txt new file mode 100644 index 00000000..76018072 --- /dev/null +++ b/test/integration/targets/copy/files/subdir/subdir2/baz.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt new file mode 100644 index 00000000..78df5b06 --- /dev/null +++ b/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt @@ -0,0 +1 @@ +qux \ No newline at end of file diff --git a/test/integration/targets/copy/meta/main.yml b/test/integration/targets/copy/meta/main.yml new file mode 100644 index 00000000..06d4fd29 --- /dev/null +++ b/test/integration/targets/copy/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_nobody diff --git a/test/integration/targets/copy/tasks/acls.yml b/test/integration/targets/copy/tasks/acls.yml new file mode 100644 index 00000000..9a3be9b0 --- /dev/null +++ b/test/integration/targets/copy/tasks/acls.yml @@ -0,0 +1,33 @@ +- block: + - block: + - name: Testing ACLs + copy: + content: "TEST" + mode: 0644 + dest: "~/test.txt" + + - shell: getfacl ~/test.txt + register: acls + + become: yes + become_user: "{{ remote_unprivileged_user }}" + + - name: Check that there are no ACL leftovers + assert: + that: + - "'user:{{ remote_unprivileged_user }}:r-x\t#effective:r--' not in acls.stdout_lines" + + - name: Check that permissions match with what was set in the mode param + assert: + that: + - "'user::rw-' in acls.stdout_lines" + - "'group::r--' in acls.stdout_lines" + - "'other::r--' in acls.stdout_lines" + + always: + - name: Clean up + file: + path: "~/test.txt" + state: absent + become: yes + become_user: "{{ remote_unprivileged_user }}" diff --git a/test/integration/targets/copy/tasks/check_mode.yml b/test/integration/targets/copy/tasks/check_mode.yml new file mode 100644 index 00000000..5b405cc4 --- /dev/null +++ b/test/integration/targets/copy/tasks/check_mode.yml @@ -0,0 +1,126 @@ +- block: + + - name: check_mode - Create another clean copy of 'subdir' not messed with by previous tests (check_mode) + copy: + src: subdir + dest: 'checkmode_subdir/' + directory_mode: 0700 + local_follow: False + check_mode: true + register: check_mode_subdir_first + + - name: check_mode - Stat the new dir to make sure it really doesn't exist + stat: + path: 'checkmode_subdir/' + register: check_mode_subdir_first_stat + + - name: check_mode - Actually do it + copy: + src: subdir + dest: 'checkmode_subdir/' + directory_mode: 0700 + local_follow: False + register: check_mode_subdir_real + + - name: check_mode - Stat the new dir to make sure it really exists + stat: + path: 'checkmode_subdir/' + register: check_mode_subdir_real_stat + + # Quick sanity before we move on + - assert: + that: + - check_mode_subdir_first is changed + - not check_mode_subdir_first_stat.stat.exists + - check_mode_subdir_real is changed + - check_mode_subdir_real_stat.stat.exists + + # Do some finagling here. First, use check_mode to ensure it never gets
Then actualy create it, and use check_mode to ensure that doing + # the same copy gets marked as no change. + # + # This same pattern repeats for several other src/dest combinations. + - name: check_mode - Ensure dest with trailing / never gets created but would be without check_mode + copy: + remote_src: true + src: 'checkmode_subdir/' + dest: 'destdir_should_never_exist_because_of_check_mode/' + follow: true + check_mode: true + register: check_mode_trailing_slash_first + + - name: check_mode - Stat the new dir to make sure it really doesn't exist + stat: + path: 'destdir_should_never_exist_because_of_check_mode/' + register: check_mode_trailing_slash_first_stat + + - name: check_mode - Create the above copy for real now (without check_mode) + copy: + remote_src: true + src: 'checkmode_subdir/' + dest: 'destdir_should_never_exist_because_of_check_mode/' + register: check_mode_trailing_slash_real + + - name: check_mode - Stat the new dir to make sure it really exists + stat: + path: 'destdir_should_never_exist_because_of_check_mode/' + register: check_mode_trailing_slash_real_stat + + - name: check_mode - Do the same copy yet again (with check_mode this time) to ensure it's marked unchanged + copy: + remote_src: true + src: 'checkmode_subdir/' + dest: 'destdir_should_never_exist_because_of_check_mode/' + check_mode: true + register: check_mode_trailing_slash_second + + # Repeat the same basic pattern here. + + - name: check_mode - Do another basic copy (with check_mode) + copy: + src: foo.txt + dest: "{{ remote_dir }}/foo-check_mode.txt" + mode: 0444 + check_mode: true + register: check_mode_foo_first + + - name: check_mode - Stat the new file to make sure it really doesn't exist + stat: + path: "{{ remote_dir }}/foo-check_mode.txt" + register: check_mode_foo_first_stat + + - name: check_mode - Do the same basic copy (without check_mode) + copy: + src: foo.txt + dest: "{{ remote_dir }}/foo-check_mode.txt" + mode: 0444 + register: check_mode_foo_real + + - name: check_mode - Stat the new file to make sure it really exists + stat: + path: "{{ remote_dir }}/foo-check_mode.txt" + register: check_mode_foo_real_stat + + - name: check_mode - And again (with check_mode) + copy: + src: foo.txt + dest: "{{ remote_dir }}/foo-check_mode.txt" + mode: 0444 + register: check_mode_foo_second + + - assert: + that: + - check_mode_subdir_first is changed + + - check_mode_trailing_slash_first is changed + # TODO: This is a legitimate bug + #- not check_mode_trailing_slash_first_stat.stat.exists + - check_mode_trailing_slash_real is changed + - check_mode_trailing_slash_real_stat.stat.exists + - check_mode_trailing_slash_second is not changed + + - check_mode_foo_first is changed + - not check_mode_foo_first_stat.stat.exists + - check_mode_foo_real is changed + - check_mode_foo_real_stat.stat.exists + - check_mode_foo_second is not changed diff --git a/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml b/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml new file mode 100644 index 00000000..c86caa1e --- /dev/null +++ b/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml @@ -0,0 +1,29 @@ +# src is a file, dest is a non-existent directory (2 levels of directories): +# checks that dest is created +- name: Ensure that dest top directory doesn't exist + file: + path: '{{ remote_dir }}/{{ item.dest.split("/")[0] }}' + state: absent + +- name: Copy file, dest is a nonexistent target directory + copy: + src: '{{ item.src }}' + dest: '{{ remote_dir }}/{{ 
item.dest }}' + register: copy_result + +- name: assert copy worked + assert: + that: + - 'copy_result is successful' + - 'copy_result is changed' + +- name: stat copied file + stat: + path: '{{ remote_dir }}/{{ item.check }}' + register: stat_file_in_dir_result + +- name: assert that file exists + assert: + that: + - stat_file_in_dir_result.stat.exists + - stat_file_in_dir_result.stat.isreg diff --git a/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml b/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml new file mode 100644 index 00000000..fad53e71 --- /dev/null +++ b/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml @@ -0,0 +1,43 @@ +# src is a file, dest is a non-existent directory (2 levels of directories): +# checks that dest is created +- name: Ensure that dest top directory doesn't exist + file: + path: '{{ remote_dir }}/{{ item.dest.split("/")[0] }}' + state: absent + +- name: create subdir + file: + path: subdir + state: directory + +- name: create src file + file: + path: "{{ item }}" + state: touch + loop: + - foo.txt + - subdir/bar.txt + +- name: Copy file, dest is a nonexistent target directory + copy: + src: '{{ item.src }}' + dest: '{{ remote_dir }}/{{ item.dest }}' + remote_src: true + register: copy_result + +- name: assert copy worked + assert: + that: + - 'copy_result is successful' + - 'copy_result is changed' + +- name: stat copied file + stat: + path: '{{ remote_dir }}/{{ item.check }}' + register: stat_file_in_dir_result + +- name: assert that file exists + assert: + that: + - stat_file_in_dir_result.stat.exists + - stat_file_in_dir_result.stat.isreg diff --git a/test/integration/targets/copy/tasks/main.yml b/test/integration/targets/copy/tasks/main.yml new file mode 100644 index 00000000..33a92bf9 --- /dev/null +++ b/test/integration/targets/copy/tasks/main.yml @@ -0,0 +1,117 @@ +- block: + + - name: Create a local temporary directory + shell: mktemp -d /tmp/ansible_test.XXXXXXXXX + register: tempfile_result + delegate_to: localhost + + - set_fact: + local_temp_dir: '{{ tempfile_result.stdout }}' + remote_dir: '{{ output_dir }}' + symlinks: + ansible-test-abs-link: /tmp/ansible-test-abs-link + ansible-test-abs-link-dir: /tmp/ansible-test-abs-link-dir + circles: ../ + invalid: invalid + invalid2: ../invalid + out_of_tree_circle: /tmp/ansible-test-link-dir/out_of_tree_circle + subdir3: ../subdir2/subdir3 + + - file: path={{local_temp_dir}} state=directory + name: ensure temp dir exists + + # file cannot do this properly, use command instead + - name: Create symbolic link + command: "ln -s '{{ item.value }}' '{{ item.key }}'" + args: + chdir: '{{role_path}}/files/subdir/subdir1' + warn: no + with_dict: "{{ symlinks }}" + + - name: Create unprivileged remote user + user: + name: '{{ remote_unprivileged_user }}' + register: user + + - name: Check sudoers dir + stat: + path: /etc/sudoers.d + register: etc_sudoers + + - name: Set sudoers.d path fact + set_fact: + sudoers_d_file: "{{ '/etc/sudoers.d' if etc_sudoers.stat.exists else '/usr/local/etc/sudoers.d' }}/{{ remote_unprivileged_user }}" + + - name: Create sudoers file + copy: + dest: "{{ sudoers_d_file }}" + content: "{{ remote_unprivileged_user }} ALL=(ALL) NOPASSWD: ALL" + + - file: + path: "{{ user.home }}/.ssh" + owner: '{{ remote_unprivileged_user }}' + state: directory + mode: 0700 + + - name: Duplicate authorized_keys + copy: + src: $HOME/.ssh/authorized_keys + dest: '{{ user.home }}/.ssh/authorized_keys' + owner: '{{ remote_unprivileged_user }}' + mode: 0600 + remote_src: yes + + - file: + path: "{{ remote_dir }}" + state: directory + remote_user: '{{ remote_unprivileged_user }}' + + # execute test tasks using an unprivileged user, this is useful to avoid + # local/remote ambiguity when controller and managed hosts are identical. + - import_tasks: tests.yml + remote_user: '{{ remote_unprivileged_user }}' + + - import_tasks: acls.yml + when: ansible_system == 'Linux' + + - import_tasks: no_log.yml + + - import_tasks: check_mode.yml + + # https://github.com/ansible/ansible/issues/57618 + - name: Test diff contents + copy: + content: 'Ansible managed\n' + dest: "{{ local_temp_dir }}/file.txt" + diff: yes + register: diff_output + + - assert: + that: + - 'diff_output.diff[0].before == ""' + - '"Ansible managed" in diff_output.diff[0].after' + + always: + - name: Cleaning + file: + path: '{{ local_temp_dir }}' + state: absent + delegate_to: localhost + + - name: Remove symbolic link + file: + path: '{{ role_path }}/files/subdir/subdir1/{{ item.key }}' + state: absent + delegate_to: localhost + with_dict: "{{ symlinks }}" + + - name: Remove unprivileged remote user + user: + name: '{{ remote_unprivileged_user }}' + state: absent + remove: yes + + - name: Remove sudoers.d file + file: + path: "{{ sudoers_d_file }}" + state: absent diff --git a/test/integration/targets/copy/tasks/no_log.yml b/test/integration/targets/copy/tasks/no_log.yml new file mode 100644 index 00000000..980c3177 --- /dev/null +++ b/test/integration/targets/copy/tasks/no_log.yml @@ -0,0 +1,82 @@ +- block: + + - set_fact: + dest: "{{ local_temp_dir }}/test_no_log" + + - name: ensure playbook and dest files don't exist yet + file: + path: "{{ item }}" + state: absent + loop: + - "{{ local_temp_dir }}/test_no_log.yml" + - "{{ dest }}" + + - name: create a playbook to run with command + copy: + dest: "{{local_temp_dir}}/test_no_log.yml" + content: !unsafe | + --- + - hosts: localhost + gather_facts: no + tasks: + - copy: + dest: "{{ dest }}" + content: "{{ secret }}" + + - name: copy the secret while using -vvv and check mode + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}} --check" + register: result + + - assert: + that: + - "'SECRET' not in result.stdout" + + - name: copy the secret while using -vvv + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}}" + register: result + + - assert: + that: + - "'SECRET' not in result.stdout" + + - name: copy the secret while using -vvv and check mode again + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}} --check" + register: result + + - assert: + that: + - "'SECRET' not in result.stdout" + + - name: copy the secret while using -vvv again + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}}" + register: result + + - assert: + that: + - "'SECRET' not in result.stdout" + + - name: copy a new secret while using -vvv and check mode + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=NEWSECRET -e dest={{dest}} --check" + register: result + + - assert: + that: + - "'NEWSECRET' not in result.stdout" + + - name: copy a new secret while using -vvv + command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=NEWSECRET -e dest={{dest}}" + register: result + + - assert: + that: + - "'NEWSECRET' not in result.stdout" + + always: +
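+ # clean up even if one of the assertions above failed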
+ - name: remove temp test files + file: + path: "{{ item }}" + state: absent + loop: + - "{{ local_temp_dir }}/test_no_log.yml" + - "{{ dest }}" diff --git a/test/integration/targets/copy/tasks/selinux.yml b/test/integration/targets/copy/tasks/selinux.yml new file mode 100644 index 00000000..6bd3b04f --- /dev/null +++ b/test/integration/targets/copy/tasks/selinux.yml @@ -0,0 +1,35 @@ +# Ensure that our logic for special filesystems works as intended +# https://github.com/ansible/ansible/issues/70244 +- block: + - name: Install dosfstools + yum: + name: dosfstools + state: present + + - name: Create a file to use for a fat16 filesystem + command: dd if=/dev/zero of=/fat16 bs=1024 count=10240 + + - name: mkfs.fat + command: mkfs.fat -F16 /fat16 + + - name: Mount it + command: mount /fat16 /mnt + + - name: Copy a file to it + copy: + src: /etc/fstab + dest: /mnt/fstab + always: + - name: Unmount it + command: umount /mnt + ignore_errors: true + + - name: Nuke /fat16 + file: + path: /fat16 + state: absent + + - name: Uninstall dosfstools + yum: + name: dosfstools + state: absent diff --git a/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml new file mode 100644 index 00000000..f4ab9998 --- /dev/null +++ b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml @@ -0,0 +1,26 @@ +- name: Ensure that dest top directory doesn't exist + file: + path: '{{ remote_dir }}/{{ dest.split("/")[0] }}' + state: absent + +- name: Copy file, dest is a file in non-existing target directory + copy: + src: foo.txt + dest: '{{ remote_dir }}/{{ dest }}' + register: copy_result + ignore_errors: True + +- name: Assert copy failed + assert: + that: + - 'copy_result is failed' + +- name: Stat dest path + stat: + path: '{{ remote_dir }}/{{ dest.split("/")[0] }}' + register: stat_file_in_dir_result + +- name: assert that dest doesn't exist + assert: + that: + - 'not stat_file_in_dir_result.stat.exists' diff --git a/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml new file mode 100644 index 00000000..61d87969 --- /dev/null +++ b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml @@ -0,0 +1,32 @@ +- name: Ensure that dest top directory doesn't exist + file: + path: '{{ remote_dir }}/{{ dest.split("/")[0] }}' + state: absent + +- name: create src file + file: + path: foo.txt + state: touch + +- name: Copy file, dest is a file in non-existing target directory + copy: + src: foo.txt + dest: '{{ remote_dir }}/{{ dest }}' + remote_src: true + register: copy_result + ignore_errors: True + +- name: Assert copy failed + assert: + that: + - 'copy_result is failed' + +- name: Stat dest path + stat: + path: '{{ remote_dir }}/{{ dest.split("/")[0] }}' + register: stat_file_in_dir_result + +- name: assert that dest doesn't exist + assert: + that: + - 'not stat_file_in_dir_result.stat.exists' diff --git a/test/integration/targets/copy/tasks/tests.yml b/test/integration/targets/copy/tasks/tests.yml new file mode 100644 index 00000000..be955317 --- /dev/null +++ b/test/integration/targets/copy/tasks/tests.yml @@ -0,0 +1,2261 @@ +# test code for the copy module and action plugin +# (c) 2014, Michael DeHaan +# (c) 2017, Ansible Project +# +# GNU General Public License v3 or later (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt ) 
+# + +- name: Record the output directory + set_fact: + remote_file: "{{ remote_dir }}/foo.txt" + +- name: Initiate a basic copy, and also test the mode + copy: + src: foo.txt + dest: "{{ remote_file }}" + mode: 0444 + register: copy_result + +- name: Record the sha of the test file for later tests + set_fact: + remote_file_hash: "{{ copy_result['checksum'] }}" + +- name: Check the mode of the output file + file: + name: "{{ remote_file }}" + state: file + register: file_result_check + +- name: Assert the mode is correct + assert: + that: + - "file_result_check.mode == '0444'" + +# same as expanduser & expandvars +- command: 'echo {{ remote_dir }}' + register: echo + +- set_fact: + remote_dir_expanded: '{{ echo.stdout }}' + remote_file_expanded: '{{ echo.stdout }}/foo.txt' + +- debug: + var: copy_result + verbosity: 1 + +- name: Assert basic copy worked + assert: + that: + - "'changed' in copy_result" + - copy_result.dest == remote_file_expanded + - "'group' in copy_result" + - "'gid' in copy_result" + - "'checksum' in copy_result" + - "'owner' in copy_result" + - "'size' in copy_result" + - "'src' in copy_result" + - "'state' in copy_result" + - "'uid' in copy_result" + +- name: Verify that the file was marked as changed + assert: + that: + - "copy_result.changed == true" + +- name: Verify that the file checksums are correct + assert: + that: + - "copy_result.checksum == ('foo.txt\n'|hash('sha1'))" + +- name: Verify that the legacy md5sum is correct + assert: + that: + - "copy_result.md5sum == ('foo.txt\n'|hash('md5'))" + when: ansible_fips|bool != True + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert the stat results are correct + assert: + that: + - "stat_results.stat.exists == true" + - "stat_results.stat.isblk == false" + - "stat_results.stat.isfifo == false" + - "stat_results.stat.isreg == true" + - "stat_results.stat.issock == false" + - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))" + +- name: Overwrite the file via same means + copy: + src: foo.txt + dest: "{{ remote_file }}" + decrypt: no + register: copy_result2 + +- name: Assert that the file was not changed + assert: + that: + - "copy_result2 is not changed" + +- name: Assert basic copy worked + assert: + that: + - "'changed' in copy_result2" + - copy_result2.dest == remote_file_expanded + - "'group' in copy_result2" + - "'gid' in copy_result2" + - "'checksum' in copy_result2" + - "'owner' in copy_result2" + - "'size' in copy_result2" + - "'state' in copy_result2" + - "'uid' in copy_result2" + +- name: Overwrite the file using the content system + copy: + content: "modified" + dest: "{{ remote_file }}" + decrypt: no + register: copy_result3 + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert that the file has changed + assert: + that: + - "copy_result3 is changed" + - "'content' not in copy_result3" + - "stat_results.stat.checksum == ('modified'|hash('sha1'))" + - "stat_results.stat.mode != '0700'" + +- name: Overwrite the file again using the content system, also passing along file params + copy: + content: "modified" + dest: "{{ remote_file }}" + mode: 0700 + decrypt: no + register: copy_result4 + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert that the 
file has changed + assert: + that: + - "copy_result4 is changed" + - "'content' not in copy_result4" + - "stat_results.stat.checksum == ('modified'|hash('sha1'))" + - "stat_results.stat.mode == '0700'" + +- name: Create a hardlink to the file + file: + src: '{{ remote_file }}' + dest: '{{ remote_dir }}/hard.lnk' + state: hard + +- name: copy the same contents into place + copy: + content: 'modified' + dest: '{{ remote_file }}' + mode: 0700 + decrypt: no + register: copy_results + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- name: Check the stat results of the hard link + stat: + path: "{{ remote_dir }}/hard.lnk" + register: hlink_results + +- name: Check that the file did not change + assert: + that: + - 'stat_results.stat.inode == hlink_results.stat.inode' + - 'copy_results.changed == False' + - "stat_results.stat.checksum == ('modified'|hash('sha1'))" + +- name: copy the same contents into place but change mode + copy: + content: 'modified' + dest: '{{ remote_file }}' + mode: 0404 + decrypt: no + register: copy_results + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- name: Check the stat results of the hard link + stat: + path: "{{ remote_dir }}/hard.lnk" + register: hlink_results + +- name: Check that the file changed permissions but is still the same + assert: + that: + - 'stat_results.stat.inode == hlink_results.stat.inode' + - 'copy_results.changed == True' + - 'stat_results.stat.mode == hlink_results.stat.mode' + - 'stat_results.stat.mode == "0404"' + - "stat_results.stat.checksum == ('modified'|hash('sha1'))" + +- name: copy the different contents into place + copy: + content: 'adjusted' + dest: '{{ remote_file }}' + mode: 0404 + register: copy_results + +- name: Check the stat results of the file + stat: + path: "{{ remote_file }}" + register: stat_results + +- name: Check the stat results of the hard link + stat: + path: "{{ remote_dir }}/hard.lnk" + register: hlink_results + +- name: Check that the file changed and hardlink was broken + assert: + that: + - 'stat_results.stat.inode != hlink_results.stat.inode' + - 'copy_results.changed == True' + - "stat_results.stat.checksum == ('adjusted'|hash('sha1'))" + - "hlink_results.stat.checksum == ('modified'|hash('sha1'))" + +- name: Try invalid copy input location to ensure it fails + copy: + src: invalid_file_location_does_not_exist + dest: "{{ remote_dir }}/file.txt" + ignore_errors: True + register: failed_copy + +- name: Assert that invalid source failed + assert: + that: + - "failed_copy.failed" + - "'invalid_file_location_does_not_exist' in failed_copy.msg" + +- name: Try empty source to ensure it fails + copy: + src: '' + dest: "{{ remote_dir }}" + ignore_errors: True + register: failed_copy + +- debug: + var: failed_copy + verbosity: 1 + +- name: Assert that empty source failed + assert: + that: + - failed_copy is failed + - "'src (or content) is required' in failed_copy.msg" + +- name: Try without destination to ensure it fails + copy: + src: foo.txt + ignore_errors: True + register: failed_copy + +- debug: + var: failed_copy + verbosity: 1 + +- name: Assert that missing destination failed + assert: + that: + - failed_copy is failed + - "'dest is required' in failed_copy.msg" + +- name: Try without source to ensure it fails + copy: + dest: "{{ remote_file }}" + ignore_errors: True + register: failed_copy + +- debug: + var: failed_copy + verbosity: 1 + +- name: Assert that missing source failed + assert: + that: +
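+ # a missing src should produce the same error message as the empty src case above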
- failed_copy is failed + - "'src (or content) is required' in failed_copy.msg" + +- name: Try with both src and content to ensure it fails + copy: + src: foo.txt + content: testing + dest: "{{ remote_file }}" + ignore_errors: True + register: failed_copy + +- name: Assert that mutually exclusive parameters failed + assert: + that: + - failed_copy is failed + - "'mutually exclusive' in failed_copy.msg" + +- name: Try with content and directory as destination to ensure it fails + copy: + content: testing + dest: "{{ remote_dir }}" + ignore_errors: True + register: failed_copy + +- debug: + var: failed_copy + verbosity: 1 + +- name: Assert that content and directory as destination failed + assert: + that: + - failed_copy is failed + - "'can not use content with a dir as dest' in failed_copy.msg" + +- name: Clean up + file: + path: "{{ remote_file }}" + state: absent + +- name: Copy source file to destination directory with mode + copy: + src: foo.txt + dest: "{{ remote_dir }}" + mode: 0500 + register: copy_results + +- name: Check the stat results of the file + stat: + path: '{{ remote_file }}' + register: stat_results + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert that the file has changed + assert: + that: + - "copy_results is changed" + - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))" + - "stat_results.stat.mode == '0500'" + +# Test copy with mode=preserve +- name: Create file and set perms to an odd value + copy: + content: "foo.txt\n" + dest: '{{ local_temp_dir }}/foo.txt' + mode: 0547 + delegate_to: localhost + +- name: Copy with mode=preserve + copy: + src: '{{ local_temp_dir }}/foo.txt' + dest: '{{ remote_dir }}/copy-foo.txt' + mode: preserve + register: copy_results + +- name: Check the stat results of the file + stat: + path: '{{ remote_dir }}/copy-foo.txt' + register: stat_results + +- name: Assert that the file has changed and has correct mode + assert: + that: + - "copy_results is changed" + - "copy_results.mode == '0547'" + - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))" + - "stat_results.stat.mode == '0547'" + +- name: Test copy with mode=preserve and remote_src=True + copy: + src: '{{ remote_dir }}/copy-foo.txt' + dest: '{{ remote_dir }}/copy-foo2.txt' + mode: 'preserve' + remote_src: True + register: copy_results2 + +- name: Check the stat results of the file + stat: + path: '{{ remote_dir }}/copy-foo2.txt' + register: stat_results2 + +- name: Assert that the file has changed and has correct mode + assert: + that: + - "copy_results2 is changed" + - "copy_results2.mode == '0547'" + - "stat_results2.stat.checksum == ('foo.txt\n'|hash('sha1'))" + - "stat_results2.stat.mode == '0547'" + +# +# test recursive copy local_follow=False, no trailing slash +# + +- name: Create empty directory in the role we're copying from (git can't store empty dirs) + file: + path: '{{ role_path }}/files/subdir/subdira' + state: directory + delegate_to: localhost + +- name: Set the output subdirectory + set_fact: + remote_subdir: "{{ remote_dir }}/sub" + +- name: Make an output subdirectory + file: + name: "{{ remote_subdir }}" + state: directory + +- name: Setup link target for absolute link + copy: + dest: /tmp/ansible-test-abs-link + content: target + delegate_to: localhost + +- name: Setup link target dir for absolute link + file: + dest: /tmp/ansible-test-abs-link-dir + state: directory + delegate_to: localhost + +- name: Test recursive copy to directory no trailing slash, local_follow=False + copy: + src: subdir + dest: "{{ remote_subdir }}" + 
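+ # directory_mode is only applied to directories the copy itself creates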
directory_mode: 0700 + local_follow: False + register: recursive_copy_result + +- debug: + var: recursive_copy_result + verbosity: 1 + +- name: Assert that the recursive copy did something + assert: + that: + - "recursive_copy_result is changed" + +- name: Check that a file in a directory was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/bar.txt" + register: stat_bar + +- name: Check that a file in a deeper directory was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/subdir2/baz.txt" + register: stat_bar2 + +- name: Check that a file in a directory whose parent contains a directory alone was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/subdir2/subdir3/subdir4/qux.txt" + register: stat_bar3 + +- name: Assert recursive copy files + assert: + that: + - "stat_bar.stat.exists" + - "stat_bar2.stat.exists" + - "stat_bar3.stat.exists" + +- name: Check symlink to absolute path + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link' + register: stat_abs_link + +- name: Check symlink to relative path + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/bar.txt' + register: stat_relative_link + +- name: Check symlink to self + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/invalid' + register: stat_self_link + +- name: Check symlink to nonexistent file + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/invalid2' + register: stat_invalid_link + +- name: Check symlink to directory in copy + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/subdir3' + register: stat_dir_in_copy_link + +- name: Check symlink to directory outside of copy + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link-dir' + register: stat_dir_outside_copy_link + +- name: Assert recursive copy symlinks local_follow=False + assert: + that: + - "stat_abs_link.stat.exists" + - "stat_abs_link.stat.islnk" + - "'/tmp/ansible-test-abs-link' == stat_abs_link.stat.lnk_target" + - "stat_relative_link.stat.exists" + - "stat_relative_link.stat.islnk" + - "'../bar.txt' == stat_relative_link.stat.lnk_target" + - "stat_self_link.stat.exists" + - "stat_self_link.stat.islnk" + - "'invalid' in stat_self_link.stat.lnk_target" + - "stat_invalid_link.stat.exists" + - "stat_invalid_link.stat.islnk" + - "'../invalid' in stat_invalid_link.stat.lnk_target" + - "stat_dir_in_copy_link.stat.exists" + - "stat_dir_in_copy_link.stat.islnk" + - "'../subdir2/subdir3' in stat_dir_in_copy_link.stat.lnk_target" + - "stat_dir_outside_copy_link.stat.exists" + - "stat_dir_outside_copy_link.stat.islnk" + - "'/tmp/ansible-test-abs-link-dir' == stat_dir_outside_copy_link.stat.lnk_target" + +- name: Stat the recursively copied directories + stat: + path: "{{ remote_dir }}/sub/{{ item }}" + register: dir_stats + with_items: + - "subdir" + - "subdir/subdira" + - "subdir/subdir1" + - "subdir/subdir2" + - "subdir/subdir2/subdir3" + - "subdir/subdir2/subdir3/subdir4" + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert recursive copied directories mode (1) + assert: + that: + - "item.stat.exists" + - "item.stat.mode == '0700'" + with_items: "{{dir_stats.results}}" + +- name: Test recursive copy to directory no trailing slash, local_follow=False second time + copy: + src: subdir + dest: "{{ remote_subdir }}" + directory_mode: 0700 + local_follow: False + register: recursive_copy_result + +- name: Assert that the second copy did not change anything + assert: + that: + - "recursive_copy_result is not changed" + +- name: Cleanup the recursive copy subdir + file: + name: "{{ remote_subdir }}" + 
state: absent + +# +# Recursive copy with local_follow=False, trailing slash +# + +- name: Set the output subdirectory + set_fact: + remote_subdir: "{{ remote_dir }}/sub" + +- name: Make an output subdirectory + file: + name: "{{ remote_subdir }}" + state: directory + +- name: Setup link target for absolute link + copy: + dest: /tmp/ansible-test-abs-link + content: target + delegate_to: localhost + +- name: Setup link target dir for absolute link + file: + dest: /tmp/ansible-test-abs-link-dir + state: directory + delegate_to: localhost + +- name: Test recursive copy to directory trailing slash, local_follow=False + copy: + src: subdir/ + dest: "{{ remote_subdir }}" + directory_mode: 0700 + local_follow: False + register: recursive_copy_result + +- debug: + var: recursive_copy_result + verbosity: 1 + +- name: Assert that the recursive copy did something + assert: + that: + - "recursive_copy_result is changed" + +- name: Check that a file in a directory was transferred + stat: + path: "{{ remote_dir }}/sub/bar.txt" + register: stat_bar + +- name: Check that a file in a deeper directory was transferred + stat: + path: "{{ remote_dir }}/sub/subdir2/baz.txt" + register: stat_bar2 + +- name: Check that a file in a directory whose parent contains a directory alone was transferred + stat: + path: "{{ remote_dir }}/sub/subdir2/subdir3/subdir4/qux.txt" + register: stat_bar3 + +- name: Assert recursive copy files + assert: + that: + - "stat_bar.stat.exists" + - "stat_bar2.stat.exists" + - "stat_bar3.stat.exists" + +- name: Check symlink to absolute path + stat: + path: '{{ remote_dir }}/sub/subdir1/ansible-test-abs-link' + register: stat_abs_link + +- name: Check symlink to relative path + stat: + path: '{{ remote_dir }}/sub/subdir1/bar.txt' + register: stat_relative_link + +- name: Check symlink to self + stat: + path: '{{ remote_dir }}/sub/subdir1/invalid' + register: stat_self_link + +- name: Check symlink to nonexistent file + stat: + path: '{{ remote_dir }}/sub/subdir1/invalid2' + register: stat_invalid_link + +- name: Check symlink to directory in copy + stat: + path: '{{ remote_dir }}/sub/subdir1/subdir3' + register: stat_dir_in_copy_link + +- name: Check symlink to directory outside of copy + stat: + path: '{{ remote_dir }}/sub/subdir1/ansible-test-abs-link-dir' + register: stat_dir_outside_copy_link + +- name: Assert recursive copy symlinks local_follow=False trailing slash + assert: + that: + - "stat_abs_link.stat.exists" + - "stat_abs_link.stat.islnk" + - "'/tmp/ansible-test-abs-link' == stat_abs_link.stat.lnk_target" + - "stat_relative_link.stat.exists" + - "stat_relative_link.stat.islnk" + - "'../bar.txt' == stat_relative_link.stat.lnk_target" + - "stat_self_link.stat.exists" + - "stat_self_link.stat.islnk" + - "'invalid' in stat_self_link.stat.lnk_target" + - "stat_invalid_link.stat.exists" + - "stat_invalid_link.stat.islnk" + - "'../invalid' in stat_invalid_link.stat.lnk_target" + - "stat_dir_in_copy_link.stat.exists" + - "stat_dir_in_copy_link.stat.islnk" + - "'../subdir2/subdir3' in stat_dir_in_copy_link.stat.lnk_target" + - "stat_dir_outside_copy_link.stat.exists" + - "stat_dir_outside_copy_link.stat.islnk" + - "'/tmp/ansible-test-abs-link-dir' == stat_dir_outside_copy_link.stat.lnk_target" + +- name: Stat the recursively copied directories + stat: + path: "{{ remote_dir }}/sub/{{ item }}" + register: dir_stats + with_items: + - "subdira" + - "subdir1" + - "subdir2" + - "subdir2/subdir3" + - "subdir2/subdir3/subdir4" + +- debug: + var: dir_stats + verbosity: 1 + +- name: Assert 
recursive copied directories mode (2) + assert: + that: + - "item.stat.mode == '0700'" + with_items: "{{dir_stats.results}}" + +- name: Test recursive copy to directory trailing slash, local_follow=False second time + copy: + src: subdir/ + dest: "{{ remote_subdir }}" + directory_mode: 0700 + local_follow: False + register: recursive_copy_result + +- name: Assert that the second copy did not change anything + assert: + that: + - "recursive_copy_result is not changed" + +- name: Cleanup the recursive copy subdir + file: + name: "{{ remote_subdir }}" + state: absent + +# +# test recursive copy local_follow=True, no trailing slash +# + +- name: Set the output subdirectory + set_fact: + remote_subdir: "{{ remote_dir }}/sub" + +- name: Make an output subdirectory + file: + name: "{{ remote_subdir }}" + state: directory + +- name: Setup link target for absolute link + copy: + dest: /tmp/ansible-test-abs-link + content: target + delegate_to: localhost + +- name: Setup link target dir for absolute link + file: + dest: /tmp/ansible-test-abs-link-dir + state: directory + delegate_to: localhost + +- name: Test recursive copy to directory no trailing slash, local_follow=True + copy: + src: subdir + dest: "{{ remote_subdir }}" + directory_mode: 0700 + local_follow: True + register: recursive_copy_result + +- debug: + var: recursive_copy_result + verbosity: 1 + +- name: Assert that the recursive copy did something + assert: + that: + - "recursive_copy_result is changed" + +- name: Check that a file in a directory was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/bar.txt" + register: stat_bar + +- name: Check that a file in a deeper directory was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/subdir2/baz.txt" + register: stat_bar2 + +- name: Check that a file in a directory whose parent contains a directory alone was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/subdir2/subdir3/subdir4/qux.txt" + register: stat_bar3 + +- name: Check that a file in a directory whose parent is a symlink was transferred + stat: + path: "{{ remote_dir }}/sub/subdir/subdir1/subdir3/subdir4/qux.txt" + register: stat_bar4 + +- name: Assert recursive copy files + assert: + that: + - "stat_bar.stat.exists" + - "stat_bar2.stat.exists" + - "stat_bar3.stat.exists" + - "stat_bar4.stat.exists" + +- name: Check symlink to absolute path + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link' + register: stat_abs_link + +- name: Check symlink to relative path + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/bar.txt' + register: stat_relative_link + +- name: Check symlink to self + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/invalid' + register: stat_self_link + +- name: Check symlink to nonexistent file + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/invalid2' + register: stat_invalid_link + +- name: Check symlink to directory in copy + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/subdir3' + register: stat_dir_in_copy_link + +- name: Check symlink to directory outside of copy + stat: + path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link-dir' + register: stat_dir_outside_copy_link + +- name: Assert recursive copy symlinks local_follow=True + assert: + that: + - "stat_abs_link.stat.exists" + - "not stat_abs_link.stat.islnk" + - "stat_abs_link.stat.checksum == ('target'|hash('sha1'))" + - "stat_relative_link.stat.exists" + - "not stat_relative_link.stat.islnk" + - "stat_relative_link.stat.checksum == ('baz\n'|hash('sha1'))" + - "stat_self_link.stat.exists" + - "stat_self_link.stat.islnk" + - "'invalid' in stat_self_link.stat.lnk_target" + - "stat_invalid_link.stat.exists" + - "stat_invalid_link.stat.islnk" + - "'../invalid' in stat_invalid_link.stat.lnk_target" + - "stat_dir_in_copy_link.stat.exists" + - "not stat_dir_in_copy_link.stat.islnk" + - "stat_dir_in_copy_link.stat.isdir" + - "stat_dir_outside_copy_link.stat.exists" + - "not stat_dir_outside_copy_link.stat.islnk" + - "stat_dir_outside_copy_link.stat.isdir" + +- name: Stat the recursively copied directories + stat: + path: "{{ remote_dir }}/sub/{{ item }}" + register: dir_stats + with_items: + - "subdir" + - "subdir/subdira" + - "subdir/subdir1" + - "subdir/subdir1/subdir3" + - "subdir/subdir1/subdir3/subdir4" + - "subdir/subdir2" + - "subdir/subdir2/subdir3" + - "subdir/subdir2/subdir3/subdir4" + +- debug: + var: dir_stats + verbosity: 1 + +- name: Assert recursive copied directories mode (3) + assert: + that: + - "item.stat.mode == '0700'" + with_items: "{{dir_stats.results}}" + +- name: Test recursive copy to directory no trailing slash, local_follow=True second time + copy: + src: subdir + dest: "{{ remote_subdir }}" + directory_mode: 0700 + local_follow: True + register: recursive_copy_result + +- name: Assert that the second copy did not change anything + assert: + that: + - "recursive_copy_result is not changed" + +- name: Cleanup the recursive copy subdir + file: + name: "{{ remote_subdir }}" + state: absent + +# +# Recursive copy of tricky symlinks +# +- block: + - name: Create a directory to copy from + file: + path: '{{ local_temp_dir }}/source1' + state: directory + + - name: Create a directory outside of the tree + file: + path: '{{ local_temp_dir }}/source2' + state: directory + + - name: Create a symlink to a directory outside of the tree + file: + path: '{{ local_temp_dir }}/source1/link' + src: '{{ local_temp_dir }}/source2' + state: link + + - name: Create a circular link back to the tree + file: + path: '{{ local_temp_dir }}/source2/circle' + src: '../source1' + state: link + + - name: Create output directory + file: + path: '{{ local_temp_dir }}/dest1' + state: directory + delegate_to: localhost + +- name: Recursive copy the source + copy: + src: '{{ local_temp_dir }}/source1' + dest: '{{ remote_dir }}/dest1' + local_follow: True + register: copy_result + +- name: Check that the tree link is now a directory + stat: + path: '{{ remote_dir }}/dest1/source1/link' + register: link_result + +- name: Check that the out of tree link is still a link + stat: + path: '{{ remote_dir }}/dest1/source1/link/circle' + register: circle_result + +- name: Verify that the recursive copy worked + assert: + that: + - 'copy_result.changed' + - 'link_result.stat.isdir' + - 'not link_result.stat.islnk' + - 'circle_result.stat.islnk' + - '"../source1" == circle_result.stat.lnk_target' + +- name: Recursive copy the source a second time + copy: + src: '{{ local_temp_dir }}/source1' + dest: '{{ remote_dir }}/dest1' + local_follow: True + register: copy_result + +- name: Verify that the recursive copy made no changes + assert: + that: + - 'not copy_result.changed' + +# +# Recursive copy with absolute paths (#27439) +# +- name: Test that remote_dir is appropriate for this test (absolute path) + assert: + that: + - '{{ remote_dir_expanded[0] == "/" }}' + +- block: + - name: Create a directory to copy + file: + path: '{{ local_temp_dir }}/source_recursive' + state: directory + + - name: Create a file inside of the directory + copy: + content: "testing" + dest:
'{{ local_temp_dir }}/source_recursive/file' + + - name: Create a directory to place the test output in + file: + path: '{{ local_temp_dir }}/destination' + state: directory + delegate_to: localhost + +- name: Copy the directory and files within (no trailing slash) + copy: + src: '{{ local_temp_dir }}/source_recursive' + dest: '{{ remote_dir }}/destination' + +- name: Stat the recursively copied directory + stat: + path: "{{ remote_dir }}/destination/{{ item }}" + register: copied_stat + with_items: + - "source_recursive" + - "source_recursive/file" + - "file" + +- debug: + var: copied_stat + verbosity: 1 + +- name: Assert with no trailing slash, directory and file are copied + assert: + that: + - "copied_stat.results[0].stat.exists" + - "copied_stat.results[1].stat.exists" + - "not copied_stat.results[2].stat.exists" + +- name: Cleanup + file: + path: '{{ remote_dir }}/destination' + state: absent + +# Try again with a trailing slash + +- name: Create a directory to place the test output in + file: + path: '{{ remote_dir }}/destination' + state: directory + +- name: Copy just the files inside of the directory + copy: + src: '{{ local_temp_dir }}/source_recursive/' + dest: '{{ remote_dir }}/destination' + +- name: Stat the recursively copied directory + stat: + path: "{{ remote_dir }}/destination/{{ item }}" + register: copied_stat + with_items: + - "source_recursive" + - "source_recursive/file" + - "file" + +- debug: + var: copied_stat + verbosity: 1 + +- name: Assert with trailing slash, only the file is copied + assert: + that: + - "not copied_stat.results[0].stat.exists" + - "not copied_stat.results[1].stat.exists" + - "copied_stat.results[2].stat.exists" + +# +# Recursive copy with relative paths (#34893) +# + +- name: Create a directory to copy + file: + path: 'source_recursive' + state: directory + delegate_to: localhost + +- name: Create a file inside of the directory + copy: + content: "testing" + dest: 'source_recursive/file' + delegate_to: localhost + +- name: Create a directory to place the test output in + file: + path: 'destination' + state: directory + delegate_to: localhost + +- name: Copy the directory and files within (no trailing slash) + copy: + src: 'source_recursive' + dest: 'destination' + +- name: Stat the recursively copied directory + stat: + path: "destination/{{ item }}" + register: copied_stat + with_items: + - "source_recursive" + - "source_recursive/file" + - "file" + +- debug: + var: copied_stat + verbosity: 1 + +- name: Assert with no trailing slash, directory and file are copied + assert: + that: + - "copied_stat.results[0].stat.exists" + - "copied_stat.results[1].stat.exists" + - "not copied_stat.results[2].stat.exists" + +- name: Cleanup + file: + path: 'destination' + state: absent + +# Try again with a trailing slash + +- name: Create a directory to place the test output in + file: + path: 'destination' + state: directory + +- name: Copy just the files inside of the directory + copy: + src: 'source_recursive/' + dest: 'destination' + +- name: Stat the recursively copied directory + stat: + path: "destination/{{ item }}" + register: copied_stat + with_items: + - "source_recursive" + - "source_recursive/file" + - "file" + +- debug: + var: copied_stat + verbosity: 1 + +- name: Assert with trailing slash, only the file is copied + assert: + that: + - "not copied_stat.results[0].stat.exists" + - "not copied_stat.results[1].stat.exists" + - "copied_stat.results[2].stat.exists" + +- name: Cleanup + file: + path: 'destination' + state: absent + +- name: 
Cleanup + file: + path: 'source_recursive' + state: absent + +# +# issue 8394 +# + +- name: Create a file with content and a literal multiline block + copy: + content: | + this is the first line + this is the second line + + this line is after an empty line + this line is the last line + dest: "{{ remote_dir }}/multiline.txt" + register: copy_result6 + +- debug: + var: copy_result6 + verbosity: 1 + +- name: Assert the multiline file was created correctly + assert: + that: + - "copy_result6.changed" + - "copy_result6.dest == '{{remote_dir_expanded}}/multiline.txt'" + - "copy_result6.checksum == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'" + +# test overwriting a file as an unprivileged user (pull request #8624) +# this can't be relative to {{remote_dir}} as ~root usually has mode 700 +- block: + - name: Create world writable directory + file: + dest: /tmp/worldwritable + state: directory + mode: 0777 + + - name: Create world writable file + copy: + dest: /tmp/worldwritable/file.txt + content: "bar" + mode: 0666 + + - name: Overwrite the file as user nobody + copy: + dest: /tmp/worldwritable/file.txt + content: "baz" + become: yes + become_user: nobody + register: copy_result7 + + - name: Assert the file was overwritten + assert: + that: + - "copy_result7.changed" + - "copy_result7.dest == '/tmp/worldwritable/file.txt'" + - "copy_result7.checksum == ('baz'|hash('sha1'))" + + - name: Clean up + file: + dest: /tmp/worldwritable + state: absent + + remote_user: root + +# +# Follow=True tests +# + +# test overwriting a link using "follow=yes" so that the link +# is preserved and the link target is updated + +- name: Create a test file to symlink to + copy: + dest: "{{ remote_dir }}/follow_test" + content: "this is the follow test file\n" + +- name: Create a symlink to the test file + file: + path: "{{ remote_dir }}/follow_link" + src: './follow_test' + state: link + +- name: Update the test file using follow=True to preserve the link + copy: + dest: "{{ remote_dir }}/follow_link" + src: foo.txt + follow: yes + register: replace_follow_result + +- name: Stat the link path + stat: + path: "{{ remote_dir }}/follow_link" + register: stat_link_result + +- name: Assert that the link is still a link and contents were changed + assert: + that: + - stat_link_result['stat']['islnk'] + - stat_link_result['stat']['lnk_target'] == './follow_test' + - replace_follow_result['changed'] + - "replace_follow_result['checksum'] == remote_file_hash" + +# Symlink handling when the dest is already there +# https://github.com/ansible/ansible-modules-core/issues/1568 + +- name: test idempotency by trying to copy to the symlink with the same contents + copy: + dest: "{{ remote_dir }}/follow_link" + src: foo.txt + follow: yes + register: replace_follow_result + +- name: Stat the link path + stat: + path: "{{ remote_dir }}/follow_link" + register: stat_link_result + +- name: Assert that the link is still a link and contents were not changed + assert: + that: + - stat_link_result['stat']['islnk'] + - stat_link_result['stat']['lnk_target'] == './follow_test' + - not replace_follow_result['changed'] + - replace_follow_result['checksum'] == remote_file_hash + + +- name: Update the test file using follow=False to overwrite the link + copy: + dest: '{{ remote_dir }}/follow_link' + content: 'modified' + follow: False + register: copy_results + +- name: Check the stat results of the file + stat: + path: '{{remote_dir}}/follow_link' + register: stat_results + +- debug: + var: stat_results + verbosity: 1 + +- name: Assert that the file 
has changed and is not a link + assert: + that: + - "copy_results is changed" + - "'content' not in copy_results" + - "stat_results.stat.checksum == ('modified'|hash('sha1'))" + - "not stat_results.stat.islnk" + +# test overwriting a link using "follow=yes" so that the link +# is preserved and the link target is updated when the thing being copied is a link + +# +# File mode tests +# + +- name: setup directory for test + file: state=directory dest={{remote_dir }}/directory mode=0755 + +- name: set file mode when the destination is a directory + copy: src=foo.txt dest={{remote_dir}}/directory/ mode=0705 + +- name: set a different file mode when the destination is a directory + copy: src=foo.txt dest={{remote_dir}}/directory/ mode=0604 + register: file_result + +- name: check that the file has the correct attributes + stat: path={{ remote_dir }}/directory/foo.txt + register: file_attrs + +- assert: + that: + - "file_attrs.stat.mode == '0604'" + # The below assertions make an invalid assumption; these were not explicitly set + # - "file_attrs.stat.uid == 0" + # - "file_attrs.stat.pw_name == 'root'" + +- name: check that the containing directory did not change attributes + stat: path={{ remote_dir }}/directory/ + register: dir_attrs + +- assert: + that: + - "dir_attrs.stat.mode == '0755'" + +# Test that recursive copy of a directory containing a symlink to another +# directory, with mode=preserve and local_follow=no works. +# See: https://github.com/ansible/ansible/issues/68471 + +- name: Test recursive copy of dir with symlinks, mode=preserve, local_follow=False + copy: + src: '{{ role_path }}/files/subdir/' + dest: '{{ local_temp_dir }}/preserve_symlink/' + mode: preserve + local_follow: no + +- name: check that we actually used and still have a symlink + stat: path={{ local_temp_dir }}/preserve_symlink/subdir1/bar.txt + register: symlink_path + +- assert: + that: + - symlink_path.stat.exists + - symlink_path.stat.islnk + +# +# I believe the below section is now covered in the recursive copying section. 
+# Hold on for now as an original test case but delete once confirmed that +# everything is passing + +# +# Recursive copying with symlinks tests +# +- delegate_to: localhost + block: + - name: Create a test dir to copy + file: + path: '{{ local_temp_dir }}/top_dir' + state: directory + + - name: Create a test dir to symlink to + file: + path: '{{ local_temp_dir }}/linked_dir' + state: directory + + - name: Create a file in the test dir + copy: + dest: '{{ local_temp_dir }}/linked_dir/file1' + content: 'hello world' + + - name: Create a link to the test dir + file: + path: '{{ local_temp_dir }}/top_dir/follow_link_dir' + src: '{{ local_temp_dir }}/linked_dir' + state: link + + - name: Create a circular subdir + file: + path: '{{ local_temp_dir }}/top_dir/subdir' + state: directory + + ### FIXME: Also add a test for a relative symlink + - name: Create a circular symlink + file: + path: '{{ local_temp_dir }}/top_dir/subdir/circle' + src: '{{ local_temp_dir }}/top_dir/' + state: link + +- name: Copy the directory's link + copy: + src: '{{ local_temp_dir }}/top_dir' + dest: '{{ remote_dir }}/new_dir' + local_follow: True + +- name: Stat the copied path + stat: + path: '{{ remote_dir }}/new_dir/top_dir/follow_link_dir' + register: stat_dir_result + +- name: Stat the copied file + stat: + path: '{{ remote_dir }}/new_dir/top_dir/follow_link_dir/file1' + register: stat_file_in_dir_result + +- name: Stat the circular symlink + stat: + path: '{{ remote_dir }}/new_dir/top_dir/subdir/circle' + register: stat_circular_symlink_result + +- name: Assert that the directory exists + assert: + that: + - stat_dir_result.stat.exists + - stat_dir_result.stat.isdir + - stat_file_in_dir_result.stat.exists + - stat_file_in_dir_result.stat.isreg + - stat_circular_symlink_result.stat.exists + - stat_circular_symlink_result.stat.islnk + +# Relative paths in dest: +- name: Smoketest that copying content to an implicit relative path works + copy: + content: 'testing' + dest: 'ansible-testing.txt' + register: relative_results + +- name: Assert that copying to an implicit relative path reported changed + assert: + that: + - 'relative_results["changed"]' + - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"' + +- name: Test that copying the same content with an implicit relative path reports no change + copy: + content: 'testing' + dest: 'ansible-testing.txt' + register: relative_results + +- name: Assert that copying the same content with an implicit relative path reports no change + assert: + that: + - 'not relative_results["changed"]' + - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"' + +- name: Test that copying different content with an implicit relative path reports change + copy: + content: 'testing2' + dest: 'ansible-testing.txt' + register: relative_results + +- name: Assert that copying different content with an implicit relative path reports changed + assert: + that: + - 'relative_results["changed"]' + - 'relative_results["checksum"] == "596b29ec9afea9e461a20610d150939b9c399d93"' + +- name: Smoketest that explicit relative path works + copy: + content: 'testing' + dest: './ansible-testing.txt' + register: relative_results + +- name: Assert that explicit relative paths reports change + assert: + that: + - 'relative_results["changed"]' + - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"' + +- name: Cleanup relative path tests + file: + path: 'ansible-testing.txt' + state: absent + +# src is a file, dest is a non-existent directory 
(2 levels of directories): +# using remote_src +# checks that dest is created +- include: dest_in_non_existent_directories_remote_src.yml + with_items: + - { src: 'foo.txt', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/foo.txt' } + +# src is a file, dest is file in a non-existent directory: checks that a failure occurs +# using remote_src +- include: src_file_dest_file_in_non_existent_dir_remote_src.yml + with_items: + - 'new_sub_dir1/sub_dir2/foo.txt' + - 'new_sub_dir1/foo.txt' + loop_control: + loop_var: 'dest' + +# src is a file, dest is a non-existent directory (2 levels of directories): +# checks that dest is created +- include: dest_in_non_existent_directories.yml + with_items: + - { src: 'foo.txt', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/foo.txt' } + - { src: 'subdir', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/subdir/bar.txt' } + - { src: 'subdir/', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/bar.txt' } + - { src: 'subdir', dest: 'new_sub_dir1/sub_dir2', check: 'new_sub_dir1/sub_dir2/subdir/bar.txt' } + - { src: 'subdir/', dest: 'new_sub_dir1/sub_dir2', check: 'new_sub_dir1/sub_dir2/bar.txt' } + +# src is a file, dest is file in a non-existent directory: checks that a failure occurs +- include: src_file_dest_file_in_non_existent_dir.yml + with_items: + - 'new_sub_dir1/sub_dir2/foo.txt' + - 'new_sub_dir1/foo.txt' + loop_control: + loop_var: 'dest' +# +# Recursive copying on remote host +# +## prepare for test +- block: + + - name: execute - Create a test src dir + file: + path: '{{ remote_dir }}/remote_dir_src' + state: directory + + - name: gather - Stat the remote_dir_src + stat: + path: '{{ remote_dir }}/remote_dir_src' + register: stat_remote_dir_src_before + + - name: execute - Create a subdir + file: + path: '{{ remote_dir }}/remote_dir_src/subdir' + state: directory + + - name: gather - Stat the remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/remote_dir_src/subdir' + register: stat_remote_dir_src_subdir_before + + - name: execute - Create a file in the top of src + copy: + dest: '{{ remote_dir }}/remote_dir_src/file1' + content: 'hello world 1' + + - name: gather - Stat the remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/remote_dir_src/file1' + register: stat_remote_dir_src_file1_before + + - name: execute - Create a file in the subdir + copy: + dest: '{{ remote_dir }}/remote_dir_src/subdir/file12' + content: 'hello world 12' + + - name: gather - Stat the remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/remote_dir_src/subdir/file12' + register: stat_remote_dir_src_subdir_file12_before + + - name: execute - Create a link to the file12 + file: + path: '{{ remote_dir }}/remote_dir_src/link_file12' + src: '{{ remote_dir }}/remote_dir_src/subdir/file12' + state: link + + - name: gather - Stat the remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/remote_dir_src/link_file12' + register: stat_remote_dir_src_link_file12_before + +### test when src endswith os.sep and dest isdir +- block: + +### local_follow: True + - name: execute - Create a test dest dir + file: + path: '{{ remote_dir }}/testcase1_local_follow_true' + state: directory + + - name: execute - Copy the directory on remote with local_follow True + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase1_local_follow_true' + local_follow: True + register: testcase1 + + - name: gather - Stat the testcase1_local_follow_true + stat: + path: '{{ remote_dir 
}}/testcase1_local_follow_true' + register: stat_testcase1_local_follow_true + - name: gather - Stat the testcase1_local_follow_true/subdir + stat: + path: '{{ remote_dir }}/testcase1_local_follow_true/subdir' + register: stat_testcase1_local_follow_true_subdir + - name: gather - Stat the testcase1_local_follow_true/file1 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_true/file1' + register: stat_testcase1_local_follow_true_file1 + - name: gather - Stat the testcase1_local_follow_true/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_true/subdir/file12' + register: stat_testcase1_local_follow_true_subdir_file12 + - name: gather - Stat the testcase1_local_follow_true/link_file12 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_true/link_file12' + register: stat_testcase1_local_follow_true_link_file12 + + - name: assert - remote_dir_src has copied with local_follow True. + assert: + that: + - testcase1 is changed + - "stat_testcase1_local_follow_true.stat.isdir" + - "stat_testcase1_local_follow_true_subdir.stat.isdir" + - "stat_testcase1_local_follow_true_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase1_local_follow_true_file1.stat.checksum" + - "stat_testcase1_local_follow_true_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_true_subdir_file12.stat.checksum" + - "stat_testcase1_local_follow_true_link_file12.stat.exists" + - "not stat_testcase1_local_follow_true_link_file12.stat.islnk" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_true_link_file12.stat.checksum" + +### local_follow: False + - name: execute - Create a test dest dir + file: + path: '{{ remote_dir }}/testcase1_local_follow_false' + state: directory + + - name: execute - Copy the directory on remote with local_follow False + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase1_local_follow_false' + local_follow: False + register: testcase1 + + - name: gather - Stat the testcase1_local_follow_false + stat: + path: '{{ remote_dir }}/testcase1_local_follow_false' + register: stat_testcase1_local_follow_false + - name: gather - Stat the testcase1_local_follow_false/subdir + stat: + path: '{{ remote_dir }}/testcase1_local_follow_false/subdir' + register: stat_testcase1_local_follow_false_subdir + - name: gather - Stat the testcase1_local_follow_false/file1 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_false/file1' + register: stat_testcase1_local_follow_false_file1 + - name: gather - Stat the testcase1_local_follow_false/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_false/subdir/file12' + register: stat_testcase1_local_follow_false_subdir_file12 + - name: gather - Stat the testcase1_local_follow_false/link_file12 + stat: + path: '{{ remote_dir }}/testcase1_local_follow_false/link_file12' + register: stat_testcase1_local_follow_false_link_file12 + + - name: assert - remote_dir_src has copied with local_follow False. 
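+ # With local_follow=False the symlink itself is copied, so link_file12 must still be a link in the dest tree.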
+ assert: + that: + - testcase1 is changed + - "stat_testcase1_local_follow_false.stat.isdir" + - "stat_testcase1_local_follow_false_subdir.stat.isdir" + - "stat_testcase1_local_follow_false_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase1_local_follow_false_file1.stat.checksum" + - "stat_testcase1_local_follow_false_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_false_subdir_file12.stat.checksum" + - "stat_testcase1_local_follow_false_link_file12.stat.exists" + - "stat_testcase1_local_follow_false_link_file12.stat.islnk" + +## test when src endswith os.sep and dest not exists + +- block: + - name: execute - Copy the directory on remote with local_follow True + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase2_local_follow_true' + local_follow: True + register: testcase2 + + - name: gather - Stat the testcase2_local_follow_true + stat: + path: '{{ remote_dir }}/testcase2_local_follow_true' + register: stat_testcase2_local_follow_true + - name: gather - Stat the testcase2_local_follow_true/subdir + stat: + path: '{{ remote_dir }}/testcase2_local_follow_true/subdir' + register: stat_testcase2_local_follow_true_subdir + - name: gather - Stat the testcase2_local_follow_true/file1 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_true/file1' + register: stat_testcase2_local_follow_true_file1 + - name: gather - Stat the testcase2_local_follow_true/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_true/subdir/file12' + register: stat_testcase2_local_follow_true_subdir_file12 + - name: gather - Stat the testcase2_local_follow_true/link_file12 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_true/link_file12' + register: stat_testcase2_local_follow_true_link_file12 + + - name: assert - remote_dir_src has copied with local_follow True. 
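+ # With local_follow=True the symlink is dereferenced, so link_file12 becomes a regular file whose checksum matches the link target.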
+ assert: + that: + - testcase2 is changed + - "stat_testcase2_local_follow_true.stat.isdir" + - "stat_testcase2_local_follow_true_subdir.stat.isdir" + - "stat_testcase2_local_follow_true_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase2_local_follow_true_file1.stat.checksum" + - "stat_testcase2_local_follow_true_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_true_subdir_file12.stat.checksum" + - "stat_testcase2_local_follow_true_link_file12.stat.exists" + - "not stat_testcase2_local_follow_true_link_file12.stat.islnk" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_true_link_file12.stat.checksum" + +### local_follow: False + - name: execute - Copy the directory on remote with local_follow False + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase2_local_follow_false' + local_follow: False + register: testcase2 + + - name: execute - Copy the directory on remote with local_follow False + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase2_local_follow_false' + local_follow: False + register: testcase1 + + - name: gather - Stat the testcase2_local_follow_false + stat: + path: '{{ remote_dir }}/testcase2_local_follow_false' + register: stat_testcase2_local_follow_false + - name: gather - Stat the testcase2_local_follow_false/subdir + stat: + path: '{{ remote_dir }}/testcase2_local_follow_false/subdir' + register: stat_testcase2_local_follow_false_subdir + - name: gather - Stat the testcase2_local_follow_false/file1 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_false/file1' + register: stat_testcase2_local_follow_false_file1 + - name: gather - Stat the testcase2_local_follow_false/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_false/subdir/file12' + register: stat_testcase2_local_follow_false_subdir_file12 + - name: gather - Stat the testcase2_local_follow_false/link_file12 + stat: + path: '{{ remote_dir }}/testcase2_local_follow_false/link_file12' + register: stat_testcase2_local_follow_false_link_file12 + + - name: assert - remote_dir_src has copied with local_follow False. 
+ assert: + that: + - testcase2 is changed + - "stat_testcase2_local_follow_false.stat.isdir" + - "stat_testcase2_local_follow_false_subdir.stat.isdir" + - "stat_testcase2_local_follow_false_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase2_local_follow_false_file1.stat.checksum" + - "stat_testcase2_local_follow_false_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_false_subdir_file12.stat.checksum" + - "stat_testcase2_local_follow_false_link_file12.stat.exists" + - "stat_testcase2_local_follow_false_link_file12.stat.islnk" + +## test when src not endswith os.sep and dest isdir +- block: + +### local_follow: True + - name: execute - Create a test dest dir + file: + path: '{{ remote_dir }}/testcase3_local_follow_true' + state: directory + + - name: execute - Copy the directory on remote with local_follow True + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src' + dest: '{{ remote_dir }}/testcase3_local_follow_true' + local_follow: True + register: testcase3 + + - name: gather - Stat the testcase3_local_follow_true + stat: + path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src' + register: stat_testcase3_local_follow_true_remote_dir_src + - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/subdir' + register: stat_testcase3_local_follow_true_remote_dir_src_subdir + - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/file1' + register: stat_testcase3_local_follow_true_remote_dir_src_file1 + - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/subdir/file12' + register: stat_testcase3_local_follow_true_remote_dir_src_subdir_file12 + - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/link_file12' + register: stat_testcase3_local_follow_true_remote_dir_src_link_file12 + + - name: assert - remote_dir_src has copied with local_follow True. 
+ assert: + that: + - testcase3 is changed + - "stat_testcase3_local_follow_true_remote_dir_src.stat.isdir" + - "stat_testcase3_local_follow_true_remote_dir_src_subdir.stat.isdir" + - "stat_testcase3_local_follow_true_remote_dir_src_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_file1.stat.checksum" + - "stat_testcase3_local_follow_true_remote_dir_src_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_subdir_file12.stat.checksum" + - "stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.exists" + - "not stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.islnk" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.checksum" + +### local_follow: False + - name: execute - Create a test dest dir + file: + path: '{{ remote_dir }}/testcase3_local_follow_false' + state: directory + + - name: execute - Copy the directory on remote with local_follow False + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src' + dest: '{{ remote_dir }}/testcase3_local_follow_false' + local_follow: False + register: testcase3 + + - name: gather - Stat the testcase3_local_follow_false + stat: + path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src' + register: stat_testcase3_local_follow_false_remote_dir_src + - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/subdir' + register: stat_testcase3_local_follow_false_remote_dir_src_subdir + - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/file1' + register: stat_testcase3_local_follow_false_remote_dir_src_file1 + - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/subdir/file12' + register: stat_testcase3_local_follow_false_remote_dir_src_subdir_file12 + - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/link_file12' + register: stat_testcase3_local_follow_false_remote_dir_src_link_file12 + + - name: assert - remote_dir_src has copied with local_follow False. 
+ assert: + that: + - testcase3 is changed + - "stat_testcase3_local_follow_false_remote_dir_src.stat.isdir" + - "stat_testcase3_local_follow_false_remote_dir_src_subdir.stat.isdir" + - "stat_testcase3_local_follow_false_remote_dir_src_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase3_local_follow_false_remote_dir_src_file1.stat.checksum" + - "stat_testcase3_local_follow_false_remote_dir_src_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_false_remote_dir_src_subdir_file12.stat.checksum" + - "stat_testcase3_local_follow_false_remote_dir_src_link_file12.stat.exists" + - "stat_testcase3_local_follow_false_remote_dir_src_link_file12.stat.islnk" + +## test when src not endswith os.sep and dest not exists +- block: +### local_follow: True + - name: execute - Copy the directory on remote with local_follow True + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src' + dest: '{{ remote_dir }}/testcase4_local_follow_true' + local_follow: True + register: testcase4 + + - name: gather - Stat the testcase4_local_follow_true + stat: + path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src' + register: stat_testcase4_local_follow_true_remote_dir_src + - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/subdir' + register: stat_testcase4_local_follow_true_remote_dir_src_subdir + - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/file1' + register: stat_testcase4_local_follow_true_remote_dir_src_file1 + - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/subdir/file12' + register: stat_testcase4_local_follow_true_remote_dir_src_subdir_file12 + - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/link_file12' + register: stat_testcase4_local_follow_true_remote_dir_src_link_file12 + + - name: assert - remote_dir_src has copied with local_follow True. 
+ assert: + that: + - testcase4 is changed + - "stat_testcase4_local_follow_true_remote_dir_src.stat.isdir" + - "stat_testcase4_local_follow_true_remote_dir_src_subdir.stat.isdir" + - "stat_testcase4_local_follow_true_remote_dir_src_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_file1.stat.checksum" + - "stat_testcase4_local_follow_true_remote_dir_src_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_subdir_file12.stat.checksum" + - "stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.exists" + - "not stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.islnk" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.checksum" + +### local_follow: False + - name: execute - Copy the directory on remote with local_follow False + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src' + dest: '{{ remote_dir }}/testcase4_local_follow_false' + local_follow: False + register: testcase4 + + - name: gather - Stat the testcase4_local_follow_false + stat: + path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src' + register: stat_testcase4_local_follow_false_remote_dir_src + - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/subdir' + register: stat_testcase4_local_follow_false_remote_dir_src_subdir + - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/file1' + register: stat_testcase4_local_follow_false_remote_dir_src_file1 + - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/subdir/file12' + register: stat_testcase4_local_follow_false_remote_dir_src_subdir_file12 + - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/link_file12' + register: stat_testcase4_local_follow_false_remote_dir_src_link_file12 + + - name: assert - remote_dir_src has copied with local_follow False. 
+ assert: + that: + - testcase4 is changed + - "stat_testcase4_local_follow_false_remote_dir_src.stat.isdir" + - "stat_testcase4_local_follow_false_remote_dir_src_subdir.stat.isdir" + - "stat_testcase4_local_follow_false_remote_dir_src_file1.stat.exists" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase4_local_follow_false_remote_dir_src_file1.stat.checksum" + - "stat_testcase4_local_follow_false_remote_dir_src_subdir_file12.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_false_remote_dir_src_subdir_file12.stat.checksum" + - "stat_testcase4_local_follow_false_remote_dir_src_link_file12.stat.exists" + - "stat_testcase4_local_follow_false_remote_dir_src_link_file12.stat.islnk" + +- block: + - name: execute - Clone the source directory on remote + copy: + remote_src: True + src: '{{ remote_dir }}/remote_dir_src/' + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src' + - name: Create a 2nd level subdirectory + file: + path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/' + state: directory + - name: execute - Copy the directory on remote + copy: + remote_src: True + src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/' + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest' + local_follow: True + - name: execute - Create a new file in the subdir + copy: + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13' + content: 'very new file' + - name: gather - Stat the testcase5_remote_src_subdirs_src/subdir/subdir2/file13 + stat: + path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13' + - name: execute - Copy the directory on remote + copy: + remote_src: True + src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/' + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/' + register: testcase5_new + - name: execute - Edit a file in the subdir + copy: + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13' + content: 'NOT hello world 12' + - name: gather - Stat the testcase5_remote_src_subdirs_src/subdir/subdir2/file13 + stat: + path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13' + register: stat_testcase5_remote_src_subdirs_file13_before + - name: execute - Copy the directory on remote + copy: + remote_src: True + src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/' + dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/' + register: testcase5_edited + - name: gather - Stat the testcase5_remote_src_subdirs_dest/subdir/subdir2/file13 + stat: + path: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/subdir/subdir2/file13' + register: stat_testcase5_remote_src_subdirs_file13 + - name: assert - new and edited files in subdirectories are copied on subsequent runs. 
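+ # Copying the same source twice only reports changed when files were added or edited in between, which the two registered results below verify.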
+ assert: + that: + - testcase5_new is changed + - testcase5_edited is changed + - "stat_testcase5_remote_src_subdirs_file13.stat.exists" + - "stat_testcase5_remote_src_subdirs_file13_before.stat.checksum == stat_testcase5_remote_src_subdirs_file13.stat.checksum" + + +## test copying the directory on remote with chown + + +- block: + + - set_fact: + ansible_copy_test_user_name: 'ansible_copy_test_{{ 100000 | random }}' + + - name: execute - create a user for test + user: + name: '{{ ansible_copy_test_user_name }}' + state: present + become: true + register: ansible_copy_test_user + + - name: execute - create a group for test + group: + name: '{{ ansible_copy_test_user_name }}' + state: present + become: true + register: ansible_copy_test_group + + - name: execute - Copy the directory on remote with chown + copy: + remote_src: True + src: '{{ remote_dir_expanded }}/remote_dir_src/' + dest: '{{ remote_dir_expanded }}/new_dir_with_chown' + owner: '{{ ansible_copy_test_user_name }}' + group: '{{ ansible_copy_test_user_name }}' + follow: true + register: testcase5 + become: true + + - name: gather - Stat the new_dir_with_chown + stat: + path: '{{ remote_dir }}/new_dir_with_chown' + register: stat_new_dir_with_chown + + - name: gather - Stat the new_dir_with_chown/file1 + stat: + path: '{{ remote_dir }}/new_dir_with_chown/file1' + register: stat_new_dir_with_chown_file1 + + - name: gather - Stat the new_dir_with_chown/subdir + stat: + path: '{{ remote_dir }}/new_dir_with_chown/subdir' + register: stat_new_dir_with_chown_subdir + + - name: gather - Stat the new_dir_with_chown/subdir/file12 + stat: + path: '{{ remote_dir }}/new_dir_with_chown/subdir/file12' + register: stat_new_dir_with_chown_subdir_file12 + + - name: gather - Stat the new_dir_with_chown/link_file12 + stat: + path: '{{ remote_dir }}/new_dir_with_chown/link_file12' + register: stat_new_dir_with_chown_link_file12 + + - name: assert - owner and group have changed + assert: + that: + - testcase5 is changed + - "stat_new_dir_with_chown.stat.uid == {{ ansible_copy_test_user.uid }}" + - "stat_new_dir_with_chown.stat.gid == {{ ansible_copy_test_group.gid }}" + - "stat_new_dir_with_chown.stat.pw_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown.stat.gr_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_file1.stat.uid == {{ ansible_copy_test_user.uid }}" + - "stat_new_dir_with_chown_file1.stat.gid == {{ ansible_copy_test_group.gid }}" + - "stat_new_dir_with_chown_file1.stat.pw_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_file1.stat.gr_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_subdir.stat.uid == {{ ansible_copy_test_user.uid }}" + - "stat_new_dir_with_chown_subdir.stat.gid == {{ ansible_copy_test_group.gid }}" + - "stat_new_dir_with_chown_subdir.stat.pw_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_subdir.stat.gr_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_subdir_file12.stat.uid == {{ ansible_copy_test_user.uid }}" + - "stat_new_dir_with_chown_subdir_file12.stat.gid == {{ ansible_copy_test_group.gid }}" + - "stat_new_dir_with_chown_subdir_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_subdir_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_link_file12.stat.uid == {{ ansible_copy_test_user.uid }}" + - "stat_new_dir_with_chown_link_file12.stat.gid == {{ ansible_copy_test_group.gid }}" + - 
"stat_new_dir_with_chown_link_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'" + - "stat_new_dir_with_chown_link_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'" + + always: + - name: execute - remove the user for test + user: + name: '{{ ansible_copy_test_user_name }}' + state: absent + remove: yes + become: true + + - name: execute - remove the group for test + group: + name: '{{ ansible_copy_test_user_name }}' + state: absent + become: true + +## testcase last - make sure remote_dir_src not change +- block: + - name: Stat the remote_dir_src + stat: + path: '{{ remote_dir }}/remote_dir_src' + register: stat_remote_dir_src_after + + - name: Stat the remote_dir_src/subdir + stat: + path: '{{ remote_dir }}/remote_dir_src/subdir' + register: stat_remote_dir_src_subdir_after + + - name: Stat the remote_dir_src/file1 + stat: + path: '{{ remote_dir }}/remote_dir_src/file1' + register: stat_remote_dir_src_file1_after + + - name: Stat the remote_dir_src/subdir/file12 + stat: + path: '{{ remote_dir }}/remote_dir_src/subdir/file12' + register: stat_remote_dir_src_subdir_file12_after + + - name: Stat the remote_dir_src/link_file12 + stat: + path: '{{ remote_dir }}/remote_dir_src/link_file12' + register: stat_remote_dir_src_link_file12_after + + - name: Assert that remote_dir_src not change. + assert: + that: + - "stat_remote_dir_src_after.stat.exists" + - "stat_remote_dir_src_after.stat.isdir" + - "stat_remote_dir_src_before.stat.uid == stat_remote_dir_src_after.stat.uid" + - "stat_remote_dir_src_before.stat.gid == stat_remote_dir_src_after.stat.gid" + - "stat_remote_dir_src_before.stat.pw_name == stat_remote_dir_src_after.stat.pw_name" + - "stat_remote_dir_src_before.stat.gr_name == stat_remote_dir_src_after.stat.gr_name" + - "stat_remote_dir_src_before.stat.path == stat_remote_dir_src_after.stat.path" + - "stat_remote_dir_src_before.stat.mode == stat_remote_dir_src_after.stat.mode" + + - "stat_remote_dir_src_subdir_after.stat.exists" + - "stat_remote_dir_src_subdir_after.stat.isdir" + - "stat_remote_dir_src_subdir_before.stat.uid == stat_remote_dir_src_subdir_after.stat.uid" + - "stat_remote_dir_src_subdir_before.stat.gid == stat_remote_dir_src_subdir_after.stat.gid" + - "stat_remote_dir_src_subdir_before.stat.pw_name == stat_remote_dir_src_subdir_after.stat.pw_name" + - "stat_remote_dir_src_subdir_before.stat.gr_name == stat_remote_dir_src_subdir_after.stat.gr_name" + - "stat_remote_dir_src_subdir_before.stat.path == stat_remote_dir_src_subdir_after.stat.path" + - "stat_remote_dir_src_subdir_before.stat.mode == stat_remote_dir_src_subdir_after.stat.mode" + + - "stat_remote_dir_src_file1_after.stat.exists" + - "stat_remote_dir_src_file1_before.stat.uid == stat_remote_dir_src_file1_after.stat.uid" + - "stat_remote_dir_src_file1_before.stat.gid == stat_remote_dir_src_file1_after.stat.gid" + - "stat_remote_dir_src_file1_before.stat.pw_name == stat_remote_dir_src_file1_after.stat.pw_name" + - "stat_remote_dir_src_file1_before.stat.gr_name == stat_remote_dir_src_file1_after.stat.gr_name" + - "stat_remote_dir_src_file1_before.stat.path == stat_remote_dir_src_file1_after.stat.path" + - "stat_remote_dir_src_file1_before.stat.mode == stat_remote_dir_src_file1_after.stat.mode" + - "stat_remote_dir_src_file1_before.stat.checksum == stat_remote_dir_src_file1_after.stat.checksum" + + - "stat_remote_dir_src_subdir_file12_after.stat.exists" + - "stat_remote_dir_src_subdir_file12_before.stat.uid == stat_remote_dir_src_subdir_file12_after.stat.uid" + - 
"stat_remote_dir_src_subdir_file12_before.stat.gid == stat_remote_dir_src_subdir_file12_after.stat.gid" + - "stat_remote_dir_src_subdir_file12_before.stat.pw_name == stat_remote_dir_src_subdir_file12_after.stat.pw_name" + - "stat_remote_dir_src_subdir_file12_before.stat.gr_name == stat_remote_dir_src_subdir_file12_after.stat.gr_name" + - "stat_remote_dir_src_subdir_file12_before.stat.path == stat_remote_dir_src_subdir_file12_after.stat.path" + - "stat_remote_dir_src_subdir_file12_before.stat.mode == stat_remote_dir_src_subdir_file12_after.stat.mode" + - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_remote_dir_src_subdir_file12_after.stat.checksum" + + - "stat_remote_dir_src_link_file12_after.stat.exists" + - "stat_remote_dir_src_link_file12_after.stat.islnk" + - "stat_remote_dir_src_link_file12_before.stat.uid == stat_remote_dir_src_link_file12_after.stat.uid" + - "stat_remote_dir_src_link_file12_before.stat.gid == stat_remote_dir_src_link_file12_after.stat.gid" + - "stat_remote_dir_src_link_file12_before.stat.pw_name == stat_remote_dir_src_link_file12_after.stat.pw_name" + - "stat_remote_dir_src_link_file12_before.stat.gr_name == stat_remote_dir_src_link_file12_after.stat.gr_name" + - "stat_remote_dir_src_link_file12_before.stat.path == stat_remote_dir_src_link_file12_after.stat.path" + - "stat_remote_dir_src_link_file12_before.stat.mode == stat_remote_dir_src_link_file12_after.stat.mode" + +# Test for issue 69783: copy with remote_src=yes and src='dir/' preserves all permissions +- block: + - name: Create directory structure + file: + path: "{{ local_temp_dir }}/test69783/{{ item }}" + state: directory + loop: + - "src/dir" + - "dest" + + - name: Create source file structure + file: + path: "{{ local_temp_dir }}/test69783/src/{{ item.name }}" + state: touch + mode: "{{ item.mode }}" + loop: + - { name: 'readwrite', mode: '0644' } + - { name: 'executable', mode: '0755' } + - { name: 'readonly', mode: '0444' } + - { name: 'dir/readwrite', mode: '0644' } + - { name: 'dir/executable', mode: '0755' } + - { name: 'dir/readonly', mode: '0444' } + + - name: Recursive remote copy with preserve + copy: + src: "{{ local_temp_dir }}/test69783/src/" + dest: "{{ local_temp_dir }}/test69783/dest/" + remote_src: yes + mode: preserve + + - name: Stat dest 'readwrite' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/readwrite" + register: dest_readwrite_stat + + - name: Stat dest 'executable' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/executable" + register: dest_executable_stat + + - name: Stat dest 'readonly' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/readonly" + register: dest_readonly_stat + + - name: Stat dest 'dir/readwrite' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/dir/readwrite" + register: dest_dir_readwrite_stat + + - name: Stat dest 'dir/executable' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/dir/executable" + register: dest_dir_executable_stat + + - name: Stat dest 'dir/readonly' file + stat: + path: "{{ local_temp_dir}}/test69783/dest/dir/readonly" + register: dest_dir_readonly_stat + + - name: Assert modes are preserved + assert: + that: + - "dest_readwrite_stat.stat.mode == '0644'" + - "dest_executable_stat.stat.mode == '0755'" + - "dest_readonly_stat.stat.mode == '0444'" + - "dest_dir_readwrite_stat.stat.mode == '0644'" + - "dest_dir_executable_stat.stat.mode == '0755'" + - "dest_dir_readonly_stat.stat.mode == '0444'" diff --git a/test/integration/targets/cron/aliases b/test/integration/targets/cron/aliases 
new file mode 100644 index 00000000..b2033afd --- /dev/null +++ b/test/integration/targets/cron/aliases @@ -0,0 +1,5 @@ +destructive +shippable/posix/group4 +skip/aix +skip/osx +skip/macos diff --git a/test/integration/targets/cron/defaults/main.yml b/test/integration/targets/cron/defaults/main.yml new file mode 100644 index 00000000..37e6fc37 --- /dev/null +++ b/test/integration/targets/cron/defaults/main.yml @@ -0,0 +1 @@ +faketime_pkg: libfaketime diff --git a/test/integration/targets/cron/meta/main.yml b/test/integration/targets/cron/meta/main.yml new file mode 100644 index 00000000..2d2436a1 --- /dev/null +++ b/test/integration/targets/cron/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_cron diff --git a/test/integration/targets/cron/tasks/main.yml b/test/integration/targets/cron/tasks/main.yml new file mode 100644 index 00000000..3537b48d --- /dev/null +++ b/test/integration/targets/cron/tasks/main.yml @@ -0,0 +1,213 @@ +- name: add cron task (check mode enabled, cron task not already created) + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + check_mode: yes + register: check_mode_enabled_state_present + +- assert: + that: check_mode_enabled_state_present is changed + +- name: add cron task (check mode disabled, task hasn't already been created) + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + register: add_cron_task + +- assert: + that: add_cron_task is changed + +- name: add cron task (check mode enabled, cron task already exists) + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + check_mode: yes + register: check_mode_enabled_state_present_cron_task_already_exists + +- assert: + that: check_mode_enabled_state_present_cron_task_already_exists is not changed + +- name: add cron task (check mode disabled, cron task already created) + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + register: cron_task_already_created + +- assert: + that: cron_task_already_created is not changed + +- block: + - name: wait for canary creation + wait_for: + path: '{{ remote_dir }}/cron_canary1' + timeout: '{{ 20 if faketime_pkg else 70 }}' + register: wait_canary + always: + - name: display some logs in case of failure + command: 'journalctl -u {{ cron_service }}' + when: wait_canary is failed and ansible_service_mgr == 'systemd' + +- debug: + msg: 'elapsed time waiting for canary: {{ wait_canary.elapsed }}' + +- name: Check check_mode + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + state: absent + check_mode: yes + register: check_check_mode + +- assert: + that: check_check_mode is changed + +- name: Remove a cron task + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + state: absent + register: remove_task + +- assert: + that: remove_task is changed + +- name: 'cron task missing: check idempotence (check mode enabled, state=absent)' + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + state: absent + check_mode: yes + register: check_mode_enabled_remove_task_idempotence + +- assert: + that: check_mode_enabled_remove_task_idempotence is not changed + +- name: 'cron task missing: check idempotence (check mode disabled, state=absent)' + cron: + name: test cron task + job: 'date > {{ remote_dir }}/cron_canary1' + state: absent + register: remove_task_idempotence + +- assert: + that: remove_task_idempotence is not changed + +- name: Check that removing a cron task with cron_file and without specifying a user is allowed 
(#58493) + cron: + cron_file: unexistent_cron_file + state: absent + register: remove_cron_file + +- assert: + that: remove_cron_file is not changed + +- name: Non-regression test - cron file should not be empty after adding var (#71207) + when: ansible_distribution != 'Alpine' + block: + - name: Cron file creation + cron: + cron_file: cron_filename + name: "simple cron job" + job: 'echo "_o/"' + user: root + + - name: Add var to the cron file + cron: + cron_file: cron_filename + env: yes + name: FOO + value: bar + user: root + + - name: "Ensure cron_file still contains job string" + replace: + path: /etc/cron.d/cron_filename + regexp: "_o/" + replace: "OK" + register: find_chars + failed_when: (find_chars is not changed) or (find_chars is failed) + +# BusyBox does not have /etc/cron.d +- name: Removing a cron file when the name is specified is allowed (#57471) + block: + - name: Cron file creation + cron: + cron_file: cron_filename + name: "integration test cron" + job: 'ls' + user: root + + - name: Cron file deletion + cron: + cron_file: cron_filename + name: "integration test cron" + state: absent + + - name: Check successful file deletion + stat: + path: /etc/cron.d/cron_filename + register: cron_file_stats + + - assert: + that: not cron_file_stats.stat.exists + +- name: Allow non-ascii chars in job (#69492) + block: + - name: Cron file creation + cron: + cron_file: cron_filename + name: "cron job that contains non-ascii chars in job (これは日本語です; This is Japanese)" + job: 'echo "うどんは好きだがお化け👻は苦手である。"' + user: root + + - name: "Ensure cron_file contains job string" + replace: + path: /etc/cron.d/cron_filename + regexp: "うどんは好きだがお化け👻は苦手である。" + replace: "それは機密情報🔓です。" + register: find_chars + failed_when: (find_chars is not changed) or (find_chars is failed) + + - name: Cron file deletion + cron: + cron_file: cron_filename + name: "cron job that contains non-ascii chars in job (これは日本語です; This is Japanese)" + state: absent + + - name: Check successful file deletion + stat: + path: /etc/cron.d/cron_filename + register: cron_file_stats + + - assert: + that: not cron_file_stats.stat.exists + +- name: Allow non-ascii chars in cron_file (#69492) + block: + - name: Cron file creation with non-ascii filename (これは日本語です; This is Japanese) + cron: + cron_file: 'なせば大抵なんとかなる👊' + name: "integration test cron" + job: 'echo "Hello, ansible!"' + user: root + + - name: Check file exists + stat: + path: "/etc/cron.d/なせば大抵なんとかなる👊" + register: cron_file_stats + + - assert: + that: cron_file_stats.stat.exists + + - name: Cron file deletion + cron: + cron_file: 'なせば大抵なんとかなる👊' + name: "integration test cron" + state: absent + + - name: Check successful file deletion + stat: + path: "/etc/cron.d/なせば大抵なんとかなる👊" + register: cron_file_stats + + - assert: + that: not cron_file_stats.stat.exists diff --git a/test/integration/targets/dataloader/aliases b/test/integration/targets/dataloader/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/dataloader/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml b/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml new file mode 100644 index 00000000..536e6daa --- /dev/null +++ b/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: false + vars_files: + - vars/invalid.json diff --git a/test/integration/targets/dataloader/runme.sh b/test/integration/targets/dataloader/runme.sh new 
file mode 100755 index 00000000..6a1bc9a0 --- /dev/null +++ b/test/integration/targets/dataloader/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +# check if we get proper json error +ansible-playbook -i ../../inventory attempt_to_load_invalid_json.yml "$@" 2>&1|grep 'JSON:' diff --git a/test/integration/targets/dataloader/vars/invalid.json b/test/integration/targets/dataloader/vars/invalid.json new file mode 100644 index 00000000..8d4e4304 --- /dev/null +++ b/test/integration/targets/dataloader/vars/invalid.json @@ -0,0 +1 @@ +{ }} diff --git a/test/integration/targets/debconf/aliases b/test/integration/targets/debconf/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/debconf/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/debconf/meta/main.yml b/test/integration/targets/debconf/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/debconf/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/debconf/tasks/main.yml b/test/integration/targets/debconf/tasks/main.yml new file mode 100644 index 00000000..d3d63cdf --- /dev/null +++ b/test/integration/targets/debconf/tasks/main.yml @@ -0,0 +1,36 @@ +# Test code for the debconf module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
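+# Supplying only a package name performs a read-only query: the registered result exposes the package's current debconf selections under ".current", which the assertions below inspect.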
+ +## +## debconf query +## + +- block: + - name: query the tzdata package + debconf: + name: tzdata + register: debconf_test0 + + - name: validate results for test 0 + assert: + that: + - 'debconf_test0.changed is defined' + - 'debconf_test0.current is defined' + - '"tzdata/Zones/Etc" in debconf_test0.current' + - 'debconf_test0.current["tzdata/Zones/Etc"] == "UTC"' + when: ansible_distribution in ('Ubuntu', 'Debian') diff --git a/test/integration/targets/debug/aliases b/test/integration/targets/debug/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/debug/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/debug/main.yml b/test/integration/targets/debug/main.yml new file mode 100644 index 00000000..9e49b827 --- /dev/null +++ b/test/integration/targets/debug/main.yml @@ -0,0 +1,6 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: test item being present in the output + debug: var=item + loop: [1, 2, 3] diff --git a/test/integration/targets/debug/main_fqcn.yml b/test/integration/targets/debug/main_fqcn.yml new file mode 100644 index 00000000..d6a00fc8 --- /dev/null +++ b/test/integration/targets/debug/main_fqcn.yml @@ -0,0 +1,6 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: test item being present in the output + ansible.builtin.debug: var=item + loop: [1, 2, 3] diff --git a/test/integration/targets/debug/nosetfacts.yml b/test/integration/targets/debug/nosetfacts.yml new file mode 100644 index 00000000..231c60e4 --- /dev/null +++ b/test/integration/targets/debug/nosetfacts.yml @@ -0,0 +1,21 @@ +- name: check we dont set facts with debug ansible_facts https://github.com/ansible/ansible/issues/74060 + hosts: localhost + gather_facts: false + tasks: + - name: create namespaced non fact + set_fact: + ansible_facts: + nonfact: 1 + + - name: ensure nonfact does not exist + assert: + that: + - nonfact is not defined + + - name: debug ansible_facts to create issue + debug: var=ansible_facts + + - name: ensure nonfact STILL does not exist + assert: + that: + - nonfact is not defined diff --git a/test/integration/targets/debug/runme.sh b/test/integration/targets/debug/runme.sh new file mode 100755 index 00000000..5faeb782 --- /dev/null +++ b/test/integration/targets/debug/runme.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eux + +trap 'rm -f out' EXIT + +ansible-playbook main.yml -i ../../inventory | tee out +for i in 1 2 3; do + grep "ok: \[localhost\] => (item=$i)" out + grep "\"item\": $i" out +done + +ansible-playbook main_fqcn.yml -i ../../inventory | tee out +for i in 1 2 3; do + grep "ok: \[localhost\] => (item=$i)" out + grep "\"item\": $i" out +done + +# ensure debug does not set top level vars when looking at ansible_facts +ansible-playbook nosetfacts.yml "$@" diff --git a/test/integration/targets/delegate_to/aliases b/test/integration/targets/delegate_to/aliases new file mode 100644 index 00000000..b8e973da --- /dev/null +++ b/test/integration/targets/delegate_to/aliases @@ -0,0 +1,4 @@ +shippable/posix/group3 +needs/ssh +needs/root # only on macOS and FreeBSD to configure network interfaces +skip/aix diff --git a/test/integration/targets/delegate_to/connection_plugins/fakelocal.py b/test/integration/targets/delegate_to/connection_plugins/fakelocal.py new file mode 100644 index 00000000..59ddcf05 --- /dev/null +++ b/test/integration/targets/delegate_to/connection_plugins/fakelocal.py @@ -0,0 +1,76 @@ +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + connection: fakelocal + short_description: dont execute anything + description: + - This connection plugin just verifies parameters passed in + author: ansible (@core) + version_added: histerical + options: + password: + description: Authentication password for the C(remote_user). Can be supplied as CLI option. + vars: + - name: ansible_password + remote_user: + description: + - User name with which to login to the remote server, normally set by the remote_user keyword. + ini: + - section: defaults + key: remote_user + vars: + - name: ansible_user +''' + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + ''' Local based connections ''' + + transport = 'fakelocal' + has_pipelining = True + + def __init__(self, *args, **kwargs): + + super(Connection, self).__init__(*args, **kwargs) + self.cwd = None + + def _connect(self): + ''' verify ''' + + if self.get_option('remote_user') == 'invaliduser' and self.get_option('password') == 'badpassword': + raise AnsibleConnectionFailure('Got invaliduser and badpassword') + + if not self._connected: + display.vvv(u"ESTABLISH FAKELOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) + self._connected = True + return self + + def exec_command(self, cmd, in_data=None, sudoable=True): + ''' run a command on the local host ''' + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + return 0, '{"msg": "ALL IS GOOD"}', '' + + def put_file(self, in_path, out_path): + ''' transfer a file from local to local ''' + + super(Connection, self).put_file(in_path, out_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from local to local -- for compatibility ''' + + super(Connection, self).fetch_file(in_path, out_path) + + def close(self): + ''' terminate the connection; nothing to do here ''' + self._connected = False diff --git a/test/integration/targets/delegate_to/delegate_and_nolog.yml b/test/integration/targets/delegate_to/delegate_and_nolog.yml new file mode 100644 index 00000000..d8ed64fe --- /dev/null +++ b/test/integration/targets/delegate_to/delegate_and_nolog.yml @@ -0,0 +1,8 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: no log filtering caused delegation to fail https://github.com/ansible/ansible/issues/43026 + become: False + no_log: true + debug: + delegate_to: localhost diff --git a/test/integration/targets/delegate_to/delegate_facts_block.yml b/test/integration/targets/delegate_to/delegate_facts_block.yml new file mode 100644 index 00000000..2edfeb42 --- /dev/null +++ b/test/integration/targets/delegate_to/delegate_facts_block.yml @@ -0,0 +1,25 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: set var to delegated host directly + set_fact: qq1=333 + delegate_facts: true + delegate_to: localhost + + - name: ensure qq1 exists in localhost but not in testhost + assert: + that: + - qq1 is undefined + - "'qq1' in hostvars['localhost']" + + - name: set var to delegated host via inheritance + block: + - set_fact: qq2=333 + delegate_facts: true + delegate_to: localhost + + - name: ensure qq2 exists in localhost but not in testhost + assert: + that: + - qq2 is undefined + - "'qq2' in hostvars['localhost']" diff 
--git a/test/integration/targets/delegate_to/delegate_local_from_root.yml b/test/integration/targets/delegate_to/delegate_local_from_root.yml new file mode 100644 index 00000000..c9be4ff2 --- /dev/null +++ b/test/integration/targets/delegate_to/delegate_local_from_root.yml @@ -0,0 +1,10 @@ +- name: handle case from issue 72541 + hosts: testhost + gather_facts: false + remote_user: root + tasks: + - name: ensure we copy w/o errors due to remote user not being overridden + copy: + src: testfile + dest: "{{ playbook_dir }}" + delegate_to: localhost diff --git a/test/integration/targets/delegate_to/delegate_vars_hanldling.yml b/test/integration/targets/delegate_to/delegate_vars_hanldling.yml new file mode 100644 index 00000000..6ac64e9c --- /dev/null +++ b/test/integration/targets/delegate_to/delegate_vars_hanldling.yml @@ -0,0 +1,58 @@ +- name: setup delegated host + hosts: localhost + gather_facts: false + tasks: + - add_host: + name: delegatetome + ansible_host: 127.0.0.4 + +- name: ensure we dont use orig host vars if delegated one does not define them + hosts: testhost + gather_facts: false + connection: local + tasks: + - name: force current host to use winrm + set_fact: + ansible_connection: winrm + + - name: this should fail (missing winrm or unreachable) + ping: + ignore_errors: true + ignore_unreachable: true + register: orig + + - name: ensure prev failed + assert: + that: + - orig is failed or orig is unreachable + + - name: this will only fail if we take orig host ansible_connection instead of defaults + ping: + delegate_to: delegatetome + + +- name: ensure plugin specific vars are properly used + hosts: testhost + gather_facts: false + tasks: + - name: set unusable ssh args + set_fact: + ansible_host: 127.0.0.1 + ansible_connection: ssh + ansible_ssh_common_args: 'MEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE' + ansible_connection_timeout: 5 + + - name: fail to ping with bad args + ping: + register: bad_args_ping + ignore_unreachable: true + + - debug: var=bad_args_ping + - name: ensure prev failed + assert: + that: + - bad_args_ping is failed or bad_args_ping is unreachable + + - name: this should work by ignoring the bad args for orig host + ping: + delegate_to: delegatetome diff --git a/test/integration/targets/delegate_to/discovery_applied.yml b/test/integration/targets/delegate_to/discovery_applied.yml new file mode 100644 index 00000000..fafe664c --- /dev/null +++ b/test/integration/targets/delegate_to/discovery_applied.yml @@ -0,0 +1,8 @@ +- hosts: testhost + gather_facts: no + tasks: + - command: ls + delegate_to: "{{ item }}" + with_items: + - localhost + - "{{ inventory_hostname }}" diff --git a/test/integration/targets/delegate_to/files/testfile b/test/integration/targets/delegate_to/files/testfile new file mode 100644 index 00000000..492bafce --- /dev/null +++ b/test/integration/targets/delegate_to/files/testfile @@ -0,0 +1 @@ +nothing special diff --git a/test/integration/targets/delegate_to/has_hostvars.yml b/test/integration/targets/delegate_to/has_hostvars.yml new file mode 100644 index 00000000..9e8926bd --- /dev/null +++ b/test/integration/targets/delegate_to/has_hostvars.yml @@ -0,0 +1,64 @@ +- name: ensure delegated host has hostvars available for resolving connection + hosts: testhost + gather_facts: false + tasks: + + - name: ensure delegated host uses current host as inventory_hostname + assert: + that: + - inventory_hostname == ansible_delegated_vars['testhost5']['inventory_hostname'] + delegate_to: testhost5 + + - name: Set info on inventory_hostname + 
set_fact: + login: invaliduser + mypass: badpassword + + - name: test fakelocal + command: ls + ignore_unreachable: True + ignore_errors: True + remote_user: "{{ login }}" + vars: + ansible_password: "{{ mypass }}" + ansible_connection: fakelocal + register: badlogin + + - name: ensure we skipped due to unreachable and not templating error + assert: + that: + - badlogin is unreachable + + - name: delegate but try to use inventory_hostname data directly + command: ls + delegate_to: testhost5 + ignore_unreachable: True + ignore_errors: True + remote_user: "{{ login }}" + vars: + ansible_password: "{{ mypass }}" + register: badlogin + + - name: ensure we failed due to undefined variable and not unreachable + assert: + that: + - badlogin is not unreachable + - badlogin is failed + - "'undefined' in badlogin['msg']" + + - name: delegate ls to testhost5 as it uses ssh while testhost is local, but use vars from testhost + command: ls + remote_user: "{{ hostvars[inventory_hostname]['login'] }}" + delegate_to: testhost5 + ignore_unreachable: True + ignore_errors: True + vars: + ansible_password: "{{ hostvars[inventory_hostname]['mypass'] }}" + register: badlogin + + - name: ensure we skipped due to unreachable and not templating error + assert: + that: + - badlogin is unreachable + - badlogin is not failed + - "'undefined' not in badlogin['msg']" diff --git a/test/integration/targets/delegate_to/inventory b/test/integration/targets/delegate_to/inventory new file mode 100644 index 00000000..f7ad0a33 --- /dev/null +++ b/test/integration/targets/delegate_to/inventory @@ -0,0 +1,9 @@ +[local] +testhost ansible_connection=local +testhost2 ansible_connection=local +testhost3 ansible_ssh_host=127.0.0.3 +testhost4 ansible_ssh_host=127.0.0.4 +testhost5 ansible_connection=fakelocal + +[all:vars] +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/delegate_to/inventory_interpreters b/test/integration/targets/delegate_to/inventory_interpreters new file mode 100644 index 00000000..4c202ca5 --- /dev/null +++ b/test/integration/targets/delegate_to/inventory_interpreters @@ -0,0 +1,5 @@ +testhost ansible_python_interpreter=firstpython +testhost2 ansible_python_interpreter=secondpython + +[all:vars] +ansible_connection=local diff --git a/test/integration/targets/delegate_to/library/detect_interpreter.py b/test/integration/targets/delegate_to/library/detect_interpreter.py new file mode 100644 index 00000000..1f401677 --- /dev/null +++ b/test/integration/targets/delegate_to/library/detect_interpreter.py @@ -0,0 +1,18 @@ +#!/usr/bin/python + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import sys + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule(argument_spec={}) + module.exit_json(**dict(found=sys.executable)) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2 b/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2 new file mode 100644 index 00000000..22187f91 --- /dev/null +++ b/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2 @@ -0,0 +1,3 @@ +{{ templated_var }} + +{{ templated_dict | to_nice_json }} diff --git a/test/integration/targets/delegate_to/runme.sh b/test/integration/targets/delegate_to/runme.sh new file mode 100755 index 00000000..44059552 --- /dev/null +++ b/test/integration/targets/delegate_to/runme.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +set
-eux + +platform="$(uname)" + +function setup() { + if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then + ifconfig lo0 + + existing=$(ifconfig lo0 | grep '^[[:blank:]]inet 127\.0\.0\. ' || true) + + echo "${existing}" + + for i in 3 4 254; do + ip="127.0.0.${i}" + + if [[ "${existing}" != *"${ip}"* ]]; then + ifconfig lo0 alias "${ip}" up + fi + done + + ifconfig lo0 + fi +} + +function teardown() { + if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then + for i in 3 4 254; do + ip="127.0.0.${i}" + + if [[ "${existing}" != *"${ip}"* ]]; then + ifconfig lo0 -alias "${ip}" + fi + done + + ifconfig lo0 + fi +} + +setup + +trap teardown EXIT + +ANSIBLE_SSH_ARGS='-C -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null' \ + ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_delegate_to.yml -i inventory -v "$@" + +# this test is not doing what it says it does, also relies on var that should not be available +#ansible-playbook test_loop_control.yml -v "$@" + +ansible-playbook test_delegate_to_loop_randomness.yml -v "$@" + +ansible-playbook delegate_and_nolog.yml -i inventory -v "$@" + +ansible-playbook delegate_facts_block.yml -i inventory -v "$@" + +ansible-playbook test_delegate_to_loop_caching.yml -i inventory -v "$@" + +# ensure we are using correct settings when delegating +ANSIBLE_TIMEOUT=3 ansible-playbook delegate_vars_hanldling.yml -i inventory -v "$@" + +ansible-playbook has_hostvars.yml -i inventory -v "$@" + +# test ansible_x_interpreter +# python +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh +( +cd "${OUTPUT_DIR}"/venv/bin +ln -s python firstpython +ln -s python secondpython +) +ansible-playbook verify_interpreter.yml -i inventory_interpreters -v "$@" +ansible-playbook discovery_applied.yml -i inventory -v "$@" +ansible-playbook delegate_local_from_root.yml -i inventory -v "$@" -e 'ansible_user=root' diff --git a/test/integration/targets/delegate_to/test_delegate_to.yml b/test/integration/targets/delegate_to/test_delegate_to.yml new file mode 100644 index 00000000..05b0536e --- /dev/null +++ b/test/integration/targets/delegate_to/test_delegate_to.yml @@ -0,0 +1,58 @@ +- hosts: testhost3 + vars: + - template_role: ./roles/test_template + - output_dir: "{{ playbook_dir }}" + - templated_var: foo + - templated_dict: { 'hello': 'world' } + tasks: + - name: Test no delegate_to + setup: + register: setup_results + + - assert: + that: + - '"127.0.0.3" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host in inventory + setup: + register: setup_results + delegate_to: testhost4 + + - debug: var=setup_results + + - assert: + that: + - '"127.0.0.4" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host not in inventory + setup: + register: setup_results + delegate_to: 127.0.0.254 + + - assert: + that: + - '"127.0.0.254" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' +# +# Smoketest some other modules do not error as a canary +# + - name: Test file works with delegate_to and a host in inventory + file: path={{ output_dir }}/foo.txt mode=0644 state=touch + delegate_to: testhost4 + + - name: Test file works with delegate_to and a host not in inventory + file: path={{ output_dir }}/tmp.txt mode=0644 state=touch + delegate_to: 127.0.0.254 + + - name: Test template works with delegate_to and a host in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: 
testhost4 + + - name: Test template works with delegate_to and a host not in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: 127.0.0.254 + + - name: remove foo.txt test file + file: path={{ output_dir }}/foo.txt state=absent + + - name: remove tmp.txt test file + file: path={{ output_dir }}/tmp.txt state=absent diff --git a/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml b/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml new file mode 100644 index 00000000..6ea08f72 --- /dev/null +++ b/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml @@ -0,0 +1,45 @@ +- hosts: testhost,testhost2 + gather_facts: false + vars: + delegate_to_host: "localhost" + tasks: + - set_fact: + gandalf: + shout: 'You shall not pass!' + when: inventory_hostname == 'testhost' + + - set_fact: + gandalf: + speak: 'Run you fools!' + when: inventory_hostname == 'testhost2' + + - name: works correctly + debug: var=item + delegate_to: localhost + with_dict: "{{ gandalf }}" + register: result1 + + - name: shows same item for all hosts + debug: var=item + delegate_to: "{{ delegate_to_host }}" + with_dict: "{{ gandalf }}" + register: result2 + + - debug: + var: result2.results[0].item.value + + - assert: + that: + - result1.results[0].item.value == 'You shall not pass!' + - result2.results[0].item.value == 'You shall not pass!' + when: inventory_hostname == 'testhost' + + - assert: + that: + - result1.results[0].item.value == 'Run you fools!' + - result2.results[0].item.value == 'Run you fools!' + when: inventory_hostname == 'testhost2' + + - assert: + that: + - _ansible_loop_cache is undefined diff --git a/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml b/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml new file mode 100644 index 00000000..81033a16 --- /dev/null +++ b/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml @@ -0,0 +1,73 @@ +--- +- name: Integration tests for #28231 + hosts: localhost + gather_facts: false + tasks: + - name: Add some test hosts + add_host: + name: "foo{{item}}" + groups: foo + ansible_connection: local + ansible_python_interpreter: "{{ ansible_playbook_python }}" + loop: "{{ range(10)|list }}" + + # We expect all of the next 3 runs to succeed; + # this is done multiple times to increase randomness + - assert: + that: + - item in ansible_delegated_vars + delegate_to: "{{ item }}" + loop: + - "{{ groups.foo|random }}" + ignore_errors: true + register: result1 + + - assert: + that: + - item in ansible_delegated_vars + delegate_to: "{{ item }}" + loop: + - "{{ groups.foo|random }}" + ignore_errors: true + register: result2 + + - assert: + that: + - item in ansible_delegated_vars + delegate_to: "{{ item }}" + loop: + - "{{ groups.foo|random }}" + ignore_errors: true + register: result3 + + - debug: + var: result1 + + - debug: + var: result2 + + - debug: + var: result3 + + - name: Ensure all of the 3 asserts were successful + assert: + that: + - results is all + vars: + results: + - "{{ (result1.results|first) is successful }}" + - "{{ (result2.results|first) is successful }}" + - "{{ (result3.results|first) is successful }}" + + - name: Set delegate + set_fact: + _delegate: '{{ groups.foo[0] }}' + + - command: "true" + delegate_to: "{{ _delegate }}" + register: result + + - assert: + that: + - result.stdout is defined + - result.results is undefined diff --git a/test/integration/targets/delegate_to/test_loop_control.yml 
b/test/integration/targets/delegate_to/test_loop_control.yml new file mode 100644 index 00000000..61e9304d --- /dev/null +++ b/test/integration/targets/delegate_to/test_loop_control.yml @@ -0,0 +1,16 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Test delegate_to with loop_control + ping: + delegate_to: "{{ item }}" + with_items: + - localhost + loop_control: + label: "{{ item }}" + register: out + + - name: Check if delegated_host was templated properly + assert: + that: + - out.results[0]['_ansible_delegated_vars']['ansible_delegated_host'] == 'localhost' diff --git a/test/integration/targets/delegate_to/verify_interpreter.yml b/test/integration/targets/delegate_to/verify_interpreter.yml new file mode 100644 index 00000000..63c60a41 --- /dev/null +++ b/test/integration/targets/delegate_to/verify_interpreter.yml @@ -0,0 +1,47 @@ +- name: ensure they are different + hosts: localhost + tasks: + - name: dont game me + assert: + msg: 'expected different values but got {{hostvars["testhost"]["ansible_python_interpreter"]}} and {{hostvars["testhost2"]["ansible_python_interpreter"]}}' + that: + - hostvars["testhost"]["ansible_python_interpreter"] != hostvars["testhost2"]["ansible_python_interpreter"] + +- name: no delegation + hosts: all + gather_facts: false + tasks: + - name: detect interpreter used by each host + detect_interpreter: + register: baseline + + - name: verify it + assert: + msg: 'expected {{ansible_python_interpreter}} but got {{baseline.found|basename}}' + that: + - baseline.found|basename == ansible_python_interpreter + +- name: actual test + hosts: testhost + gather_facts: false + tasks: + - name: original host + detect_interpreter: + register: found + + - name: verify it on orig host + assert: + msg: 'expected {{ansible_python_interpreter}} but got {{found.found|basename}}' + that: + - found.found|basename == ansible_python_interpreter + + - name: delegated host + detect_interpreter: + register: found2 + delegate_to: testhost2 + + - name: verify it on delegated host + assert: + msg: 'expected {{hostvars["testhost2"]["ansible_python_interpreter"]}} but got {{found2.found|basename}}' + that: + - found2.found|basename == hostvars["testhost2"]["ansible_python_interpreter"] diff --git a/test/integration/targets/dict_transformations/aliases b/test/integration/targets/dict_transformations/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/dict_transformations/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/dict_transformations/library/convert_camelCase.py b/test/integration/targets/dict_transformations/library/convert_camelCase.py new file mode 100644 index 00000000..50ca34c3 --- /dev/null +++ b/test/integration/targets/dict_transformations/library/convert_camelCase.py @@ -0,0 +1,48 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: convert_camelCase +short_description: test converting data to camelCase +description: test converting data to camelCase +options: + data: + description: Data to modify + type: dict + required: True + capitalize_first: + description: Whether to capitalize the first character + default: False + type: bool +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + 
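# Editorial illustration (not part of this module): behavior of the helper
# imported above, matching the assertions in
# tasks/test_convert_camelCase.yml later in this patch:
#
#   snake_dict_to_camel_dict({'top_level_key': {'nested_key': 'v'}})
#   # -> {'topLevelKey': {'nestedKey': 'v'}}
#   snake_dict_to_camel_dict({'top_level_key': {'nested_key': 'v'}}, capitalize_first=True)
#   # -> {'TopLevelKey': {'NestedKey': 'v'}}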
+ +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(type='dict', required=True), + capitalize_first=dict(type='bool', default=False), + ), + ) + + result = snake_dict_to_camel_dict( + module.params['data'], + module.params['capitalize_first'] + ) + + module.exit_json(data=result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/dict_transformations/library/convert_snake_case.py b/test/integration/targets/dict_transformations/library/convert_snake_case.py new file mode 100644 index 00000000..4c13fbcb --- /dev/null +++ b/test/integration/targets/dict_transformations/library/convert_snake_case.py @@ -0,0 +1,55 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: convert_snake_case +short_description: test converting data to snake_case +description: test converting data to snake_case +options: + data: + description: Data to modify + type: dict + required: True + reversible: + description: + - Make the snake_case conversion in a way that can be converted back to the original value + - For example, convert IAMUser to i_a_m_user instead of iam_user + default: False + ignore_list: + description: list of top level keys that should not have their contents converted + type: list + default: [] +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(type='dict', required=True), + reversible=dict(type='bool', default=False), + ignore_list=dict(type='list', default=[]), + ), + ) + + result = camel_dict_to_snake_dict( + module.params['data'], + module.params['reversible'], + module.params['ignore_list'] + ) + + module.exit_json(data=result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/dict_transformations/tasks/main.yml b/test/integration/targets/dict_transformations/tasks/main.yml new file mode 100644 index 00000000..03aa6e19 --- /dev/null +++ b/test/integration/targets/dict_transformations/tasks/main.yml @@ -0,0 +1,3 @@ +- include_tasks: test_convert_snake_case.yml + +- include_tasks: test_convert_camelCase.yml diff --git a/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml b/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml new file mode 100644 index 00000000..666e8d3a --- /dev/null +++ b/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml @@ -0,0 +1,33 @@ +- convert_camelCase: + data: {'top_level_key': {'nested_key': 'do_not_convert'}} + register: result + +- assert: + that: + - "result.data == {'topLevelKey': {'nestedKey': 'do_not_convert'}}" + +- convert_camelCase: + data: {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'do_not_convert'}} + register: result + +- assert: + that: + - "result.data == {'tOPLevelKey': {'nESTEDKey': 'do_not_convert'}}" + +- convert_camelCase: + data: {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'do_not_convert'}} + capitalize_first: True + register: result + +- assert: + that: + - "result.data == {'TOPLevelKey': {'NESTEDKey': 'do_not_convert'}}" + +- convert_camelCase: + data: {'results': [{'i_a_m_user': 'user_name', 'tags': {'do_convert': 'do_not_convert'}}]} + capitalize_first: True + register: result + +- assert: + that: + - 
"result.data == {'Results': [{'IAMUser': 'user_name', 'Tags': {'DoConvert': 'do_not_convert'}}]}" diff --git a/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml b/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml new file mode 100644 index 00000000..ba80aa7a --- /dev/null +++ b/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml @@ -0,0 +1,26 @@ +- convert_snake_case: + data: {'TOPLevelKey': {'NESTEDKey': 'DoNotConvert'}} + register: result + +- assert: + that: + - "result.data == {'top_level_key': {'nested_key': 'DoNotConvert'}}" + +- convert_snake_case: + data: {'TOPLevelKey': {'NESTEDKey': 'DoNotConvert'}} + reversible: True + register: result + +- assert: + that: + - "result.data == {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'DoNotConvert'}}" + +- convert_snake_case: + data: {'Results': [{'IAMUser': 'UserName', 'Tags': {'DoConvert': 'DoNotConvert'}}], 'Tags': {'DoNotConvert': 'DoNotConvert'}} + reversible: True + ignore_list: ['Tags'] # Ignore top level 'Tags' key if found + register: result + +- assert: + that: + - "result.data == {'results': [{'i_a_m_user': 'UserName', 'tags': {'do_convert': 'DoNotConvert'}}], 'tags': {'DoNotConvert': 'DoNotConvert'}}" diff --git a/test/integration/targets/dnf/aliases b/test/integration/targets/dnf/aliases new file mode 100644 index 00000000..4d1afd64 --- /dev/null +++ b/test/integration/targets/dnf/aliases @@ -0,0 +1,7 @@ +destructive +shippable/posix/group4 +skip/aix +skip/power/centos +skip/freebsd +skip/osx +skip/macos diff --git a/test/integration/targets/dnf/meta/main.yml b/test/integration/targets/dnf/meta/main.yml new file mode 100644 index 00000000..34d81261 --- /dev/null +++ b/test/integration/targets/dnf/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - prepare_tests + - setup_rpm_repo + - setup_remote_tmp_dir diff --git a/test/integration/targets/dnf/tasks/dnf.yml b/test/integration/targets/dnf/tasks/dnf.yml new file mode 100644 index 00000000..19008188 --- /dev/null +++ b/test/integration/targets/dnf/tasks/dnf.yml @@ -0,0 +1,774 @@ +# UNINSTALL 'python2-dnf' +# The `dnf` module has the smarts to auto-install the relevant python +# bindings. To test, we will first uninstall python2-dnf (so that the tests +# on python2 will require python2-dnf) +- name: check python2-dnf with rpm + shell: rpm -q python2-dnf + register: rpm_result + ignore_errors: true + args: + warn: no + +# Don't uninstall python2-dnf with the `dnf` module in case it needs to load +# some dnf python files after the package is uninstalled. +- name: uninstall python2-dnf with shell + shell: dnf -y remove python2-dnf + when: rpm_result is successful + +# UNINSTALL +# With 'python2-dnf' uninstalled, the first call to 'dnf' should install +# python2-dnf. 
+- name: uninstall sos + dnf: + name: sos + state: removed + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_result + +- name: verify uninstallation of sos + assert: + that: + - "not dnf_result.failed | default(False)" + - "rpm_result.rc == 1" + +# UNINSTALL AGAIN +- name: uninstall sos + dnf: + name: sos + state: removed + register: dnf_result + +- name: verify no change on re-uninstall + assert: + that: + - "not dnf_result.changed" + +# INSTALL +- name: install sos (check_mode) + dnf: + name: sos + state: present + update_cache: True + check_mode: True + register: dnf_result + +- assert: + that: + - dnf_result is success + - dnf_result.results|length > 0 + - "dnf_result.results[0].startswith('Installed: ')" + +- name: install sos + dnf: + name: sos + state: present + update_cache: True + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_result + +- name: verify installation of sos + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_result.rc == 0" + +- name: verify dnf module outputs + assert: + that: + - "'changed' in dnf_result" + - "'results' in dnf_result" + +# INSTALL AGAIN +- name: install sos again (check_mode) + dnf: + name: sos + state: present + check_mode: True + register: dnf_result + +- assert: + that: + - dnf_result is not changed + - dnf_result.results|length == 0 + +- name: install sos again + dnf: + name: sos + state: present + register: dnf_result + +- name: verify no change on second install + assert: + that: + - "not dnf_result.changed" + +# Multiple packages +- name: uninstall sos and pciutils + dnf: name=sos,pciutils state=removed + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check pciutils with rpm + shell: rpm -q pciutils + failed_when: False + register: rpm_pciutils_result + +- name: verify packages removed + assert: + that: + - "rpm_sos_result.rc != 0" + - "rpm_pciutils_result.rc != 0" + +- name: install sos and pciutils as comma separated + dnf: name=sos,pciutils state=present + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check pciutils with rpm + shell: rpm -q pciutils + failed_when: False + register: rpm_pciutils_result + +- name: verify packages installed + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_pciutils_result.rc == 0" + +- name: uninstall sos and pciutils + dnf: name=sos,pciutils state=removed + register: dnf_result + +- name: install sos and pciutils as list + dnf: + name: + - sos + - pciutils + state: present + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check pciutils with rpm + shell: rpm -q pciutils + failed_when: False + register: rpm_pciutils_result + +- name: verify packages installed + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_pciutils_result.rc == 0" + +- name: uninstall sos and pciutils + dnf: + name: "sos,pciutils" + state: removed + register: dnf_result + +- name: install sos and pciutils as comma separated with spaces + dnf: + name: "sos, pciutils" + state: present + register: dnf_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + 
register: rpm_sos_result + +- name: check pciutils with rpm + shell: rpm -q pciutils + failed_when: False + register: rpm_pciutils_result + +- name: verify packages installed + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_pciutils_result.rc == 0" + +- name: uninstall sos and pciutils (check_mode) + dnf: + name: + - sos + - pciutils + state: removed + check_mode: True + register: dnf_result + +- assert: + that: + - dnf_result is success + - dnf_result.results|length == 2 + - "dnf_result.results[0].startswith('Removed: ')" + - "dnf_result.results[1].startswith('Removed: ')" + +- name: uninstall sos and pciutils + dnf: + name: + - sos + - pciutils + state: removed + register: dnf_result + +- assert: + that: + - dnf_result is changed + +- name: install non-existent rpm + dnf: + name: does-not-exist + register: non_existent_rpm + ignore_errors: True + +- name: check non-existent rpm install failed + assert: + that: + - non_existent_rpm is failed + +# Install in installroot='/'. This should be identical to default +- name: install sos in / + dnf: name=sos state=present installroot='/' + register: dnf_result + +- name: check sos with rpm in / + shell: rpm -q sos --root=/ + failed_when: False + register: rpm_result + +- name: verify installation of sos in / + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_result.rc == 0" + +- name: verify dnf module outputs in / + assert: + that: + - "'changed' in dnf_result" + - "'results' in dnf_result" + +- name: uninstall sos in / + dnf: name=sos state=removed installroot='/' + register: dnf_result + +- name: uninstall sos for downloadonly test + dnf: + name: sos + state: absent + +- name: Test download_only (check_mode) + dnf: + name: sos + state: latest + download_only: true + check_mode: true + register: dnf_result + +- assert: + that: + - dnf_result is success + - "dnf_result.results[0].startswith('Downloaded: ')" + +- name: Test download_only + dnf: + name: sos + state: latest + download_only: true + register: dnf_result + +- name: verify download of sos (part 1 -- dnf "install" succeeded) + assert: + that: + - "dnf_result is success" + - "dnf_result is changed" + +- name: uninstall sos (noop) + dnf: + name: sos + state: absent + register: dnf_result + +- name: verify download of sos (part 2 -- nothing removed during uninstall) + assert: + that: + - "dnf_result is success" + - "not dnf_result is changed" + +- name: uninstall sos for downloadonly/downloaddir test + dnf: + name: sos + state: absent + +- name: Test download_only/download_dir + dnf: + name: sos + state: latest + download_only: true + download_dir: "/var/tmp/packages" + register: dnf_result + +- name: verify dnf output + assert: + that: + - "dnf_result is success" + - "dnf_result is changed" + +- command: "ls /var/tmp/packages" + register: ls_out + +- name: Verify specified download_dir was used + assert: + that: + - "'sos' in ls_out.stdout" + +# GROUP INSTALL +- name: install the Custom Group group + dnf: + name: "@Custom Group" + state: present + register: dnf_result + +- name: check dinginessentail with rpm + command: rpm -q dinginessentail + failed_when: False + register: dinginessentail_result + +- name: verify installation of the group + assert: + that: + - not dnf_result is failed + - dnf_result is changed + - "'results' in dnf_result" + - dinginessentail_result.rc == 0 + +- name: install the group again + dnf: + name: "@Custom Group" + state: present + register: dnf_result + +- name: 
verify nothing changed + assert: + that: + - not dnf_result is changed + - "'msg' in dnf_result" + +- name: verify that landsidescalping is not installed + dnf: + name: landsidescalping + state: absent + +- name: install the group again but also with a package that is not yet installed + dnf: + name: + - "@Custom Group" + - landsidescalping + state: present + register: dnf_result + +- name: check landsidescalping with rpm + command: rpm -q landsidescalping + failed_when: False + register: landsidescalping_result + +- name: verify landsidescalping is installed + assert: + that: + - dnf_result is changed + - "'results' in dnf_result" + - landsidescalping_result.rc == 0 + +- name: try to install the group again, with --check to check 'changed' + dnf: + name: "@Custom Group" + state: present + check_mode: yes + register: dnf_result + +- name: verify nothing changed + assert: + that: + - not dnf_result is changed + - "'msg' in dnf_result" + +- name: remove landsidescalping after test + dnf: + name: landsidescalping + state: absent + +# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved +- shell: 'dnf -y group install "Custom Group" && dnf -y group remove "Custom Group"' + register: shell_dnf_result + +# GROUP UPGRADE - this will go to the same method as group install +# but through group_update - it is its invocation we're testing here +# see commit 119c9e5d6eb572c4a4800fbe8136095f9063c37b +- name: install latest Custom Group + dnf: + name: "@Custom Group" + state: latest + register: dnf_result + +- name: verify installation of the group + assert: + that: + - not dnf_result is failed + - dnf_result is changed + - "'results' in dnf_result" + +# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved +- shell: dnf -y group install "Custom Group" && dnf -y group remove "Custom Group" + +- name: try to install non existing group + dnf: + name: "@non-existing-group" + state: present + register: dnf_result + ignore_errors: True + +- name: verify installation of the non existing group failed + assert: + that: + - "not dnf_result.changed" + - "dnf_result is failed" + +- name: verify dnf module outputs + assert: + that: + - "'changed' in dnf_result" + - "'msg' in dnf_result" + +- name: try to install non existing file + dnf: + name: /tmp/non-existing-1.0.0.fc26.noarch.rpm + state: present + register: dnf_result + ignore_errors: yes + +- name: verify installation failed + assert: + that: + - "dnf_result is failed" + - "not dnf_result.changed" + +- name: verify dnf module outputs + assert: + that: + - "'changed' in dnf_result" + - "'msg' in dnf_result" + +- name: try to install from non existing url + dnf: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/non-existing-1.0.0.fc26.noarch.rpm + state: present + register: dnf_result + ignore_errors: yes + +- name: verify installation failed + assert: + that: + - "dnf_result is failed" + - "not dnf_result.changed" + +- name: verify dnf module outputs + assert: + that: + - "'changed' in dnf_result" + - "'msg' in dnf_result" + +# ENVIRONMENT UPGRADE +# see commit de299ef77c03a64a8f515033a79ac6b7db1bc710 +- name: install Custom Environment Group + dnf: + name: "@Custom Environment Group" + state: latest + register: dnf_result + +- name: check landsidescalping with rpm + command: rpm -q landsidescalping + register: landsidescalping_result + +- name: verify installation of the environment + assert: + that: + - not dnf_result is failed + - dnf_result is changed + - "'results' in dnf_result" + - 
landsidescalping_result.rc == 0 + +# Fedora 28 (DNF 2) does not support this, just remove the package itself +- name: remove landsidescalping package on Fedora 28 + dnf: + name: landsidescalping + state: absent + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int <= 28 + +# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved +- name: remove Custom Environment Group + shell: dnf -y group install "Custom Environment Group" && dnf -y group remove "Custom Environment Group" + when: not (ansible_distribution == 'Fedora' and ansible_distribution_major_version|int <= 28) + +# https://github.com/ansible/ansible/issues/39704 +- name: install non-existent rpm, state=latest + dnf: + name: non-existent-rpm + state: latest + ignore_errors: yes + register: dnf_result + +- name: verify the result + assert: + that: + - "dnf_result is failed" + - "'non-existent-rpm' in dnf_result['failures'][0]" + - "'No package non-existent-rpm available' in dnf_result['failures'][0]" + - "'Failed to install some of the specified packages' in dnf_result['msg']" + +- name: use latest to install httpd + dnf: + name: httpd + state: latest + register: dnf_result + +- name: verify httpd was installed + assert: + that: + - "'changed' in dnf_result" + +- name: uninstall httpd + dnf: + name: httpd + state: removed + +- name: update httpd only if it exists + dnf: + name: httpd + state: latest + update_only: yes + register: dnf_result + +- name: verify httpd not installed + assert: + that: + - "not dnf_result is changed" + +- name: try to install not compatible arch rpm, should fail + dnf: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/banner-1.3.4-3.el7.ppc64le.rpm + state: present + register: dnf_result + ignore_errors: True + +- name: verify that dnf failed + assert: + that: + - "not dnf_result is changed" + - "dnf_result is failed" + +# setup for testing installing an RPM from url + +- set_fact: + pkg_name: fpaste + +- name: cleanup + dnf: + name: "{{ pkg_name }}" + state: absent + +- set_fact: + pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/fpaste-0.3.9.1-1.fc27.noarch.rpm +# setup end + +- name: download an rpm + get_url: + url: "{{ pkg_url }}" + dest: "/tmp/{{ pkg_name }}.rpm" + +- name: install the downloaded rpm + dnf: + name: "/tmp/{{ pkg_name }}.rpm" + state: present + disable_gpg_check: true + register: dnf_result + +- name: verify installation + assert: + that: + - "dnf_result is success" + - "dnf_result is changed" + +- name: install the downloaded rpm again + dnf: + name: "/tmp/{{ pkg_name }}.rpm" + state: present + register: dnf_result + +- name: verify installation + assert: + that: + - "dnf_result is success" + - "not dnf_result is changed" + +- name: clean up + dnf: + name: "{{ pkg_name }}" + state: absent + +- name: install from url + dnf: + name: "{{ pkg_url }}" + state: present + disable_gpg_check: true + register: dnf_result + +- name: verify installation + assert: + that: + - "dnf_result is success" + - "dnf_result is changed" + - "dnf_result is not failed" + +- name: verify dnf module outputs + assert: + that: + - "'changed' in dnf_result" + - "'results' in dnf_result" + +- name: Create a temp RPM file which does not contain nevra information + file: + name: "/tmp/non_existent_pkg.rpm" + state: touch + +- name: Try installing RPM file which does not contain nevra information + dnf: + name: "/tmp/non_existent_pkg.rpm" + state: present + register: no_nevra_info_result + ignore_errors: yes + 
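A quick way to see why installing the empty /tmp/non_existent_pkg.rpm above has to fail before any NEVRA metadata is even reached: every valid rpm begins with a fixed 4-byte lead magic, which an empty file cannot supply. An illustrative stdlib-only check (not part of the test target):

RPM_LEAD_MAGIC = b'\xed\xab\xee\xdb'

def looks_like_rpm(path):
    # An empty or truncated file fails the magic check immediately.
    with open(path, 'rb') as f:
        return f.read(4) == RPM_LEAD_MAGIC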
+- name: Verify RPM failed to install + assert: + that: + - "'changed' in no_nevra_info_result" + - "'msg' in no_nevra_info_result" + +- name: Delete a temp RPM file + file: + name: "/tmp/non_existent_pkg.rpm" + state: absent + +- name: uninstall lsof + dnf: + name: lsof + state: removed + +- name: check lsof with rpm + shell: rpm -q lsof + ignore_errors: True + register: rpm_lsof_result + +- name: verify lsof is uninstalled + assert: + that: + - "rpm_lsof_result is failed" + +- name: create conf file that excludes lsof + copy: + content: | + [main] + exclude=lsof* + dest: '{{ output_dir }}/test-dnf.conf' + register: test_dnf_copy + +- block: + # begin test case where disable_excludes is supported + - name: Try install lsof without disable_excludes + dnf: name=lsof state=latest conf_file={{ test_dnf_copy.dest }} + register: dnf_lsof_result + ignore_errors: True + + - name: verify lsof did not install because it is in exclude list + assert: + that: + - "dnf_lsof_result is failed" + + - name: install lsof with disable_excludes + dnf: name=lsof state=latest disable_excludes=all conf_file={{ test_dnf_copy.dest }} + register: dnf_lsof_result_using_excludes + + - name: verify lsof did install using disable_excludes=all + assert: + that: + - "dnf_lsof_result_using_excludes is success" + - "dnf_lsof_result_using_excludes is changed" + - "dnf_lsof_result_using_excludes is not failed" + always: + - name: remove exclude lsof conf file + file: + path: '{{ output_dir }}/test-dnf.conf' + state: absent + +# end test case where disable_excludes is supported + +- name: Test "dnf install /usr/bin/vi" + block: + - name: Clean vim-minimal + dnf: + name: vim-minimal + state: absent + + - name: Install vim-minimal by specifying "/usr/bin/vi" + dnf: + name: /usr/bin/vi + state: present + + - name: Get rpm output + command: rpm -q vim-minimal + register: rpm_output + + - name: Check installation was successful + assert: + that: + - "'vim-minimal' in rpm_output.stdout" + when: + - ansible_distribution == 'Fedora' + +- name: Remove wildcard package that isn't installed + dnf: + name: firefox* + state: absent + register: wildcard_absent + +- assert: + that: + - wildcard_absent is successful + - wildcard_absent is not changed diff --git a/test/integration/targets/dnf/tasks/dnfinstallroot.yml b/test/integration/targets/dnf/tasks/dnfinstallroot.yml new file mode 100644 index 00000000..b5e09011 --- /dev/null +++ b/test/integration/targets/dnf/tasks/dnfinstallroot.yml @@ -0,0 +1,47 @@ +# make a installroot +- name: Create installroot + command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX" + register: dnfroot + +- name: Make a necessary directory + file: + path: "/{{ dnfroot.stdout }}/etc/dnf/vars/" + state: directory + mode: 0755 + +- name: Populate directory + copy: + # We need '8' for CentOS, but '8.x' for RHEL. + content: "{{ ansible_distribution_version|int if ansible_distribution != 'RedHat' else ansible_distribution_version }}\n" + dest: "/{{ dnfroot.stdout }}/etc/dnf/vars/releasever" + +# This will drag in > 200 MB. 
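The releasever value written by the copy task above follows a small rule worth spelling out; a sketch mirroring the Jinja2 expression in that task (illustrative only, using the same inputs):

def releasever(distribution, version):
    # RHEL keeps the full 'major.minor' string; other distributions get
    # the major component only, which is what the `|int` filter yields.
    if distribution == 'RedHat':
        return version
    return version.split('.')[0]

assert releasever('CentOS', '8.4') == '8'
assert releasever('RedHat', '8.4') == '8.4'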
+- name: attempt installroot + dnf: name=sos installroot="/{{ dnfroot.stdout }}/" disable_gpg_check=yes + register: dnf_result + +- name: check sos with rpm in installroot + shell: rpm -q sos --root="/{{ dnfroot.stdout }}/" + failed_when: False + register: rpm_result + +- debug: var=dnf_result +- debug: var=rpm_result + +- name: verify installation of sos in installroot + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_result.rc == 0" + +- name: verify dnf module outputs in / + assert: + that: + - "'changed' in dnf_result" + - "'results' in dnf_result" + +- name: cleanup installroot + file: + path: "/{{ dnfroot.stdout }}/" + state: absent diff --git a/test/integration/targets/dnf/tasks/dnfreleasever.yml b/test/integration/targets/dnf/tasks/dnfreleasever.yml new file mode 100644 index 00000000..351a26b1 --- /dev/null +++ b/test/integration/targets/dnf/tasks/dnfreleasever.yml @@ -0,0 +1,47 @@ +# make an installroot +- name: Create installroot + command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX" + register: dnfroot + +- name: Make a necessary directory + file: + path: "/{{dnfroot.stdout}}/etc/dnf/vars" + state: directory + mode: 0755 + +- name: Populate directory + copy: + content: "{{ansible_distribution_version}}\n" + dest: "/{{dnfroot.stdout}}/etc/dnf/vars/releasever" + +- name: attempt releasever to the installroot + dnf: + name: filesystem + installroot: '/{{dnfroot.stdout}}' + releasever: '{{ansible_distribution_version|int - 1}}' + register: dnf_result + +- name: check filesystem version + shell: rpm -q filesystem --root="/{{dnfroot.stdout}}/" + failed_when: False + register: rpm_result + +- debug: var=dnf_result +- debug: var=rpm_result + +- name: verify installation was done + assert: + that: + - "not dnf_result.failed | default(False)" + - "dnf_result.changed" + - "rpm_result.rc == 0" + +- name: verify the version + assert: + that: + - "rpm_result.stdout.find('fc' ~ (ansible_distribution_version|int - 1)) != -1" + +- name: cleanup installroot + file: + path: "/{{dnfroot.stdout}}/" + state: absent diff --git a/test/integration/targets/dnf/tasks/filters.yml b/test/integration/targets/dnf/tasks/filters.yml new file mode 100644 index 00000000..d5e9ee90 --- /dev/null +++ b/test/integration/targets/dnf/tasks/filters.yml @@ -0,0 +1,134 @@ +# We have a test repo set up with a valid updateinfo.xml which is referenced +# from its repomd.xml. 
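The tasks in the block below exercise advisory-based filtering: toaster's update is published as a security advisory and oven's as a bugfix advisory, so security=true and bugfix=true each select a different half of the pending updates. A compact sketch of that selection logic (an illustration, not dnf's implementation):

ADVISORIES = {
    'toaster-1.2.3.5-1.el8.noarch': 'security',
    'oven-1.2.3.5-1.el8.noarch': 'bugfix',
}

def pending_updates(security=False, bugfix=False):
    wanted = set()
    if security:
        wanted.add('security')
    if bugfix:
        wanted.add('bugfix')
    if not wanted:
        # No filter requested: every advisory-backed update is pending.
        return sorted(ADVISORIES)
    return sorted(pkg for pkg, kind in ADVISORIES.items() if kind in wanted)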
+- block: + - set_fact: + updateinfo_repo: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rpm_repo/repo-with-updateinfo + + - name: Install the test repo + yum_repository: + name: test-repo-with-updateinfo + description: test-repo-with-updateinfo + baseurl: "{{ updateinfo_repo }}" + gpgcheck: no + + - name: Install old versions of toaster and oven + dnf: + name: + - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm" + - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm" + disable_gpg_check: true + + - name: Ask for pending updates + dnf: + name: '*' + state: latest + update_only: true + disable_gpg_check: true + disablerepo: '*' + enablerepo: test-repo-with-updateinfo + register: update_no_filter + + - assert: + that: + - update_no_filter is changed + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_no_filter.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_no_filter.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_no_filter.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_no_filter.results' + + - name: Install old versions of toaster and oven + dnf: + name: + - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm" + - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm" + allow_downgrade: true + disable_gpg_check: true + + - name: Ask for pending updates with security=true + dnf: + name: '*' + state: latest + update_only: true + disable_gpg_check: true + security: true + disablerepo: '*' + enablerepo: test-repo-with-updateinfo + register: update_security + + - assert: + that: + - update_security is changed + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_security.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_security.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" not in update_security.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" not in update_security.results' + + - name: Install old versions of toaster and oven + dnf: + name: + - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm" + - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm" + allow_downgrade: true + disable_gpg_check: true + + - name: Ask for pending updates with bugfix=true + dnf: + name: '*' + state: latest + update_only: true + disable_gpg_check: true + bugfix: true + disablerepo: '*' + enablerepo: test-repo-with-updateinfo + register: update_bugfix + + - assert: + that: + - update_bugfix is changed + - '"Installed: toaster-1.2.3.5-1.el8.noarch" not in update_bugfix.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" not in update_bugfix.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results' + + - name: Install old versions of toaster and oven + dnf: + name: + - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm" + - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm" + allow_downgrade: true + disable_gpg_check: true + + - name: Ask for pending updates with bugfix=true and security=true + dnf: + name: '*' + state: latest + update_only: true + disable_gpg_check: true + bugfix: true + security: true + disablerepo: '*' + enablerepo: test-repo-with-updateinfo + register: update_bugfix + + - assert: + that: + - update_bugfix is changed + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_bugfix.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in 
update_bugfix.results' + + always: + - name: Remove installed packages + dnf: + name: + - toaster + - oven + state: absent + + - name: Remove the repo + yum_repository: + name: test-repo-with-updateinfo + state: absent + tags: + - filters diff --git a/test/integration/targets/dnf/tasks/filters_check_mode.yml b/test/integration/targets/dnf/tasks/filters_check_mode.yml new file mode 100644 index 00000000..024ac066 --- /dev/null +++ b/test/integration/targets/dnf/tasks/filters_check_mode.yml @@ -0,0 +1,118 @@ +# We have a test repo set up with a valid updateinfo.xml which is referenced +# from its repomd.xml. +- block: + - set_fact: + updateinfo_repo: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rpm_repo/repo-with-updateinfo + + - name: Install the test repo + yum_repository: + name: test-repo-with-updateinfo + description: test-repo-with-updateinfo + baseurl: "{{ updateinfo_repo }}" + gpgcheck: no + + - name: Install old versions of toaster and oven + dnf: + name: + - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm" + - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm" + disable_gpg_check: true + + - name: Ask for pending updates (check_mode) + dnf: + name: + - toaster + - oven + state: latest + update_only: true + disable_gpg_check: true + check_mode: true + register: update_no_filter + + - assert: + that: + - update_no_filter is changed + - '"would have if not in check mode" in update_no_filter.msg' + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_no_filter.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_no_filter.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_no_filter.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_no_filter.results' + + - name: Ask for pending updates with security=true (check_mode) + dnf: + name: + - toaster + - oven + state: latest + update_only: true + disable_gpg_check: true + security: true + check_mode: true + register: update_security + + - assert: + that: + - update_security is changed + - '"would have if not in check mode" in update_security.msg' + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_security.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_security.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" not in update_security.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" not in update_security.results' + + - name: Ask for pending updates with bugfix=true (check_mode) + dnf: + name: + - toaster + - oven + state: latest + update_only: true + disable_gpg_check: true + bugfix: true + check_mode: true + register: update_bugfix + + - assert: + that: + - update_bugfix is changed + - '"would have if not in check mode" in update_bugfix.msg' + - '"Installed: toaster-1.2.3.5-1.el8.noarch" not in update_bugfix.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" not in update_bugfix.results' + - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results' + + - name: Ask for pending updates with bugfix=true and security=true (check_mode) + dnf: + name: + - toaster + - oven + state: latest + update_only: true + disable_gpg_check: true + bugfix: true + security: true + check_mode: true + register: update_bugfix + + - assert: + that: + - update_bugfix is changed + - '"would have if not in check mode" in update_bugfix.msg' + - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_bugfix.results' + - 
'"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results' + - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results' + + always: + - name: Remove installed packages + dnf: + name: + - toaster + - oven + state: absent + + - name: Remove the repo + yum_repository: + name: test-repo-with-updateinfo + state: absent + tags: + - filters diff --git a/test/integration/targets/dnf/tasks/gpg.yml b/test/integration/targets/dnf/tasks/gpg.yml new file mode 100644 index 00000000..2b6f4079 --- /dev/null +++ b/test/integration/targets/dnf/tasks/gpg.yml @@ -0,0 +1,72 @@ +# Set up a repo of unsigned rpms +- block: + - name: Ensure our test package isn't already installed + dnf: + name: + - fpaste + state: absent + + - name: Install rpm-sign + dnf: + name: + - rpm-sign + state: present + + - name: Create directory to use as local repo + file: + path: "{{ remote_tmp_dir }}/unsigned" + state: directory + + - name: Download an RPM + get_url: + url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/fpaste-0.3.9.1-1.fc27.noarch.rpm + dest: "{{ remote_tmp_dir }}/unsigned/fpaste-0.3.9.1-1.fc27.noarch.rpm" + mode: 0644 + + - name: Unsign the RPM + command: rpmsign --delsign "{{ remote_tmp_dir }}/unsigned/fpaste-0.3.9.1-1.fc27.noarch.rpm" + + - name: createrepo + command: createrepo . + args: + chdir: "{{ remote_tmp_dir }}/unsigned" + + - name: Add the repo + yum_repository: + name: unsigned + description: unsigned rpms + baseurl: "file://{{ remote_tmp_dir }}/unsigned/" + # we want to ensure that signing is verified + gpgcheck: true + + - name: Install fpaste from above + dnf: + name: + - fpaste + disablerepo: '*' + enablerepo: unsigned + register: res + ignore_errors: yes + + - assert: + that: + - res is failed + - "'Failed to validate GPG signature' in res.msg" + + always: + - name: Remove rpm-sign (and fpaste if it got installed) + dnf: + name: + - rpm-sign + - fpaste + state: absent + + - name: Remove test repo + yum_repository: + name: unsigned + state: absent + + - name: Remove repo dir + file: + path: "{{ remote_tmp_dir }}/unsigned" + state: absent diff --git a/test/integration/targets/dnf/tasks/logging.yml b/test/integration/targets/dnf/tasks/logging.yml new file mode 100644 index 00000000..903bf563 --- /dev/null +++ b/test/integration/targets/dnf/tasks/logging.yml @@ -0,0 +1,48 @@ +# Verify logging function is enabled in the dnf module. 
+# The following tasks have been supported in dnf-4.2.17-6 or later +# Note: https://bugzilla.redhat.com/show_bug.cgi?id=1788212 +- name: Install latest version python3-dnf + dnf: + name: + - python3-dnf + - python3-libdnf # https://bugzilla.redhat.com/show_bug.cgi?id=1887502 + - libmodulemd # https://bugzilla.redhat.com/show_bug.cgi?id=1942236 + state: latest + register: dnf_result + +- name: Verify python3-dnf installed + assert: + that: + - "dnf_result.rc == 0" + +- name: Get python3-dnf version + shell: "dnf info python3-dnf | awk '/^Version/ { print $3 }'" + register: py3_dnf_version + +- name: Check logging enabled + block: + - name: remove logfiles if they exist + file: + path: "{{ item }}" + state: absent + loop: "{{ dnf_log_files }}" + + - name: Install sos package + dnf: + name: sos + state: present + register: dnf_result + + - name: Get status of logfiles + stat: + path: "{{ item }}" + loop: "{{ dnf_log_files }}" + register: stats + + - name: Verify logfile exists + assert: + that: + - "item.stat.exists" + loop: "{{ stats.results }}" + when: + - 'py3_dnf_version.stdout is version("4.2.17", ">=")' diff --git a/test/integration/targets/dnf/tasks/main.yml b/test/integration/targets/dnf/tasks/main.yml new file mode 100644 index 00000000..1b6e0941 --- /dev/null +++ b/test/integration/targets/dnf/tasks/main.yml @@ -0,0 +1,62 @@ +# test code for the dnf module +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Note: We install the yum package onto Fedora so that this will work on dnf systems. +# We want to keep testing that path for people who don't want to upgrade their systems. 
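All of the include_tasks entries below repeat one distribution gate; restated compactly for reference (illustrative only):

def dnf_tests_supported(distribution, major_version):
    # dnf tests run on Fedora 23+ and on RHEL/CentOS 8+; individual
    # includes tighten this further (e.g. modularity needs Fedora 29+,
    # logging needs Fedora 31+).
    if distribution == 'Fedora':
        return int(major_version) >= 23
    if distribution in ('RedHat', 'CentOS'):
        return int(major_version) >= 8
    return False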
+ +- include_tasks: dnf.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + +- include_tasks: filters_check_mode.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + tags: + - filters + +- include_tasks: filters.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + tags: + - filters + +- include_tasks: gpg.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + +- include_tasks: repo.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + +- include_tasks: dnfinstallroot.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + +# Attempting to install a different RHEL release in a tmpdir doesn't work (rhel8 beta) +- include_tasks: dnfreleasever.yml + when: + - ansible_distribution == 'Fedora' + - ansible_distribution_major_version is version('23', '>=') + +- include_tasks: modularity.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('29', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + +- include_tasks: logging.yml + when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('31', '>=')) or + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) diff --git a/test/integration/targets/dnf/tasks/modularity.yml b/test/integration/targets/dnf/tasks/modularity.yml new file mode 100644 index 00000000..48a0111a --- /dev/null +++ b/test/integration/targets/dnf/tasks/modularity.yml @@ -0,0 +1,99 @@ +# FUTURE - look at including AppStream support in our local repo +- name: Include distribution specific variables + include_vars: "{{ ansible_facts.distribution }}.yml" + +- name: install "{{ astream_name }}" module + dnf: + name: "{{ astream_name }}" + state: present + register: dnf_result + +- name: verify installation of "{{ astream_name }}" module + assert: + that: + - "not dnf_result.failed" + - "dnf_result.changed" + +- name: install "{{ astream_name }}" module again + dnf: + name: "{{ astream_name }}" + state: present + register: dnf_result + +- name: verify installation of "{{ astream_name }}" module again + assert: + that: + - "not dnf_result.failed" + - "not dnf_result.changed" + +- name: uninstall "{{ astream_name }}" module + dnf: + name: "{{ astream_name }}" + state: absent + register: dnf_result + +- name: verify uninstallation of "{{ astream_name }}" module + assert: + that: + - "not dnf_result.failed" + - "dnf_result.changed" + +- name: uninstall "{{ astream_name }}" module again + dnf: + 
name: "{{ astream_name }}" + state: absent + register: dnf_result + +- name: verify uninstallation of "{{ astream_name }}" module again + assert: + that: + - "not dnf_result.failed" + - "not dnf_result.changed" + +- name: install "{{ astream_name_no_stream }}" module without providing stream + dnf: + name: "{{ astream_name_no_stream }}" + state: present + register: dnf_result + +- name: verify installation of "{{ astream_name_no_stream }}" module without providing stream + assert: + that: + - "not dnf_result.failed" + - "dnf_result.changed" + +- name: install "{{ astream_name_no_stream }}" module again without providing stream + dnf: + name: "{{ astream_name_no_stream }}" + state: present + register: dnf_result + +- name: verify installation of "{{ astream_name_no_stream }}" module again without providing stream + assert: + that: + - "not dnf_result.failed" + - "not dnf_result.changed" + +- name: uninstall "{{ astream_name_no_stream }}" module without providing stream + dnf: + name: "{{ astream_name_no_stream }}" + state: absent + register: dnf_result + +- name: verify uninstallation of "{{ astream_name_no_stream }}" module without providing stream + assert: + that: + - "not dnf_result.failed" + - "dnf_result.changed" + +- name: uninstall "{{ astream_name_no_stream }}" module again without providing stream + dnf: + name: "{{ astream_name_no_stream }}" + state: absent + register: dnf_result + +- name: verify uninstallation of "{{ astream_name_no_stream }}" module again without providing stream + assert: + that: + - "not dnf_result.failed" + - "not dnf_result.changed" diff --git a/test/integration/targets/dnf/tasks/repo.yml b/test/integration/targets/dnf/tasks/repo.yml new file mode 100644 index 00000000..4f82899c --- /dev/null +++ b/test/integration/targets/dnf/tasks/repo.yml @@ -0,0 +1,309 @@ +- block: + - name: Install dinginessentail-1.0-1 + dnf: + name: dinginessentail-1.0-1 + state: present + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 again + dnf: + name: dinginessentail-1.0-1 + state: present + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify dnf module outputs + assert: + that: + - "'msg' in dnf_result" + # ============================================================================ + - name: Install dinginessentail again (noop, module is idempotent) + dnf: + name: dinginessentail + state: present + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + # No upgrade happened to 1.1.1 + - "not dnf_result.changed" + # Old version still installed + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + # ============================================================================ + - name: Install dinginessentail-1:1.0-2 + dnf: + name: "dinginessentail-1:1.0-2.{{ ansible_architecture }}" + state: present + register: dnf_result + + - 
name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + # ============================================================================ + - name: Update to the latest dinginessentail + dnf: + name: dinginessentail + state: latest + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.1-1')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file (downgrade) + dnf: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + allow_downgrade: True + disable_gpg_check: True + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + + - name: Remove dinginessentail + dnf: + name: dinginessentail + state: absent + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file + dnf: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: True + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file again + dnf: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: True + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + # ============================================================================ + - name: Install dinginessentail-1.0-2 from a file + dnf: + name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: True + register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify dnf module outputs + assert: + that: + - "'results' in dnf_result" + # ============================================================================ + - name: Install dinginessentail-1.0-2 from a file again + dnf: + name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: True + 
register: dnf_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not dnf_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + # ============================================================================ + - name: Remove dinginessentail + dnf: + name: dinginessentail + state: absent + + - name: Try to install incompatible arch + dnf: + name: "{{ repodir_ppc64 }}/dinginessentail-1.0-1.ppc64.rpm" + state: present + register: dnf_result + ignore_errors: yes + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + ignore_errors: yes + + - name: Verify installation + assert: + that: + - "rpm_result.rc == 1" + - "not dnf_result.changed" + - "dnf_result is failed" + # ============================================================================ + + # Should install dinginessentail-with-weak-dep and dinginessentail-weak-dep + - name: Install package with defaults + dnf: + name: dinginessentail-with-weak-dep + state: present + + - name: Check if dinginessentail-with-weak-dep is installed + shell: rpm -q dinginessentail-with-weak-dep + register: rpm_main_result + + - name: Check if dinginessentail-weak-dep is installed + shell: rpm -q dinginessentail-weak-dep + register: rpm_weak_result + + - name: Verify install with weak deps + assert: + that: + - rpm_main_result.rc == 0 + - rpm_weak_result.rc == 0 + + - name: Uninstall dinginessentail weak dep packages + dnf: + name: + - dinginessentail-with-weak-dep + - dinginessentail-weak-dep + state: absent + + - name: Install package with weak deps but skip weak deps + dnf: + name: dinginessentail-with-weak-dep + install_weak_deps: False + state: present + + - name: Check if dinginessentail-with-weak-dep is installed + shell: rpm -q dinginessentail-with-weak-dep + register: rpm_main_result + + - name: Check if dinginessentail-weak-dep is installed + shell: rpm -q dinginessentail-weak-dep + register: rpm_weak_result + ignore_errors: yes + + - name: Verify install without weak deps + assert: + that: + - rpm_main_result.rc == 0 + - rpm_weak_result.rc == 1 # the weak dependency shouldn't be installed + + # https://github.com/ansible/ansible/issues/55938 + - name: Install dinginessentail-* + dnf: + name: dinginessentail-* + state: present + + - name: Uninstall dinginessentail-* + dnf: + name: dinginessentail-* + state: absent + + - name: Check if all dinginessentail packages are removed + shell: rpm -qa dinginessentail-* | wc -l + register: rpm_result + + - name: Verify rpm result + assert: + that: + - rpm_result.stdout == '0' + always: + - name: Clean up + dnf: + name: + - dinginessentail + - dinginessentail-with-weak-dep + - dinginessentail-weak-dep + state: absent diff --git a/test/integration/targets/dnf/vars/CentOS.yml b/test/integration/targets/dnf/vars/CentOS.yml new file mode 100644 index 00000000..c70d8538 --- /dev/null +++ b/test/integration/targets/dnf/vars/CentOS.yml @@ -0,0 +1,2 @@ +astream_name: '@php:7.2/minimal' +astream_name_no_stream: '@php/minimal' diff --git a/test/integration/targets/dnf/vars/Fedora.yml b/test/integration/targets/dnf/vars/Fedora.yml new file mode 100644 index 00000000..6e0a798c --- /dev/null +++ b/test/integration/targets/dnf/vars/Fedora.yml @@ -0,0 +1,6 @@ +astream_name: '@hub:pre-release/default' + +# For this to work, it needs to be a module that only shows up once in `dnf module list`.
+# Such packages that exist on all the versions we test on are hard to come by. +# TODO: This would be solved by using our own repo with modularity/streams. +astream_name_no_stream: '@hub/default' diff --git a/test/integration/targets/dnf/vars/RedHat.yml b/test/integration/targets/dnf/vars/RedHat.yml new file mode 100644 index 00000000..c70d8538 --- /dev/null +++ b/test/integration/targets/dnf/vars/RedHat.yml @@ -0,0 +1,2 @@ +astream_name: '@php:7.2/minimal' +astream_name_no_stream: '@php/minimal' diff --git a/test/integration/targets/dnf/vars/main.yml b/test/integration/targets/dnf/vars/main.yml new file mode 100644 index 00000000..86588de3 --- /dev/null +++ b/test/integration/targets/dnf/vars/main.yml @@ -0,0 +1,4 @@ +dnf_log_files: + - /var/log/dnf.log + - /var/log/dnf.rpm.log + - /var/log/dnf.librepo.log diff --git a/test/integration/targets/dpkg_selections/aliases b/test/integration/targets/dpkg_selections/aliases new file mode 100644 index 00000000..55da8c88 --- /dev/null +++ b/test/integration/targets/dpkg_selections/aliases @@ -0,0 +1,7 @@ +shippable/posix/group1 +destructive +skip/aix +skip/freebsd +skip/osx +skip/macos +skip/rhel diff --git a/test/integration/targets/dpkg_selections/defaults/main.yaml b/test/integration/targets/dpkg_selections/defaults/main.yaml new file mode 100644 index 00000000..94bd9bcc --- /dev/null +++ b/test/integration/targets/dpkg_selections/defaults/main.yaml @@ -0,0 +1 @@ +hello_old_version: 2.6-1 diff --git a/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml b/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml new file mode 100644 index 00000000..5a46fcd9 --- /dev/null +++ b/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml @@ -0,0 +1,89 @@ +- name: download and install old version of hello + apt: "deb=https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dpkg_selections/hello_{{ hello_old_version }}_amd64.deb" + +- name: freeze version for hello + dpkg_selections: + name: hello + selection: hold + +- name: get dpkg selections + shell: "dpkg --get-selections | grep hold" + register: result + +- debug: var=result + +- name: check that hello is marked as hold + assert: + that: + - "'hello' in result.stdout" + +- name: attempt to upgrade hello + apt: + name: hello + state: latest + ignore_errors: yes + +- name: check hello version + shell: dpkg -s hello | grep Version | awk '{print $2}' + register: hello_version + +- name: ensure hello was not upgraded + assert: + that: + - hello_version.stdout == hello_old_version + +- name: remove version freeze + dpkg_selections: + name: hello + selection: install + +- name: upgrade hello + apt: + name: hello + state: latest + +- name: check hello version + shell: dpkg -s hello | grep Version | awk '{print $2}' + register: hello_version + +- name: check that old version upgraded correctly + assert: + that: + - hello_version.stdout != hello_old_version + +- name: set hello to deinstall + dpkg_selections: + name: hello + selection: deinstall + +- name: get dpkg selections + shell: "dpkg --get-selections | grep deinstall" + register: result + +- debug: var=result + +- name: check that hello is marked as deinstall + assert: + that: + - "'hello' in result.stdout" + +- name: set hello to purge + dpkg_selections: + name: hello + selection: purge + +- name: get dpkg selections + shell: "dpkg --get-selections | grep purge" + register: result + +- debug: var=result + +- name: check that hello is marked as purge + assert: + that: + - "'hello' in result.stdout" + 
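+# For reference, a sketch of the dpkg interface this module drives (not executed here): +# dpkg --get-selections hello # query the current selection state +# echo "hello hold" | dpkg --set-selections # shell equivalent of selection: hold +# The valid selection states are install, hold, deinstall and purge, all exercised above.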
+- name: remove hello + apt: + name: hello + state: absent diff --git a/test/integration/targets/dpkg_selections/tasks/main.yaml b/test/integration/targets/dpkg_selections/tasks/main.yaml new file mode 100644 index 00000000..6abd1dec --- /dev/null +++ b/test/integration/targets/dpkg_selections/tasks/main.yaml @@ -0,0 +1,3 @@ +--- + - include: 'dpkg_selections.yaml' + when: ansible_distribution in ('Ubuntu', 'Debian') diff --git a/test/integration/targets/egg-info/aliases b/test/integration/targets/egg-info/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/egg-info/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py b/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py new file mode 100644 index 00000000..c0c5ccd5 --- /dev/null +++ b/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py @@ -0,0 +1,11 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pkg_resources + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + return ['ok'] diff --git a/test/integration/targets/egg-info/tasks/main.yml b/test/integration/targets/egg-info/tasks/main.yml new file mode 100644 index 00000000..d7b886c0 --- /dev/null +++ b/test/integration/targets/egg-info/tasks/main.yml @@ -0,0 +1,3 @@ +- name: Make sure pkg_resources can be imported by plugins + debug: + msg: "{{ lookup('import_pkg_resources') }}" diff --git a/test/integration/targets/embedded_module/aliases b/test/integration/targets/embedded_module/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/embedded_module/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/embedded_module/library/test_integration_module b/test/integration/targets/embedded_module/library/test_integration_module new file mode 100644 index 00000000..04755b8e --- /dev/null +++ b/test/integration/targets/embedded_module/library/test_integration_module @@ -0,0 +1,3 @@ +#!/usr/bin/python + +print('{"changed":false, "msg":"this is the embedded module"}') diff --git a/test/integration/targets/embedded_module/tasks/main.yml b/test/integration/targets/embedded_module/tasks/main.yml new file mode 100644 index 00000000..6a6d6485 --- /dev/null +++ b/test/integration/targets/embedded_module/tasks/main.yml @@ -0,0 +1,9 @@ +- name: run the embedded dummy module + test_integration_module: + register: result + +- name: assert the embedded module ran + assert: + that: + - "'msg' in result" + - result.msg == "this is the embedded module" diff --git a/test/integration/targets/environment/aliases b/test/integration/targets/environment/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/environment/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/environment/runme.sh b/test/integration/targets/environment/runme.sh new file mode 100755 index 00000000..c556a17c --- /dev/null +++ b/test/integration/targets/environment/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_environment.yml -i ../../inventory "$@" diff --git a/test/integration/targets/environment/test_environment.yml b/test/integration/targets/environment/test_environment.yml new file mode 100644 index 00000000..43f9c74e --- /dev/null +++ 
b/test/integration/targets/environment/test_environment.yml @@ -0,0 +1,173 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: get PATH from target + command: echo $PATH + register: target_path + +- hosts: testhost + vars: + - test1: + key1: val1 + environment: + PATH: '{{ansible_env.PATH + ":/lola"}}' + lola: 'ido' + tasks: + - name: ensure special case with ansible_env is skipped but others still work + assert: + that: + - target_path.stdout == ansible_env.PATH + - "'/lola' not in ansible_env.PATH" + - ansible_env.lola == 'ido' + + - name: check that envvar does not exist + shell: echo $key1 + register: test_env + + - name: assert no val in stdout + assert: + that: + - '"val1" not in test_env.stdout_lines' + + - name: check that envvar does exist + shell: echo $key1 + environment: "{{test1}}" + register: test_env2 + + - name: assert val1 in stdout + assert: + that: + - '"val1" in test_env2.stdout_lines' + +- hosts: testhost + vars: + - test1: + key1: val1 + - test2: + key1: not1 + other1: val2 + environment: "{{test1}}" + tasks: + - name: check that play envvar does exist + shell: echo $key1 + register: test_env3 + + - name: assert val1 in stdout + assert: + that: + - '"val1" in test_env3.stdout_lines' + + - name: check that task envvar does exist + shell: echo $key1; echo $other1 + register: test_env4 + environment: "{{test2}}" + + - name: assert all vars appear as expected + assert: + that: + - '"val1" not in test_env4.stdout_lines' + - '"not1" in test_env4.stdout_lines' + - '"val2" in test_env4.stdout_lines' + + - block: + - name: check that task envvar does exist in block + shell: echo $key1; echo $other1 + register: test_env5 + + - name: assert all vars appear as expected in block + assert: + that: + - '"val1" not in test_env5.stdout_lines' + - '"not1" in test_env5.stdout_lines' + - '"val2" in test_env5.stdout_lines' + environment: "{{test2}}" + +- name: test setting environment while using loops + hosts: testhost + environment: + foo: outer + tasks: + - name: verify foo==outer + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==outer + assert: + that: + - "{{ test_foo.results[0].stdout == 'outer' }}" + + - name: set environment on a task + environment: + foo: in_task + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==in_task + assert: + that: + - "test_foo.results[0].stdout == 'in_task'" + + - name: test that the outer env var is set appropriately still + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==outer + assert: + that: + - "{{ test_foo.results[0].stdout == 'outer' }}" + + - name: set environment on a block + environment: + foo: in_block + block: + - name: test the environment is set in the block + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==in_block + assert: + that: + - "test_foo.results[0].stdout == 'in_block'" + + - name: test setting environment in a task inside a block + environment: + foo: in_block_in_task + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==in_block_in_task + assert: + that: + - "test_foo.results[0].stdout == 'in_block_in_task'" + + - name: test the environment var is set to the parent value + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: assert foo==in_block + assert: + that: + - "test_foo.results[0].stdout == 'in_block'" + + - name: test the env var foo has the initial value + command: /bin/echo $foo + loop: + - 1 + register: test_foo + + - name: 
assert foo==outer + assert: + that: + - "{{ test_foo.results[0].stdout == 'outer' }}" diff --git a/test/integration/targets/error_from_connection/aliases b/test/integration/targets/error_from_connection/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/error_from_connection/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/error_from_connection/connection_plugins/dummy.py b/test/integration/targets/error_from_connection/connection_plugins/dummy.py new file mode 100644 index 00000000..2a2c8795 --- /dev/null +++ b/test/integration/targets/error_from_connection/connection_plugins/dummy.py @@ -0,0 +1,45 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + author: + - John Doe + connection: dummy + short_description: defective connection plugin + description: + - defective connection plugin + version_added: "2.0" + options: {} +""" +import ansible.constants as C +from ansible.errors import AnsibleError +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + + transport = 'dummy' + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + raise AnsibleError('an error with {{ some Jinja }}') + + def transport(self): + pass + + def _connect(self): + pass + + def exec_command(self, cmd, in_data=None, sudoable=True): + pass + + def put_file(self, in_path, out_path): + pass + + def fetch_file(self, in_path, out_path): + pass + + def close(self): + pass diff --git a/test/integration/targets/error_from_connection/inventory b/test/integration/targets/error_from_connection/inventory new file mode 100644 index 00000000..324f0d3a --- /dev/null +++ b/test/integration/targets/error_from_connection/inventory @@ -0,0 +1,2 @@ +[local] +testhost diff --git a/test/integration/targets/error_from_connection/play.yml b/test/integration/targets/error_from_connection/play.yml new file mode 100644 index 00000000..04320d88 --- /dev/null +++ b/test/integration/targets/error_from_connection/play.yml @@ -0,0 +1,20 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: "use a connection plugin raising an exception, exception message contains Jinja template." 
+ connection: dummy + command: /bin/true # command won't be executed + register: result + ignore_errors: True + + - name: "check that Jinja template embedded in exception message isn't rendered" + debug: + msg: 'ok' + when: result is failed + register: debug_task + + - assert: + that: + - result is failed + - "'an error with' in result.msg" # makes sure plugin was found + - debug_task is success diff --git a/test/integration/targets/error_from_connection/runme.sh b/test/integration/targets/error_from_connection/runme.sh new file mode 100755 index 00000000..92679fd1 --- /dev/null +++ b/test/integration/targets/error_from_connection/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -o nounset -o errexit -o xtrace + +ansible-playbook -i inventory "play.yml" -v "$@" diff --git a/test/integration/targets/expect/aliases b/test/integration/targets/expect/aliases new file mode 100644 index 00000000..7211b8d0 --- /dev/null +++ b/test/integration/targets/expect/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +destructive +needs/target/setup_pexpect diff --git a/test/integration/targets/expect/files/foo.txt b/test/integration/targets/expect/files/foo.txt new file mode 100644 index 00000000..7c6ded14 --- /dev/null +++ b/test/integration/targets/expect/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/targets/expect/files/test_command.py b/test/integration/targets/expect/files/test_command.py new file mode 100644 index 00000000..685c50c2 --- /dev/null +++ b/test/integration/targets/expect/files/test_command.py @@ -0,0 +1,12 @@ +import sys + +try: + input_function = raw_input +except NameError: + input_function = input + +prompts = sys.argv[1:] or ['foo'] + +for prompt in prompts: + user_input = input_function(prompt) + print(user_input) diff --git a/test/integration/targets/expect/tasks/main.yml b/test/integration/targets/expect/tasks/main.yml new file mode 100644 index 00000000..0c408d28 --- /dev/null +++ b/test/integration/targets/expect/tasks/main.yml @@ -0,0 +1,204 @@ +# test code for the expect module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
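+# A sketch of the mechanism under test (per the expect module documentation): the +# "responses" option maps a regexp, matched against the spawned command's output, to +# the reply to send, e.g.: +# - expect: +# command: passwd testuser +# responses: +# '(?i)password': 'Secret123' +# The tasks below exercise this against the small prompt script copied in next.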
+- name: Install test requirements + import_role: + name: setup_pexpect + +- name: record the test_command file + set_fact: test_command_file={{output_dir | expanduser}}/test_command.py + +- name: copy script into output directory + copy: src=test_command.py dest={{test_command_file}} mode=0444 + +- name: record the output file + set_fact: output_file={{output_dir}}/foo.txt + +- copy: + content: "foo" + dest: "{{output_file}}" + +- name: test expect + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + register: expect_result + +- name: assert expect worked + assert: + that: + - "expect_result.changed == true" + - "expect_result.stdout == 'foobar'" + +- name: test creates option + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + creates: "{{output_file}}" + register: creates_result + +- name: assert when creates is provided command is not run + assert: + that: + - "creates_result.changed == false" + - "'skipped' in creates_result.stdout" + +- name: test creates option (missing) + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + creates: "{{output_file}}.does.not.exist" + register: creates_result + +- name: assert when missing creates is provided command is run + assert: + that: + - "creates_result.changed == true" + - "creates_result.stdout == 'foobar'" + +- name: test removes option + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + removes: "{{output_file}}" + register: removes_result + +- name: assert when removes is provided command is run + assert: + that: + - "removes_result.changed == true" + - "removes_result.stdout == 'foobar'" + +- name: test removes option (missing) + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + removes: "{{output_file}}.does.not.exist" + register: removes_result + +- name: assert when missing removes is provided command is not run + assert: + that: + - "removes_result.changed == false" + - "'skipped' in removes_result.stdout" + +- name: test chdir + expect: + command: "/bin/sh -c 'pwd && sleep 1'" + chdir: "{{output_dir}}" + responses: + foo: bar + register: chdir_result + +- name: assert chdir works + assert: + that: + - "'{{chdir_result.stdout |expanduser | realpath }}' == '{{output_dir | expanduser | realpath}}'" + +- name: test timeout option + expect: + command: "sleep 10" + responses: + foo: bar + timeout: 1 + ignore_errors: true + register: timeout_result + +- name: assert failure message when timeout + assert: + that: + - "timeout_result.msg == 'command exceeded timeout'" + +- name: test echo option + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}}" + responses: + foo: bar + echo: true + register: echo_result + +- name: assert echo works + assert: + that: + - "echo_result.stdout_lines|length == 2" + - "echo_result.stdout_lines[0] == 'foobar'" + - "echo_result.stdout_lines[1] == 'bar'" + +- name: test response list + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}} foo foo" + responses: + foo: + - bar + - baz + register: list_result + +- name: assert list response works + assert: + that: + - "list_result.stdout_lines|length == 2" + - "list_result.stdout_lines[0] == 'foobar'" + - "list_result.stdout_lines[1] == 'foobaz'" + +- name: test no remaining responses + expect: + command: "{{ansible_python_interpreter}} {{test_command_file}} foo foo" + responses: + 
foo: + - bar + register: list_result + ignore_errors: yes + +- name: assert no remaining responses + assert: + that: + - "list_result.failed" + - "'No remaining responses' in list_result.msg" + +- name: test no command + expect: + command: "" + responses: + foo: bar + register: no_command_result + ignore_errors: yes + +- name: assert no command + assert: + that: + - "no_command_result.failed" + - "no_command_result.msg == 'no command given'" + - "no_command_result.rc == 256" + +- name: test non-zero return code + expect: + command: "ls /does-not-exist" + responses: + foo: bar + register: non_zero_result + ignore_errors: yes + +- name: assert non-zero return code + assert: + that: + - "non_zero_result.failed" + - "non_zero_result.msg == 'non-zero return code'" diff --git a/test/integration/targets/facts_d/aliases b/test/integration/targets/facts_d/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/facts_d/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/facts_d/files/basdscript.fact b/test/integration/targets/facts_d/files/basdscript.fact new file mode 100644 index 00000000..2bb8d868 --- /dev/null +++ b/test/integration/targets/facts_d/files/basdscript.fact @@ -0,0 +1,3 @@ +#!/bin/sh + +exit 1 diff --git a/test/integration/targets/facts_d/files/goodscript.fact b/test/integration/targets/facts_d/files/goodscript.fact new file mode 100644 index 00000000..6ee866cf --- /dev/null +++ b/test/integration/targets/facts_d/files/goodscript.fact @@ -0,0 +1,3 @@ +#!/bin/sh + +echo '{"script_ran": true}' diff --git a/test/integration/targets/facts_d/files/preferences.fact b/test/integration/targets/facts_d/files/preferences.fact new file mode 100644 index 00000000..c32583d4 --- /dev/null +++ b/test/integration/targets/facts_d/files/preferences.fact @@ -0,0 +1,2 @@ +[general] +bar=loaded diff --git a/test/integration/targets/facts_d/files/unreadable.fact b/test/integration/targets/facts_d/files/unreadable.fact new file mode 100644 index 00000000..98f562be --- /dev/null +++ b/test/integration/targets/facts_d/files/unreadable.fact @@ -0,0 +1 @@ +wontbeseen=ever diff --git a/test/integration/targets/facts_d/meta/main.yml b/test/integration/targets/facts_d/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/facts_d/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/facts_d/tasks/main.yml b/test/integration/targets/facts_d/tasks/main.yml new file mode 100644 index 00000000..aadef4c6 --- /dev/null +++ b/test/integration/targets/facts_d/tasks/main.yml @@ -0,0 +1,45 @@ +# (c) 2014, James Tanner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: prep for local facts tests + block: + - name: set factdir var + set_fact: fact_dir={{output_dir}}/facts.d + + - name: create fact dir + file: path={{ fact_dir }} state=directory + + - name: copy local facts test files + copy: src={{ item['name'] }}.fact dest={{ fact_dir }}/ mode={{ item['mode']|default(omit) }} + loop: + - name: preferences + - name: basdscript + mode: '0775' + - name: goodscript + mode: '0775' + - name: unreadable + mode: '0000' + +- name: force fact gather to get ansible_local + setup: + fact_path: "{{ fact_dir | expanduser }}" + filter: "*local*" + register: setup_result + +- name: show gathering results if rerun with -vvv + debug: var=setup_result verbosity=3 + +- name: check for expected results from local facts + 
assert: + that: + - "'ansible_facts' in setup_result" + - "'ansible_local' in setup_result.ansible_facts" + - "'ansible_env' not in setup_result.ansible_facts" + - "'ansible_user_id' not in setup_result.ansible_facts" + - "'preferences' in setup_result.ansible_facts['ansible_local']" + - "'general' in setup_result.ansible_facts['ansible_local']['preferences']" + - "'bar' in setup_result.ansible_facts['ansible_local']['preferences']['general']" + - "setup_result.ansible_facts['ansible_local']['preferences']['general']['bar'] == 'loaded'" + - setup_result['ansible_facts']['ansible_local']['goodscript']['script_ran']|bool + - setup_result['ansible_facts']['ansible_local']['basdscript'].startswith("Failure executing fact script") + - setup_result['ansible_facts']['ansible_local']['unreadable'].startswith('error loading facts') diff --git a/test/integration/targets/facts_linux_network/aliases b/test/integration/targets/facts_linux_network/aliases new file mode 100644 index 00000000..21a4e907 --- /dev/null +++ b/test/integration/targets/facts_linux_network/aliases @@ -0,0 +1,5 @@ +needs/privileged +shippable/posix/group2 +skip/freebsd +skip/osx +skip/macos diff --git a/test/integration/targets/facts_linux_network/meta/main.yml b/test/integration/targets/facts_linux_network/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/facts_linux_network/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/facts_linux_network/tasks/main.yml b/test/integration/targets/facts_linux_network/tasks/main.yml new file mode 100644 index 00000000..af4dde96 --- /dev/null +++ b/test/integration/targets/facts_linux_network/tasks/main.yml @@ -0,0 +1,18 @@ +- block: + - name: Add IP to interface + command: ip address add 100.42.42.1/32 dev {{ ansible_facts.default_ipv4.interface }} + ignore_errors: yes + + - name: Gather network facts + setup: + gather_subset: network + + - name: Ensure broadcast is reported as empty + assert: + that: + - ansible_facts[ansible_facts['default_ipv4']['interface']]['ipv4_secondaries'][0]['broadcast'] == '' + + always: + - name: Remove IP from interface + command: ip address delete 100.42.42.1/32 dev {{ ansible_facts.default_ipv4.interface }} + ignore_errors: yes diff --git a/test/integration/targets/failed_when/aliases b/test/integration/targets/failed_when/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/failed_when/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/failed_when/tasks/main.yml b/test/integration/targets/failed_when/tasks/main.yml new file mode 100644 index 00000000..3f8ae545 --- /dev/null +++ b/test/integration/targets/failed_when/tasks/main.yml @@ -0,0 +1,68 @@ +# Test code for failed_when. +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
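+# A sketch of the mechanism under test (standard Ansible behaviour): failed_when +# replaces a task's default failure test (normally rc != 0) with an arbitrary +# expression, e.g.: +# - command: /usr/bin/example +# register: result +# failed_when: "'FATAL' in result.stdout" +# The matrix below crosses rc 0/1 with failed_when true/false/unset.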
+ +- name: command rc 0 failed_when_result undef + shell: exit 0 + ignore_errors: True + register: result + +- assert: + that: + - "'failed' in result and not result.failed" + +- name: command rc 0 failed_when_result False + shell: exit 0 + failed_when: false + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and not result.failed" + - "'failed_when_result' in result and not result.failed_when_result" + +- name: command rc 1 failed_when_result True + shell: exit 1 + failed_when: true + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and result.failed" + - "'failed_when_result' in result and result.failed_when_result" + +- name: command rc 1 failed_when_result undef + shell: exit 1 + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and result.failed" + +- name: command rc 1 failed_when_result False + shell: exit 1 + failed_when: false + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and not result.failed" + - "'failed_when_result' in result and not result.failed_when_result" diff --git a/test/integration/targets/fetch/aliases b/test/integration/targets/fetch/aliases new file mode 100644 index 00000000..fb5d6faa --- /dev/null +++ b/test/integration/targets/fetch/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +needs/target/setup_remote_tmp_dir diff --git a/test/integration/targets/fetch/injection/avoid_slurp_return.yml b/test/integration/targets/fetch/injection/avoid_slurp_return.yml new file mode 100644 index 00000000..af62dcf4 --- /dev/null +++ b/test/integration/targets/fetch/injection/avoid_slurp_return.yml @@ -0,0 +1,26 @@ +- name: ensure that 'fake slurp' does not poison fetch source + hosts: localhost + gather_facts: False + tasks: + - name: fetch with relative source path + fetch: src=../injection/here.txt dest={{output_dir}} + become: true + register: islurp + + - name: fetch with normal source path + fetch: src=here.txt dest={{output_dir}} + become: true + register: islurp2 + + - name: ensure all is good in hollywood + assert: + that: + - "'..' not in islurp['dest']" + - "'..' 
not in islurp2['dest']" + - "'foo' not in islurp['dest']" + - "'foo' not in islurp2['dest']" + + - name: try to trip dest anyways + fetch: src=../injection/here.txt dest={{output_dir}} + become: true + register: islurp2 diff --git a/test/integration/targets/fetch/injection/here.txt b/test/integration/targets/fetch/injection/here.txt new file mode 100644 index 00000000..493021b1 --- /dev/null +++ b/test/integration/targets/fetch/injection/here.txt @@ -0,0 +1 @@ +this is a test file diff --git a/test/integration/targets/fetch/injection/library/slurp.py b/test/integration/targets/fetch/injection/library/slurp.py new file mode 100644 index 00000000..7b78ba18 --- /dev/null +++ b/test/integration/targets/fetch/injection/library/slurp.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = """ + module: fakeslurp + short_desciptoin: fake slurp module + description: + - this is a fake slurp module + options: + _notreal: + description: really not a real slurp + author: + - me +""" + +import json +import random + +bad_responses = ['../foo', '../../foo', '../../../foo', '/../../../foo', '/../foo', '//..//foo', '..//..//foo'] + + +def main(): + print(json.dumps(dict(changed=False, content='', encoding='base64', source=random.choice(bad_responses)))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml b/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml b/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml new file mode 100644 index 00000000..267ae0f0 --- /dev/null +++ b/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml @@ -0,0 +1,141 @@ +# test code for the pip module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: create a file that we can use to fetch + copy: content="test" dest={{ remote_tmp_dir }}/orig + +- name: fetch the test file + fetch: src={{ remote_tmp_dir }}/orig dest={{ output_dir }}/fetched + register: fetched + +- debug: var=fetched + +- name: Assert that we fetched correctly + assert: + that: + - 'fetched["changed"] == True' + - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"' + - 'fetched["remote_checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"' + - 'lookup("file", output_dir + "/fetched/" + inventory_hostname + remote_tmp_dir + "/orig") == "test"' + +# TODO: check the become and non-become forms of fetch because in one form we'll do +# the get method of the connection plugin and in the become case we'll use the +# fetch module. 
+ +- name: fetch a second time to show idempotence + fetch: src={{ remote_tmp_dir }}/orig dest={{ output_dir }}/fetched + register: fetched + +- name: Assert that the file was not fetched the second time + assert: + that: + - 'fetched["changed"] == False' + - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"' + +- name: attempt to fetch a non-existent file - do not fail on missing + fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched fail_on_missing=False + register: fetch_missing_nofail + +- name: check fetch missing no fail result + assert: + that: + - "fetch_missing_nofail.msg" + - "fetch_missing_nofail is not changed" + +- name: attempt to fetch a non-existent file - fail on missing + fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched fail_on_missing=yes + register: fetch_missing + ignore_errors: true + +- name: check fetch missing with failure + assert: + that: + - "fetch_missing is failed" + - "fetch_missing.msg" + - "fetch_missing is not changed" + +- name: attempt to fetch a non-existent file - fail on missing implicit + fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched + register: fetch_missing_implicit + ignore_errors: true + +- name: check fetch missing with failure with implicit fail + assert: + that: + - "fetch_missing_implicit is failed" + - "fetch_missing_implicit.msg" + - "fetch_missing_implicit is not changed" + +- name: attempt to fetch a directory - should not fail but return a message + fetch: src={{ remote_tmp_dir }} dest={{ output_dir }}/somedir fail_on_missing=False + register: fetch_dir + +- name: check fetch directory result + assert: + that: + - "fetch_dir is not changed" + - "fetch_dir.msg" + +- name: attempt to fetch a directory - should fail + fetch: src={{ remote_tmp_dir }} dest={{ output_dir }}/somedir fail_on_missing=True + register: failed_fetch_dir + ignore_errors: true + +- name: check failed fetch directory result + assert: + that: + - "failed_fetch_dir is failed" + - "failed_fetch_dir.msg" + +- name: create symlink to a file that we can fetch + file: + path: "{{ remote_tmp_dir }}/link" + src: "{{ remote_tmp_dir }}/orig" + state: "link" + +- name: fetch the file via a symlink + fetch: src={{ remote_tmp_dir }}/link dest={{ output_dir }}/fetched-link + register: fetched + +- debug: var=fetched + +- name: Assert that we fetched correctly + assert: + that: + - 'fetched["changed"] == True' + - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"' + - 'fetched["remote_checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"' + - 'lookup("file", output_dir + "/fetched-link/" + inventory_hostname + remote_tmp_dir + "/link") == "test"' + +# TODO: check the become and non-become forms of fetch because in one form we'll do +# the get method of the connection plugin and in the become case we'll use the +# fetch module.
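+# For context (per the fetch module documentation): flat=yes bypasses the +# hostname/path tree and treats dest as the literal destination, so dest must name a +# file or end in "/"; pointing it at an existing directory without a trailing slash +# fails, which the next task exercises.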
+ +- name: dest is an existing directory name without trailing slash and flat=yes, should fail + fetch: + src: "{{ remote_tmp_dir }}/orig" + dest: "{{ output_dir }}" + flat: yes + register: failed_fetch_dest_dir + ignore_errors: true + +- name: check that it indeed failed + assert: + that: + - "failed_fetch_dest_dir is failed" + - "failed_fetch_dest_dir.msg" diff --git a/test/integration/targets/fetch/run_fetch_tests.yml b/test/integration/targets/fetch/run_fetch_tests.yml new file mode 100644 index 00000000..f2ff1df3 --- /dev/null +++ b/test/integration/targets/fetch/run_fetch_tests.yml @@ -0,0 +1,5 @@ +- name: call fetch_tests role + hosts: testhost + gather_facts: false + roles: + - fetch_tests diff --git a/test/integration/targets/fetch/runme.sh b/test/integration/targets/fetch/runme.sh new file mode 100755 index 00000000..7e909dde --- /dev/null +++ b/test/integration/targets/fetch/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eux + +# setup required roles +ln -s ../../setup_remote_tmp_dir roles/setup_remote_tmp_dir + +# run old type role tests +ansible-playbook -i ../../inventory run_fetch_tests.yml -e "output_dir=${OUTPUT_DIR}" -v "$@" + +# run tests to avoid path injection from slurp when fetch uses become +ansible-playbook -i ../../inventory injection/avoid_slurp_return.yml -e "output_dir=${OUTPUT_DIR}" -v "$@" diff --git a/test/integration/targets/file/aliases b/test/integration/targets/file/aliases new file mode 100644 index 00000000..4a2ce27c --- /dev/null +++ b/test/integration/targets/file/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +needs/root +skip/aix diff --git a/test/integration/targets/file/defaults/main.yml b/test/integration/targets/file/defaults/main.yml new file mode 100644 index 00000000..8e9a5836 --- /dev/null +++ b/test/integration/targets/file/defaults/main.yml @@ -0,0 +1,2 @@ +--- +remote_unprivileged_user: tmp_ansible_test_user diff --git a/test/integration/targets/file/files/foo.txt b/test/integration/targets/file/files/foo.txt new file mode 100644 index 00000000..7c6ded14 --- /dev/null +++ b/test/integration/targets/file/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/targets/file/files/foobar/directory/fileC b/test/integration/targets/file/files/foobar/directory/fileC new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/file/files/foobar/directory/fileD b/test/integration/targets/file/files/foobar/directory/fileD new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/file/files/foobar/fileA b/test/integration/targets/file/files/foobar/fileA new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/file/files/foobar/fileB b/test/integration/targets/file/files/foobar/fileB new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/file/meta/main.yml b/test/integration/targets/file/meta/main.yml new file mode 100644 index 00000000..06d4fd29 --- /dev/null +++ b/test/integration/targets/file/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_nobody diff --git a/test/integration/targets/file/tasks/diff_peek.yml b/test/integration/targets/file/tasks/diff_peek.yml new file mode 100644 index 00000000..802a99aa --- /dev/null +++ b/test/integration/targets/file/tasks/diff_peek.yml @@ -0,0 +1,10 @@ +- name: Run task with _diff_peek + file: + path: "{{ output_file }}" + _diff_peek: yes + register: diff_peek_result + +- name: Ensure warning was not issued when using _diff_peek parameter + assert: + that: + - 
diff_peek_result['warnings'] is not defined diff --git a/test/integration/targets/file/tasks/directory_as_dest.yml b/test/integration/targets/file/tasks/directory_as_dest.yml new file mode 100644 index 00000000..9b6ddb5d --- /dev/null +++ b/test/integration/targets/file/tasks/directory_as_dest.yml @@ -0,0 +1,345 @@ +# File module tests for overwriting directories +- name: Initialize the test output dir + include: initialize.yml + +# We need to make this more consistent: +# https://github.com/ansible/proposals/issues/111 +# +# This series of tests documents the current inconsistencies. We should not +# break these by accident but if we approve a proposal we can break these on +# purpose. + +# +# Setup +# + +- name: create a test sub-directory + file: + dest: '{{output_dir}}/sub1' + state: directory + +- name: create a file for linking to + copy: + dest: '{{output_dir}}/file_to_link' + content: 'Hello World' + +# +# Error condition: specify a directory with state={link,file}, force=False +# + +# file raises an error +- name: Try to create a file with directory as dest + file: + dest: '{{output_dir}}/sub1' + state: file + force: False + ignore_errors: True + register: file1_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file1_dir_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file1_result is failed' + - 'file1_dir_stat["stat"].isdir' + +# link raises an error +- name: Try to create a symlink with directory as dest + file: + src: '{{ output_dir }}/file_to_link' + dest: '{{output_dir}}/sub1' + state: link + force: False + ignore_errors: True + register: file2_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file2_dir_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file2_result is failed' + - 'file2_dir_stat["stat"].isdir' + +# +# Error condition: file and link with non-empty directory +# + +- copy: + content: 'test' + dest: '{{ output_dir }}/sub1/passwd' + +# file raises an error +- name: Try to create a file with directory as dest + file: + dest: '{{output_dir}}/sub1' + state: file + force: True + ignore_errors: True + register: file3_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file3_dir_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file3_result is failed' + - 'file3_dir_stat["stat"].isdir' + +# link raises an error +- name: Try to create a symlink with directory as dest + file: + src: '{{ output_dir }}/file_to_link' + dest: '{{output_dir}}/sub1' + state: link + force: True + ignore_errors: True + register: file4_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file4_dir_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file4_result is failed' + - 'file4_dir_stat["stat"].isdir' + +# Cleanup the file that made it non-empty +- name: Cleanup the file that made the directory nonempty + file: + state: 'absent' + dest: '{{ output_dir }}/sub1/passwd' + +# +# Error condition: file cannot even overwrite an empty directory with force=True +# + +# file raises an error +- name: Try to create a file with directory as dest + file: + dest: 
'{{output_dir}}/sub1' + state: file + force: True + ignore_errors: True + register: file5_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file5_dir_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file5_result is failed' + - 'file5_dir_stat["stat"].isdir' + +# +# Directory overwriting - link with force=True will overwrite an empty directory +# + +# link can overwrite an empty directory with force=True +- name: Try to create a symlink with directory as dest + file: + src: '{{ output_dir }}/file_to_link' + dest: '{{output_dir}}/sub1' + state: link + force: True + register: file6_result + +- name: Get stat info to show the directory has been overwritten + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file6_dir_stat + +- name: verify that the directory was overwritten + assert: + that: + - 'file6_result is changed' + - 'not file6_dir_stat["stat"].isdir' + - 'file6_dir_stat["stat"].islnk' + +# +# Cleanup from last set of tests +# + +- name: Cleanup the test subdirectory + file: + dest: '{{output_dir}}/sub1' + state: 'absent' + +- name: Re-create the test sub-directory + file: + dest: '{{output_dir}}/sub1' + state: 'directory' + +# +# Hard links have the proposed 111 behaviour already: Place the new file inside the directory +# + +- name: Try to create a hardlink with directory as dest + file: + src: '{{ output_dir }}/file_to_link' + dest: '{{ output_dir }}/sub1' + state: hard + force: False + ignore_errors: True + register: file7_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file7_dir_stat + +- name: Get stat info to show the link has been created + stat: + path: '{{ output_dir }}/sub1/file_to_link' + follow: False + register: file7_link_stat + +- debug: + var: file7_link_stat + +- name: verify that the directory was not overwritten + assert: + that: + - 'file7_result is changed' + - 'file7_dir_stat["stat"].isdir' + - 'file7_link_stat["stat"].isfile' + - 'file7_link_stat["stat"].isfile' + ignore_errors: True + +# +# Touch is a bit different than everything else. +# If we need to set timestamps we should probably add atime, mtime, and ctime parameters +# But I think touch was written because state=file didn't create a file if it +# didn't already exist. We should look at changing that behaviour. 
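+# (For comparison, a sketch of the analogous shell behaviour: POSIX touch(1) also +# succeeds on an existing directory and merely updates its atime/mtime, which is the +# behaviour the tasks below assert for state=touch.)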
+ +- name: Get initial stat info to compare with later + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file8_initial_dir_stat + +- name: Pause to ensure stat times are not the exact same + pause: + seconds: 1 + +- name: Use touch with directory as dest + file: + dest: '{{output_dir}}/sub1' + state: touch + force: False + register: file8_result + +- name: Get stat info to show the directory has not been changed to a file + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file8_dir_stat + +- name: verify that the directory has been updated + assert: + that: + - 'file8_result is changed' + - 'file8_dir_stat["stat"].isdir' + - 'file8_dir_stat["stat"]["mtime"] != file8_initial_dir_stat["stat"]["mtime"]' + +- name: Get initial stat info to compare with later + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file11_initial_dir_stat + +- name: Use touch with directory as dest and keep mtime and atime + file: + dest: '{{output_dir}}/sub1' + state: touch + force: False + modification_time: preserve + access_time: preserve + register: file11_result + +- name: Get stat info to show the directory has not been changed + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file11_dir_stat + +- name: verify that the directory has not been updated + assert: + that: + - 'file11_result is not changed' + - 'file11_dir_stat["stat"].isdir' + - 'file11_dir_stat["stat"]["mtime"] == file11_initial_dir_stat["stat"]["mtime"]' + - 'file11_dir_stat["stat"]["atime"] == file11_initial_dir_stat["stat"]["atime"]' + +# +# State=directory realizes that the directory already exists and does nothing +# +- name: Get initial stat info to compare with later + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file9_initial_dir_stat + +- name: Use directory with directory as dest + file: + dest: '{{output_dir}}/sub1' + state: directory + force: False + register: file9_result + +- name: Get stat info to show the directory has not been changed + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file9_dir_stat + +- name: verify that the directory has not been updated + assert: + that: + - 'file9_result is not changed' + - 'file9_dir_stat["stat"].isdir' + - 'file9_dir_stat["stat"]["mtime"] == file9_initial_dir_stat["stat"]["mtime"]' + +- name: Use directory with directory as dest and force=True + file: + dest: '{{output_dir}}/sub1' + state: directory + force: True + register: file10_result + +- name: Get stat info to show the directory has not been changed + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file10_dir_stat + +- name: verify that the directory has not been updated + assert: + that: + - 'file10_result is not changed' + - 'file10_dir_stat["stat"].isdir' + - 'file10_dir_stat["stat"]["mtime"] == file9_initial_dir_stat["stat"]["mtime"]' diff --git a/test/integration/targets/file/tasks/initialize.yml b/test/integration/targets/file/tasks/initialize.yml new file mode 100644 index 00000000..dd7d1274 --- /dev/null +++ b/test/integration/targets/file/tasks/initialize.yml @@ -0,0 +1,15 @@ +# +# Cleanup the output dir and recreate it for the tests to operate on +# +- name: Cleanup the output directory + file: + dest: '{{ output_dir }}' + state: 'absent' + +- name: Recreate the toplevel output dir + file: + dest: '{{ output_dir }}' + state: 'directory' + +- name: prep with a basic file to operate on + copy: src=foo.txt dest={{output_file}} diff --git a/test/integration/targets/file/tasks/main.yml
b/test/integration/targets/file/tasks/main.yml new file mode 100644 index 00000000..34ae4ba4 --- /dev/null +++ b/test/integration/targets/file/tasks/main.yml @@ -0,0 +1,752 @@ +# Test code for the file module. +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +- set_fact: output_file={{output_dir}}/foo.txt + +# same as expanduser & expandvars called on managed host +- command: 'echo {{ output_file }}' + register: echo + +- set_fact: + remote_file_expanded: '{{ echo.stdout }}' + +# Import the test tasks +- name: Run tests for state=link + import_tasks: state_link.yml + +- name: Run tests for directory as dest + import_tasks: directory_as_dest.yml + +- name: Run tests for unicode + import_tasks: unicode_path.yml + environment: + LC_ALL: C + LANG: C + +- name: decide whether to include the selinux tests + include_tasks: selinux_tests.yml + when: selinux_installed is defined and selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled" + +- name: Initialize the test output dir + import_tasks: initialize.yml + +- name: Test _diff_peek + import_tasks: diff_peek.yml + + +# These tests need to be organized by state parameter into separate files later + +- name: verify that we are checking a file and it is present + file: path={{output_file}} state=file + register: file_result + +- name: verify that the file was not marked as changed + assert: + that: + - "file_result.changed == false" + - "file_result.state == 'file'" + +- name: Make sure file does not exist + file: + path: /tmp/ghost + state: absent + +- name: Target a file that does not exist + file: + path: /tmp/ghost + ignore_errors: yes + register: ghost_file_result + +- name: Validate ghost file results + assert: + that: + - ghost_file_result is failed + - ghost_file_result is not changed + - ghost_file_result.state == 'absent' + - "'cannot continue' in ghost_file_result.msg" + +- name: verify that we are checking an absent file + file: path={{output_dir}}/bar.txt state=absent + register: file2_result + +- name: verify that the file was not marked as changed + assert: + that: + - "file2_result.changed == false" + - "file2_result.state == 'absent'" + +- name: verify we can touch a file + file: path={{output_dir}}/baz.txt state=touch + register: file3_result + +- name: verify that the file was marked as changed + assert: + that: + - "file3_result.changed == true" + - "file3_result.state == 'file'" + - "file3_result.mode == '0644'" + +- name: change file mode + file: path={{output_dir}}/baz.txt mode=0600 + register: file4_result + +- name: verify that the file was marked as changed + assert: + that: + - "file4_result.changed == true" + - "file4_result.mode == '0600'" + +- name: define file to verify chattr/lsattr with + set_fact: + attributes_file: "{{ output_dir }}/attributes.txt" + attributes_supported: no + +- name: create file to verify chattr/lsattr with + command: touch "{{ attributes_file }}"
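The tasks below probe chattr/lsattr support by toggling the "A" (no atime updates) attribute and parsing lsattr output. `lsattr -vd` prints the file version first and the attribute flags second, which is why the support check below reads `stdout_lines[0].split()[1]`. A minimal sketch of that parsing, with a hard-coded sample line standing in for real lsattr output (the sample line is an assumption; flag strings vary by filesystem):

    - name: show how lsattr -vd output is parsed (illustrative sketch)
      vars:
        sample_lsattr_line: '12345 -------A------------- /tmp/attributes.txt'   # hypothetical lsattr -vd output
      debug:
        msg: "'A' attribute present: {{ 'A' in sample_lsattr_line.split()[1] }}"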
- name: add "A" attribute to file + command: chattr +A "{{ attributes_file }}" + ignore_errors: yes + +- name: get attributes from file + # Use of `-v` is important, as that is what the module does (through `set_attributes_if_different` and then `get_file_attributes` in basic.py). + # On some systems, such as in containers, attributes work, but file versions may not. + # It should be possible to update `set_attributes_if_different` in the future to not use `-v` since the file version is unrelated to the attributes. + command: lsattr -vd "{{ attributes_file }}" + register: attribute_A_set + ignore_errors: yes + +- name: remove "A" attribute from file + command: chattr -A "{{ attributes_file }}" + ignore_errors: yes + +- name: get attributes from file + # See the note above on use of the `-v` option. + command: lsattr -vd "{{ attributes_file }}" + register: attribute_A_unset + ignore_errors: yes + +- name: determine if chattr/lsattr is supported + set_fact: + attributes_supported: yes + when: + - attribute_A_set is success + - "'A' in attribute_A_set.stdout_lines[0].split()[1]" + - attribute_A_unset is success + - "'A' not in attribute_A_unset.stdout_lines[0].split()[1]" + +- name: explicitly set file attribute "A" + file: path={{output_dir}}/baz.txt attributes=A + register: file_attributes_result + ignore_errors: True + when: attributes_supported + +- name: add file attribute "A" + file: path={{output_dir}}/baz.txt attributes=+A + register: file_attributes_result_2 + when: file_attributes_result is changed + +- name: verify that the file was not marked as changed + assert: + that: + - "file_attributes_result_2 is not changed" + when: file_attributes_result is changed + +- name: remove file attribute "A" + file: path={{output_dir}}/baz.txt attributes=-A + register: file_attributes_result_3 + ignore_errors: True + +- name: explicitly remove file attributes + file: path={{output_dir}}/baz.txt attributes="" + register: file_attributes_result_4 + when: file_attributes_result_3 is changed + +- name: verify that the file was not marked as changed + assert: + that: + - "file_attributes_result_4 is not changed" + when: file_attributes_result_3 is changed + +- name: change ownership and group + file: path={{output_dir}}/baz.txt owner=1234 group=1234 + +- name: Get stat info to check atime later + stat: path={{output_dir}}/baz.txt + register: file_attributes_result_5_before + +- name: update access time + file: path={{output_dir}}/baz.txt access_time=now + register: file_attributes_result_5 + +- name: Get stat info to check atime later + stat: path={{output_dir}}/baz.txt + register: file_attributes_result_5_after + +- name: verify that the file was marked as changed and atime changed + assert: + that: + - "file_attributes_result_5 is changed" + - "file_attributes_result_5_after['stat']['atime'] != file_attributes_result_5_before['stat']['atime']" + +- name: setup a tmp-like directory for ownership test + file: path=/tmp/worldwritable mode=1777 state=directory + +- name: Ask to create a file without enough perms to change ownership + file: path=/tmp/worldwritable/baz.txt state=touch owner=root + become: yes + become_user: nobody + register: chown_result + ignore_errors: True + +- name: Ask whether the new file exists + stat: path=/tmp/worldwritable/baz.txt + register: file_exists_result + +- name: Verify that the file doesn't exist on failure + assert: + that: + - "chown_result.failed == True" + - "file_exists_result.stat.exists == False" + +- name: clean up + file: path=/tmp/worldwritable state=absent
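The hard-link assertions that follow rest on one property: both directory entries of a hard link reference the same inode, so a touch or chmod through either name is visible through the other. A minimal sketch of checking that property (both paths are assumptions):

    - name: stat both names of a hard link (illustrative sketch)
      stat:
        path: "{{ item }}"
      loop:
        - /tmp/original.txt   # hypothetical file
        - /tmp/hard.txt       # hypothetical hard link to the same inode
      register: pair

    - name: verify that the two names share an inode
      assert:
        that:
          - pair.results[0].stat.inode == pair.results[1].stat.inode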
- name: create hard link to file + file: src={{output_file}} dest={{output_dir}}/hard.txt state=hard + register: file6_result + +- name: verify that the file was marked as changed + assert: + that: + - "file6_result.changed == true" + +- name: touch a hard link + file: + dest: '{{ output_dir }}/hard.txt' + state: 'touch' + register: file6_touch_result + +- name: verify that the hard link was touched + assert: + that: + - "file6_touch_result.changed == true" + +- name: stat1 + stat: path={{output_file}} + register: hlstat1 + +- name: stat2 + stat: path={{output_dir}}/hard.txt + register: hlstat2 + +- name: verify that hard link is still the same after timestamp updated + assert: + that: + - "hlstat1.stat.inode == hlstat2.stat.inode" + +- name: create hard link to file 2 + file: src={{output_file}} dest={{output_dir}}/hard.txt state=hard + register: hlink_result + +- name: verify that hard link creation is idempotent + assert: + that: + - "hlink_result.changed == False" + +- name: Change mode on a hard link + file: src={{output_file}} dest={{output_dir}}/hard.txt mode=0701 + register: file6_mode_change + +- name: verify that the hard link mode was changed + assert: + that: + - "file6_mode_change.changed == true" + +- name: stat1 + stat: path={{output_file}} + register: hlstat1 + +- name: stat2 + stat: path={{output_dir}}/hard.txt + register: hlstat2 + +- name: verify that hard link is still the same after mode updated + assert: + that: + - "hlstat1.stat.inode == hlstat2.stat.inode" + - "hlstat1.stat.mode == '0701'" + +- name: create a directory + file: path={{output_dir}}/foobar state=directory + register: file7_result + +- name: verify that the file was marked as changed + assert: + that: + - "file7_result.changed == true" + - "file7_result.state == 'directory'" + +- name: determine if selinux is installed + shell: which getenforce || exit 0 + register: selinux_installed + +- name: determine if selinux is enabled + shell: getenforce + register: selinux_enabled + when: selinux_installed.stdout != "" + ignore_errors: true + +- name: remove directory foobar + file: path={{output_dir}}/foobar state=absent + +- name: remove file foo.txt + file: path={{output_dir}}/foo.txt state=absent + +- name: remove file bar.txt + file: path={{output_dir}}/bar.txt state=absent + +- name: remove file baz.txt + file: path={{output_dir}}/baz.txt state=absent + +- name: copy directory structure over + copy: src=foobar dest={{output_dir}} + +- name: check what would be removed if folder state was absent and diff is enabled + file: + path: "{{ item }}" + state: absent + check_mode: yes + diff: yes + with_items: + - "{{ output_dir }}" + - "{{ output_dir }}/foobar/fileA" + register: folder_absent_result + +- name: 'assert that the "absent" state lists expected files and folders for only directories' + assert: + that: + - folder_absent_result.results[0].diff.before.path_content is defined + - folder_absent_result.results[1].diff.before.path_content is not defined + - test_folder in folder_absent_result.results[0].diff.before.path_content.directories + - test_file in folder_absent_result.results[0].diff.before.path_content.files + vars: + test_folder: "{{ folder_absent_result.results[0].path }}/foobar" + test_file: "{{ folder_absent_result.results[0].path }}/foobar/fileA"
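The ownership tasks that follow contrast the default non-recursive behaviour with recurse=yes, the module's analogue of chown -R; recurse only applies together with state=directory. A minimal sketch (path and owner are assumptions):

    - name: recursively hand a tree over to a service account (illustrative sketch)
      file:
        path: /srv/app/data     # hypothetical tree
        state: directory        # recurse requires state=directory
        owner: appuser          # hypothetical account
        recurse: yes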
- name: Change ownership of a directory with recurse=no (default) + file: path={{output_dir}}/foobar owner=1234 + +- name: verify that the permission of the directory was set + file: path={{output_dir}}/foobar state=directory + register: file8_result + +- name: assert that the directory has changed to have owner 1234 + assert: + that: + - "file8_result.uid == 1234" + +- name: verify that the permission of a file under the directory was not set + file: path={{output_dir}}/foobar/fileA state=file + register: file9_result + +- name: assert the file owner has not changed to 1234 + assert: + that: + - "file9_result.uid != 1234" + +- name: change the ownership of a directory with recurse=yes + file: path={{output_dir}}/foobar owner=1235 recurse=yes + +- name: verify that the permission of the directory was set + file: path={{output_dir}}/foobar state=directory + register: file10_result + +- name: assert that the directory has changed to have owner 1235 + assert: + that: + - "file10_result.uid == 1235" + +- name: verify that the permission of a file under the directory was set + file: path={{output_dir}}/foobar/fileA state=file + register: file11_result + +- name: assert that the file has changed to have owner 1235 + assert: + that: + - "file11_result.uid == 1235" + +- name: remove directory foobar + file: path={{output_dir}}/foobar state=absent + register: file14_result + +- name: verify that the directory was removed + assert: + that: + - 'file14_result.changed == true' + - 'file14_result.state == "absent"' + +- name: create a test sub-directory + file: dest={{output_dir}}/sub1 state=directory + register: file15_result + +- name: verify that the new directory was created + assert: + that: + - 'file15_result.changed == true' + - 'file15_result.state == "directory"' + +- name: create test files in the sub-directory + file: dest={{output_dir}}/sub1/{{item}} state=touch + with_items: + - file1 + - file2 + - file3 + register: file16_result + +- name: verify the files were created + assert: + that: + - 'item.changed == true' + - 'item.state == "file"' + with_items: "{{file16_result.results}}" + +- name: test file creation with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0777' + +- name: modify symbolic mode for all + file: dest={{output_dir}}/test_symbolic state=touch mode=a=r + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0444' + +- name: modify symbolic mode for owner + file: dest={{output_dir}}/test_symbolic state=touch mode=u+w + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0644' + +- name: modify symbolic mode for group + file: dest={{output_dir}}/test_symbolic state=touch mode=g+w + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0664' + +- name: modify symbolic mode for world + file: dest={{output_dir}}/test_symbolic state=touch mode=o+w + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0666' + +- name: modify symbolic mode for owner + file: dest={{output_dir}}/test_symbolic state=touch mode=u+x + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0766' + +- name: modify symbolic mode for group + file: dest={{output_dir}}/test_symbolic state=touch mode=g+x + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0776' + +- name: modify symbolic mode for world + file: dest={{output_dir}}/test_symbolic state=touch mode=o+x + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0777' + +- name: remove symbolic mode for world + file: dest={{output_dir}}/test_symbolic state=touch mode=o-wx + 
register: result + +- name: assert file mode + assert: + that: + - result.mode == '0774' + +- name: remove symbolic mode for group + file: dest={{output_dir}}/test_symbolic state=touch mode=g-wx + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0744' + +- name: remove symbolic mode for owner + file: dest={{output_dir}}/test_symbolic state=touch mode=u-wx + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0444' + +- name: set sticky bit with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=o+t + register: result + +- name: assert file mode + assert: + that: + - result.mode == '01444' + +- name: remove sticky bit with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=o-t + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0444' + +- name: add setgid with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=g+s + register: result + +- name: assert file mode + assert: + that: + - result.mode == '02444' + +- name: remove setgid with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=g-s + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0444' + +- name: add setuid with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=u+s + register: result + +- name: assert file mode + assert: + that: + - result.mode == '04444' + +- name: remove setuid with symbolic mode + file: dest={{output_dir}}/test_symbolic state=touch mode=u-s + register: result + +- name: assert file mode + assert: + that: + - result.mode == '0444' + +# https://github.com/ansible/ansible/issues/50943 +# Need to use /tmp as nobody can't access output_dir at all +- name: create file as root with all write permissions + file: dest=/tmp/write_utime state=touch mode=0666 owner={{ansible_user_id}} + +- name: Pause to ensure stat times are not the exact same + pause: + seconds: 1 + +- block: + - name: get previous time + stat: path=/tmp/write_utime + register: previous_time + + - name: pause for 1 second to ensure the next touch is newer + pause: seconds=1 + + - name: touch file as nobody + file: dest=/tmp/write_utime state=touch + become: True + become_user: nobody + register: result + + - name: get new time + stat: path=/tmp/write_utime + register: current_time + + always: + - name: remove test utime file + file: path=/tmp/write_utime state=absent + +- name: assert touch file as nobody + assert: + that: + - result is changed + - current_time.stat.atime > previous_time.stat.atime + - current_time.stat.mtime > previous_time.stat.mtime + +# Follow + recursive tests +- name: create a toplevel directory + file: path={{output_dir}}/test_follow_rec state=directory mode=0755 + +- name: create a file outside of the toplevel + file: path={{output_dir}}/test_follow_rec_target_file state=touch mode=0700 + +- name: create a directory outside of the toplevel + file: path={{output_dir}}/test_follow_rec_target_dir state=directory mode=0700 + +- name: create a file inside of the link target directory + file: path={{output_dir}}/test_follow_rec_target_dir/foo state=touch mode=0700 + +- name: create a symlink to the file + file: path={{output_dir}}/test_follow_rec/test_link state=link src="../test_follow_rec_target_file" + +- name: create a symlink to the directory + file: path={{output_dir}}/test_follow_rec/test_link_dir state=link src="../test_follow_rec_target_dir" + +- name: create a symlink to a nonexistent file 
+ file: path={{output_dir}}/test_follow_rec/nonexistent state=link src=does_not_exist force=True + +- name: try to change permissions without following symlinks + file: path={{output_dir}}/test_follow_rec follow=False mode="a-x" recurse=True + +- name: stat the link file target + stat: path={{output_dir}}/test_follow_rec_target_file + register: file_result + +- name: stat the link dir target + stat: path={{output_dir}}/test_follow_rec_target_dir + register: dir_result + +- name: stat the file inside the link dir target + stat: path={{output_dir}}/test_follow_rec_target_dir/foo + register: file_in_dir_result + +- name: assert that the link targets were unmodified + assert: + that: + - file_result.stat.mode == '0700' + - dir_result.stat.mode == '0700' + - file_in_dir_result.stat.mode == '0700' + +- name: try to change permissions with following symlinks + file: path={{output_dir}}/test_follow_rec follow=True mode="a-x" recurse=True + +- name: stat the link file target + stat: path={{output_dir}}/test_follow_rec_target_file + register: file_result + +- name: stat the link dir target + stat: path={{output_dir}}/test_follow_rec_target_dir + register: dir_result + +- name: stat the file inside the link dir target + stat: path={{output_dir}}/test_follow_rec_target_dir/foo + register: file_in_dir_result + +- name: assert that the link targets were modified + assert: + that: + - file_result.stat.mode == '0600' + - dir_result.stat.mode == '0600' + - file_in_dir_result.stat.mode == '0600' + +# https://github.com/ansible/ansible/issues/55971 +- name: Test missing src and path + file: + state: hard + register: file_error1 + ignore_errors: yes + +- assert: + that: + - "file_error1 is failed" + - "file_error1.msg == 'missing required arguments: path'" + +- name: Test missing src + file: + dest: "{{ output_dir }}/hard.txt" + state: hard + register: file_error2 + ignore_errors: yes + +- assert: + that: + - "file_error2 is failed" + - "file_error2.msg == 'src is required for creating new hardlinks'" + +- name: Test non-existing src + file: + src: non-existing-file-that-does-not-exist.txt + dest: "{{ output_dir }}/hard.txt" + state: hard + register: file_error3 + ignore_errors: yes + +- assert: + that: + - "file_error3 is failed" + - "file_error3.msg == 'src does not exist'" + - "file_error3.dest == '{{ output_dir }}/hard.txt' | expanduser" + - "file_error3.src == 'non-existing-file-that-does-not-exist.txt'" + +- block: + - name: Create a testing file + file: + dest: original_file.txt + state: touch + + - name: Test relative path with state=hard + file: + src: original_file.txt + dest: hard_link_file.txt + state: hard + register: hard_link_relpath + + - name: Just check if it was successful, we don't care about the actual hard link in this test + assert: + that: + - "hard_link_relpath is success" + + always: + - name: Clean up + file: + path: "{{ item }}" + state: absent + loop: + - original_file.txt + - hard_link_file.txt + +# END #55971 diff --git a/test/integration/targets/file/tasks/selinux_tests.yml b/test/integration/targets/file/tasks/selinux_tests.yml new file mode 100644 index 00000000..6a95c442 --- /dev/null +++ b/test/integration/targets/file/tasks/selinux_tests.yml @@ -0,0 +1,33 @@ +# Test code for the file module - selinux subtasks. 
+# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +- name: Initialize the test output dir + include: initialize.yml + +- name: touch a file for testing + file: path={{output_dir}}/foo-se.txt state=touch + register: file_se_result + +- name: verify that the file was marked as changed + assert: + that: + - "file_se_result.changed == true" + - "file_se_result.secontext == 'unconfined_u:object_r:admin_home_t:s0'" + +- name: remove the file used for testing + file: path={{output_dir}}/foo-se.txt state=absent diff --git a/test/integration/targets/file/tasks/state_link.yml b/test/integration/targets/file/tasks/state_link.yml new file mode 100644 index 00000000..89150adc --- /dev/null +++ b/test/integration/targets/file/tasks/state_link.yml @@ -0,0 +1,487 @@ +# file module tests for dealing with symlinks (state=link) + +- name: Initialize the test output dir + include: initialize.yml + +# +# Basic absolute symlink to a file +# +- name: create soft link to file + file: src={{output_file}} dest={{output_dir}}/soft.txt state=link + register: file1_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/soft.txt' + follow: False + register: file1_link_stat + +- name: verify that the symlink was created correctly + assert: + that: + - 'file1_result is changed' + - 'file1_link_stat["stat"].islnk' + - 'file1_link_stat["stat"].lnk_target | expanduser == output_file | expanduser' + +# +# Change an absolute soft link into a relative soft link +# +- name: change soft link to relative + file: src={{output_file|basename}} dest={{output_dir}}/soft.txt state=link + register: file2_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/soft.txt' + follow: False + register: file2_link_stat + +- name: verify that the file was marked as changed + assert: + that: + - "file2_result is changed" + - "file2_result.diff.before.src == remote_file_expanded" + - "file2_result.diff.after.src == remote_file_expanded|basename" + - "file2_link_stat['stat'].islnk" + - "file2_link_stat['stat'].lnk_target == remote_file_expanded | basename" + +# +# Check that creating the soft link a second time was idempotent +# +- name: soft link idempotency check + file: src={{output_file|basename}} dest={{output_dir}}/soft.txt state=link + register: file3_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/soft.txt' + follow: False + register: file3_link_stat + +- name: verify that the file was not marked as changed + assert: + that: + - "not file3_result is changed" + - "file3_link_stat['stat'].islnk" + - "file3_link_stat['stat'].lnk_target == remote_file_expanded | basename" + +# +# Test symlink to nonexistent files +# +- name: fail to create soft link to nonexistent file + file: + src: '/nonexistent' + dest: '{{output_dir}}/soft2.txt' + state: 'link' + force: False + register: file4_result + ignore_errors: true + +- name: verify that link was not created + 
assert: + that: + - "file4_result is failed" + +- name: force creation of soft link to nonexistent file + file: + src: '/nonexistent' + dest: '{{ output_dir}}/soft2.txt' + state: 'link' + force: True + register: file5_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/soft2.txt' + follow: False + register: file5_link_stat + +- name: verify that link was created + assert: + that: + - "file5_result is changed" + - "file5_link_stat['stat'].islnk" + - "file5_link_stat['stat'].lnk_target == '/nonexistent'" + +- name: Prove idempotence of force creation of soft link to nonexistent file + file: + src: '/nonexistent' + dest: '{{ output_dir }}/soft2.txt' + state: 'link' + force: True + register: file6a_result + +- name: verify that the link to nonexistent is idempotent + assert: + that: + - "file6a_result.changed == false" + +# In order for a symlink in a sticky world writable directory to be followed, it must +# either be owned by the follower, +# or the directory and symlink must have the same owner. +- name: symlink in sticky directory + block: + - name: Create unprivileged remote user + user: + name: '{{ remote_unprivileged_user }}' + register: user + + - name: Create a local temporary directory + tempfile: + state: directory + register: tempdir + + - name: Set sticky bit + file: + path: '{{ tempdir.path }}' + mode: o=rwXt + + - name: 'Check mode: force creation soft link in sticky directory owned by another user (mode is used)' + file: + src: '{{ user.home }}/nonexistent' + dest: '{{ tempdir.path }}/soft3.txt' + mode: 0640 + state: 'link' + owner: '{{ remote_unprivileged_user }}' + force: true + follow: false + check_mode: true + register: missing_dst_no_follow_enable_force_use_mode1 + + - name: force creation soft link in sticky directory owned by another user (mode is used) + file: + src: '{{ user.home }}/nonexistent' + dest: '{{ tempdir.path }}/soft3.txt' + mode: 0640 + state: 'link' + owner: '{{ remote_unprivileged_user }}' + force: true + follow: false + register: missing_dst_no_follow_enable_force_use_mode2 + + - name: Get stat info for the link + stat: + path: '{{ tempdir.path }}/soft3.txt' + follow: false + register: soft3_result + + - name: 'Idempotence: force creation soft link in sticky directory owned by another user (mode is used)' + file: + src: '{{ user.home }}/nonexistent' + dest: '{{ tempdir.path }}/soft3.txt' + mode: 0640 + state: 'link' + owner: '{{ remote_unprivileged_user }}' + force: yes + follow: false + register: missing_dst_no_follow_enable_force_use_mode3 + always: + - name: Delete unprivileged remote user + user: + name: '{{ remote_unprivileged_user }}' + state: absent + + - name: Delete unprivileged user home and tempdir + file: + path: "{{ item }}" + state: absent + loop: + - '{{ tempdir.path }}' + - '{{ user.home }}' + +- name: verify that link was created + assert: + that: + - "missing_dst_no_follow_enable_force_use_mode1 is changed" + - "missing_dst_no_follow_enable_force_use_mode2 is changed" + - "missing_dst_no_follow_enable_force_use_mode3 is not changed" + - "soft3_result['stat'].islnk" + - "soft3_result['stat'].lnk_target == '{{ user.home }}/nonexistent'"
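The ownership rules exercised by the sticky-directory block above match the kernel's symlink-following protection (exposed on Linux as the fs.protected_symlinks sysctl): when it is enabled, a symlink in a sticky world-writable directory is followed only for the link's owner, or when the link and directory owners match. A sketch for inspecting that setting (assumes a Linux host; not part of the test flow):

    - name: report whether symlink protection is enabled (illustrative sketch)
      command: sysctl -n fs.protected_symlinks
      register: protected_symlinks
      changed_when: false

    - debug:
        msg: "fs.protected_symlinks = {{ protected_symlinks.stdout }}"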
+ +# +# Test creating a link to a directory https://github.com/ansible/ansible/issues/1369 +# +- name: create soft link to directory using absolute path + file: + src: '/' + dest: '{{ output_dir }}/root' + state: 'link' + register: file6_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/root' + follow: False + register: file6_link_stat + +- name: Get stat info for the pointed to file + stat: + path: '{{ output_dir }}/root' + follow: True + register: file6_links_dest_stat + +- name: Get stat info for the file we intend to point to + stat: + path: '/' + follow: False + register: file6_dest_stat + +- name: verify that the link was created correctly + assert: + that: + # file module reports it created something + - "file6_result.changed == true" + # file module created a link + - 'file6_link_stat["stat"]["islnk"]' + # Link points to the right path + - 'file6_link_stat["stat"]["lnk_target"] == "/"' + # The link target and the file we intended to link to have the same inode + - 'file6_links_dest_stat["stat"]["inode"] == file6_dest_stat["stat"]["inode"]' + +# +# Test creating a relative link +# + +# Relative link to file +- name: create a test sub-directory to link to + file: + dest: '{{ output_dir }}/sub1' + state: 'directory' + +- name: create a file to link to in the test sub-directory + file: + dest: '{{ output_dir }}/sub1/file1' + state: 'touch' + +- name: create another test sub-directory to place links within + file: + dest: '{{output_dir}}/sub2' + state: 'directory' + +- name: create soft link to relative file + file: + src: '../sub1/file1' + dest: '{{ output_dir }}/sub2/link1' + state: 'link' + register: file7_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/sub2/link1' + follow: False + register: file7_link_stat + +- name: Get stat info for the pointed to file + stat: + path: '{{ output_dir }}/sub2/link1' + follow: True + register: file7_links_dest_stat + +- name: Get stat info for the file we intend to point to + stat: + path: '{{ output_dir }}/sub1/file1' + follow: False + register: file7_dest_stat + +- name: verify that the link was created correctly + assert: + that: + # file module reports it created something + - "file7_result.changed == true" + # file module created a link + - 'file7_link_stat["stat"]["islnk"]' + # Link points to the right path + - 'file7_link_stat["stat"]["lnk_target"] == "../sub1/file1"' + # The link target and the file we intended to link to have the same inode + - 'file7_links_dest_stat["stat"]["inode"] == file7_dest_stat["stat"]["inode"]' + +# Relative link to directory +- name: create soft link to relative directory + file: + src: sub1 + dest: '{{ output_dir }}/sub1-link' + state: 'link' + register: file8_result + +- name: Get stat info for the link + stat: + path: '{{ output_dir }}/sub1-link' + follow: False + register: file8_link_stat + +- name: Get stat info for the pointed to file + stat: + path: '{{ output_dir }}/sub1-link' + follow: True + register: file8_links_dest_stat + +- name: Get stat info for the file we intend to point to + stat: + path: '{{ output_dir }}/sub1' + follow: False + register: file8_dest_stat + +- name: verify that the link was created correctly + assert: + that: + # file module reports it created something + - "file8_result.changed == true" + # file module created a link + - 'file8_link_stat["stat"]["islnk"]' + # Link points to the right path + - 'file8_link_stat["stat"]["lnk_target"] == "sub1"' + # The link target and the file we intended to link to have the same inode + - 'file8_links_dest_stat["stat"]["inode"] == file8_dest_stat["stat"]["inode"]' + +# test the file module using follow=yes, so that the target of a +# symlink is modified, rather than the link itself + +- name: create a test file + copy: + dest: '{{output_dir}}/test_follow' + content: 'this is a test file\n' + mode: 0666 + +- name: create a symlink to the test file + file: + 
path: '{{output_dir}}/test_follow_link' + src: './test_follow' + state: 'link' + +- name: modify the permissions on the link using follow=yes + file: + path: '{{output_dir}}/test_follow_link' + mode: 0644 + follow: yes + register: file9_result + +- name: stat the link target + stat: + path: '{{output_dir}}/test_follow' + register: file9_stat + +- name: assert that the chmod worked + assert: + that: + - 'file9_result is changed' + - 'file9_stat["stat"]["mode"] == "0644"' + +# +# Test modifying the permissions of a link itself +# +- name: attempt to modify the permissions of the link itself + file: + path: '{{output_dir}}/test_follow_link' + src: './test_follow' + state: 'link' + mode: 0600 + follow: False + register: file10_result + +# Whether the link itself changed is platform dependent! (BSD vs Linux?) +# Just check that the underlying file was not changed +- name: stat the link target + stat: + path: '{{output_dir}}/test_follow' + register: file10_target_stat + +- name: assert that the link target was unmodified + assert: + that: + - 'file10_result is changed' + - 'file10_target_stat["stat"]["mode"] == "0644"' + + +# https://github.com/ansible/ansible/issues/56928 +- block: + + - name: Create a testing file + file: + path: "{{ output_dir }}/test_follow1" + state: touch + + - name: Create a symlink and change mode of the original file, since follow == yes by default + file: + src: "{{ output_dir }}/test_follow1" + dest: "{{ output_dir }}/test_follow1_link" + state: link + mode: 0700 + + - name: stat the original file + stat: + path: "{{ output_dir }}/test_follow1" + register: stat_out + + - name: Check if the mode of the original file was set + assert: + that: + - 'stat_out.stat.mode == "0700"' + + always: + - name: Clean up + file: + path: "{{ item }}" + state: absent + loop: + - "{{ output_dir }}/test_follow1" + - "{{ output_dir }}/test_follow1_link" + +# END #56928 + + +# Test failure with src and no state parameter +- name: Specify src without state + file: + src: "{{ output_file }}" + dest: "{{ output_dir }}/link.txt" + ignore_errors: yes + register: src_state + +- name: Ensure src without state failed + assert: + that: + - src_state is failed + - "'src option requires state to be' in src_state.msg" + +# Test creating a symlink when the destination exists and is a file +- name: create a test file + copy: + dest: '{{ output_dir }}/file.txt' + content: 'this is a test file\n' + mode: 0666 + +- name: Create a symlink with dest already a file + file: + src: '{{ output_file }}' + dest: '{{ output_dir }}/file.txt' + state: link + ignore_errors: true + register: dest_is_existing_file_fail + +- name: Stat to make sure the symlink was not created + stat: + path: '{{ output_dir }}/file.txt' + follow: false + register: dest_is_existing_file_fail_stat + +- name: Forcefully create a symlink with dest already a file + file: + src: '{{ output_file }}' + dest: '{{ output_dir }}/file.txt' + state: link + force: true + register: dest_is_existing_file_force + +- name: Stat to make sure the symlink was created + stat: + path: '{{ output_dir }}/file.txt' + follow: false + register: dest_is_existing_file_force_stat + +- assert: + that: + - dest_is_existing_file_fail is failed + - not dest_is_existing_file_fail_stat.stat.islnk + - dest_is_existing_file_force is changed + - dest_is_existing_file_force_stat.stat.exists + - dest_is_existing_file_force_stat.stat.islnk diff --git a/test/integration/targets/file/tasks/unicode_path.yml b/test/integration/targets/file/tasks/unicode_path.yml new file mode 100644 index 
00000000..d78af765 --- /dev/null +++ b/test/integration/targets/file/tasks/unicode_path.yml @@ -0,0 +1,10 @@ +- name: create local file with unicode filename and content + lineinfile: + dest: "{{ output_dir }}/语/汉语.txt" + create: true + line: 汉语 + +- name: remove local file with unicode filename and content + file: + path: "{{ output_dir }}/语/汉语.txt" + state: absent diff --git a/test/integration/targets/filter_core/aliases b/test/integration/targets/filter_core/aliases new file mode 100644 index 00000000..1603f435 --- /dev/null +++ b/test/integration/targets/filter_core/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller +skip/aix diff --git a/test/integration/targets/filter_core/files/9851.txt b/test/integration/targets/filter_core/files/9851.txt new file mode 100644 index 00000000..70b12793 --- /dev/null +++ b/test/integration/targets/filter_core/files/9851.txt @@ -0,0 +1,3 @@ + [{ + "k": "Quotes \"'\n" +}] diff --git a/test/integration/targets/filter_core/files/fileglob/one.txt b/test/integration/targets/filter_core/files/fileglob/one.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/filter_core/files/fileglob/two.txt b/test/integration/targets/filter_core/files/fileglob/two.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/filter_core/files/foo.txt b/test/integration/targets/filter_core/files/foo.txt new file mode 100644 index 00000000..9bd9b636 --- /dev/null +++ b/test/integration/targets/filter_core/files/foo.txt @@ -0,0 +1,69 @@ +This is a test of various filter plugins found in Ansible (ex: core.py), and +not so much a test of the core filters in Jinja2. + +Dumping the same structure to YAML + +- this is a list element +- this: is a hash element in a list + warp: 9 + where: endor + + +Dumping the same structure to JSON, but don't pretty print + +["this is a list element", {"this": "is a hash element in a list", "warp": 9, "where": "endor"}] + +Dumping the same structure to YAML, but don't pretty print + +- this is a list element +- {this: is a hash element in a list, warp: 9, where: endor} + + +From a recorded task, the changed, failed, success, and skipped +tests are shortcuts to ask if those tasks produced changes, failed, +succeeded, or skipped (as one might guess). + +Changed = True +Failed = False +Success = True +Skipped = False + +The mandatory filter fails if a variable is not defined and returns the value. +To avoid breaking this test, this variable is already defined. + +a = 1 + +There are various casts available + +int = 1 +bool = True + +String quoting + +quoted = quoted + +The fileglob module returns the list of things matching a pattern. + +fileglob = one.txt, two.txt + +There are also various string operations that work on paths. These do not require +files to exist and are passthrus to the python os.path functions + +/etc/motd with basename = motd +/etc/motd with dirname = /etc + +path_join_simple = /etc/subdir/test +path_join_with_slash = /test +path_join_relative = etc/subdir/test + +TODO: realpath follows symlinks. There isn't a test for this just now. 
+ +TODO: add tests for set theory operations like union + +regex_replace = bar +# Check regex_replace with multiline +#bar +#bart +regex_search = 0001 +regex_findall = ["car", "tar", "bar"] +regex_escape = \^f\.\*o\(\.\*\)\$ diff --git a/test/integration/targets/filter_core/handle_undefined_type_errors.yml b/test/integration/targets/filter_core/handle_undefined_type_errors.yml new file mode 100644 index 00000000..70628809 --- /dev/null +++ b/test/integration/targets/filter_core/handle_undefined_type_errors.yml @@ -0,0 +1,29 @@ +- hosts: localhost + gather_facts: false + tasks: + - debug: msg={{item}} + with_dict: '{{myundef}}' + when: + - myundef is defined + register: shouldskip + + - name: check if skipped + assert: + that: + - shouldskip is skipped + + - debug: msg={{item}} + loop: '{{myundef|dict2items}}' + when: + - myundef is defined + + - debug: msg={{item}} + with_dict: '{{myundef}}' + register: notskipped + ignore_errors: true + + - name: check it failed + assert: + that: + - notskipped is not skipped + - notskipped is failed diff --git a/test/integration/targets/filter_core/host_vars/localhost b/test/integration/targets/filter_core/host_vars/localhost new file mode 100644 index 00000000..a8926a52 --- /dev/null +++ b/test/integration/targets/filter_core/host_vars/localhost @@ -0,0 +1 @@ +a: 1 diff --git a/test/integration/targets/filter_core/meta/main.yml b/test/integration/targets/filter_core/meta/main.yml new file mode 100644 index 00000000..e430ea6f --- /dev/null +++ b/test/integration/targets/filter_core/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - role: setup_passlib + when: ansible_facts.distribution == 'MacOSX' diff --git a/test/integration/targets/filter_core/runme.sh b/test/integration/targets/filter_core/runme.sh new file mode 100755 index 00000000..c055603b --- /dev/null +++ b/test/integration/targets/filter_core/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@" +ANSIBLE_ROLES_PATH=../ ansible-playbook handle_undefined_type_errors.yml "$@" diff --git a/test/integration/targets/filter_core/runme.yml b/test/integration/targets/filter_core/runme.yml new file mode 100644 index 00000000..4af4b23c --- /dev/null +++ b/test/integration/targets/filter_core/runme.yml @@ -0,0 +1,3 @@ +- hosts: localhost + roles: + - { role: filter_core } diff --git a/test/integration/targets/filter_core/tasks/main.yml b/test/integration/targets/filter_core/tasks/main.yml new file mode 100644 index 00000000..2197febd --- /dev/null +++ b/test/integration/targets/filter_core/tasks/main.yml @@ -0,0 +1,576 @@ +# test code for filters +# Copyright: (c) 2014, Michael DeHaan +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Note: |groupby is already tested by the `groupby_filter` target. 
+ +- set_fact: + output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" + +- name: a dummy task to test the changed and success filters + shell: echo hi + register: some_registered_var + +- debug: + var: some_registered_var + +- name: Verify that we work around a py26 json bug + template: + src: py26json.j2 + dest: "{{ output_dir }}/py26json.templated" + mode: 0644 + +- name: 9851 - Verify that we don't trigger https://github.com/ansible/ansible/issues/9851 + copy: + content: " [{{ item | to_nice_json }}]" + dest: "{{ output_dir }}/9851.out" + with_items: + - {"k": "Quotes \"'\n"} + +- name: 9851 - copy known good output into place + copy: + src: 9851.txt + dest: "{{ output_dir }}/9851.txt" + +- name: 9851 - Compare generated json to known good + shell: diff -w {{ output_dir }}/9851.out {{ output_dir }}/9851.txt + register: diff_result_9851 + +- name: 9851 - verify generated file matches known good + assert: + that: + - 'diff_result_9851.stdout == ""' + +- name: fill in a basic template + template: + src: foo.j2 + dest: "{{ output_dir }}/foo.templated" + mode: 0644 + register: template_result + +- name: copy known good into place + copy: + src: foo.txt + dest: "{{ output_dir }}/foo.txt" + +- name: compare templated file to known good + shell: diff -w {{ output_dir }}/foo.templated {{ output_dir }}/foo.txt + register: diff_result + +- name: verify templated file matches known good + assert: + that: + - 'diff_result.stdout == ""' + +- name: Test extract + assert: + that: + - '"c" == 2 | extract(["a", "b", "c"])' + - '"b" == 1 | extract(["a", "b", "c"])' + - '"a" == 0 | extract(["a", "b", "c"])' + +- name: Container lookups with extract + assert: + that: + - "'x' == [0]|map('extract',['x','y'])|list|first" + - "'y' == [1]|map('extract',['x','y'])|list|first" + - "42 == ['x']|map('extract',{'x':42,'y':31})|list|first" + - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" + - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first" + - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" + # map was added to jinja2 in version 2.7 + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=') + +- name: Test extract filter with defaults + vars: + container: + key: + subkey: value + assert: + that: + - "'key' | extract(badcontainer) | default('a') == 'a'" + - "'key' | extract(badcontainer, 'subkey') | default('a') == 'a'" + - "('key' | extract(badcontainer)).subkey | default('a') == 'a'" + - "'badkey' | extract(container) | default('a') == 'a'" + - "'badkey' | extract(container, 'subkey') | default('a') == 'a'" + - "('badkey' | extract(container)).subsubkey | default('a') == 'a'" + - "'key' | extract(container, 'badsubkey') | default('a') == 'a'" + - "'key' | extract(container, ['badsubkey', 'subsubkey']) | default('a') == 'a'" + - "('key' | extract(container, 'badsubkey')).subsubkey | default('a') == 'a'" + - "'badkey' | extract(hostvars) | default('a') == 'a'" + - "'badkey' | extract(hostvars, 'subkey') | default('a') == 'a'" + - "('badkey' | extract(hostvars)).subsubkey | default('a') == 'a'" + - "'localhost' | extract(hostvars, 'badsubkey') | default('a') == 'a'" + - "'localhost' | extract(hostvars, ['badsubkey', 'subsubkey']) | default('a') == 'a'" + - "('localhost' | extract(hostvars, 'badsubkey')).subsubkey | default('a') == 'a'" + +- name: Test hash filter + assert: + that: + - '"{{ "hash" | hash("sha1") }}" == "2346ad27d7568ba9896f1b7da6b5991251debdf2"' + - '"{{ "café"
| hash("sha1") }}" == "f424452a9673918c6f09b0cdd35b20be8e6ae7d7"' + - '"corned beef"|hash("haha, get it?") == None' + +- name: Flatten tests + block: + - name: use flatten + set_fact: + flat_full: '{{orig_list|flatten}}' + flat_one: '{{orig_list|flatten(levels=1)}}' + flat_two: '{{orig_list|flatten(levels=2)}}' + flat_tuples: '{{ [1,3] | zip([2,4]) | list | flatten }}' + + - name: Verify flatten filter works as expected + assert: + that: + - flat_full == [1, 2, 3, 4, 5, 6, 7] + - flat_one == [1, 2, 3, [4, [5]], 6, 7] + - flat_two == [1, 2, 3, 4, [5], 6, 7] + - flat_tuples == [1, 2, 3, 4] + vars: + orig_list: [1, 2, [3, [4, [5]], 6], 7] + +- name: Test base64 filter + assert: + that: + - "'Ansible - くらとみ\n' | b64encode == 'QW5zaWJsZSAtIOOBj+OCieOBqOOBvwo='" + - "'QW5zaWJsZSAtIOOBj+OCieOBqOOBvwo=' | b64decode == 'Ansible - くらとみ\n'" + - "'Ansible - くらとみ\n' | b64encode(encoding='utf-16-le') == 'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA'" + - "'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA' | b64decode(encoding='utf-16-le') == 'Ansible - くらとみ\n'" + +- set_fact: + x: + x: x + key: x + y: + y: y + key: y + z: + z: z + key: z + + # Most complicated combine dicts from the documentation + default: + a: + a': + x: default_value + y: default_value + list: + - default_value + b: + - 1 + - 1 + - 2 + - 3 + patch: + a: + a': + y: patch_value + z: patch_value + list: + - patch_value + b: + - 3 + - 4 + - 4 + - key: value + result: + a: + a': + x: default_value + y: patch_value + z: patch_value + list: + - default_value + - patch_value + b: + - 1 + - 1 + - 2 + - 3 + - 4 + - 4 + - key: value + +- name: Verify combine fails with extra kwargs + set_fact: + foo: "{{[1] | combine(foo='bar')}}" + ignore_errors: yes + register: combine_fail + +- name: Verify combine filter + assert: + that: + - "([x] | combine) == x" + - "(x | combine(y)) == {'x': 'x', 'y': 'y', 'key': 'y'}" + - "(x | combine(y, z)) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}" + - "([x, y, z] | combine) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}" + - "([x, y] | combine(z)) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}" + - "None|combine == {}" + # more advanced dict combination tests are done in the "merge_hash" function unit tests + # but even though it's redundant with those unit tests, we do at least the most complicated example of the documentation here + - "(default | combine(patch, recursive=True, list_merge='append_rp')) == result" + - combine_fail is failed + - "combine_fail.msg == \"'recursive' and 'list_merge' are the only valid keyword arguments\"" + +- set_fact: + combine: "{{[x, [y]] | combine(z)}}" + ignore_errors: yes + register: result + +- name: Ensure combining objects which aren't dictionaries throws an error + assert: + that: + - "result.msg.startswith(\"failed to combine variables, expected dicts but got\")" + +- name: Ensure combining two dictionaries containing undefined variables provides a helpful error + block: + - set_fact: + foo: + key1: value1 + + - set_fact: + combined: "{{ foo | combine({'key2': undef_variable}) }}" + ignore_errors: yes + register: result + + - assert: + that: + - "result.msg.startswith('The task includes an option with an undefined variable')" + + - set_fact: + combined: "{{ foo | combine({'key2': {'nested': [undef_variable]}})}}" + ignore_errors: yes + register: result + + - assert: + that: + - "result.msg.startswith('The task includes an option with an undefined variable')" + +- name: regex_search + set_fact: + match_case: "{{ 'hello' | regex_search('HELLO', ignorecase=false) }}" + ignore_case: "{{ 
'hello' | regex_search('HELLO', ignorecase=true) }}" + single_line: "{{ 'hello\nworld' | regex_search('^world', multiline=false) }}" + multi_line: "{{ 'hello\nworld' | regex_search('^world', multiline=true) }}" + named_groups: "{{ 'goodbye' | regex_search('(?P<first>good)(?P<second>bye)', '\\g<second>', '\\g<first>') }}" + numbered_groups: "{{ 'goodbye' | regex_search('(good)(bye)', '\\2', '\\1') }}" + +- name: regex_search unknown argument (failure expected) + set_fact: + unknown_arg: "{{ 'hello' | regex_search('hello', 'unknown') }}" + ignore_errors: yes + register: failure + +- name: regex_search check + assert: + that: + - match_case == '' + - ignore_case == 'hello' + - single_line == '' + - multi_line == 'world' + - named_groups == ['bye', 'good'] + - numbered_groups == ['bye', 'good'] + - failure is failed + +- name: Verify to_bool + assert: + that: + - 'None|bool == None' + - 'False|bool == False' + - '"TrUe"|bool == True' + - '"FalSe"|bool == False' + - '7|bool == False' + +- name: Verify to_datetime + assert: + that: + - '"1993-03-26 01:23:45"|to_datetime < "1994-03-26 01:23:45"|to_datetime' + +- name: strftime invalid argument (failure expected) + set_fact: + foo: "{{ '%Y' | strftime('foo') }}" + ignore_errors: yes + register: strftime_fail + +- name: Verify strftime + assert: + that: + - '"%Y-%m-%d"|strftime(1585247522) == "2020-03-26"' + - '"%Y-%m-%d"|strftime("1585247522.0") == "2020-03-26"' + - '("%Y"|strftime(None)).startswith("20")' # Current date, can't check much there. + - strftime_fail is failed + - '"Invalid value for epoch value" in strftime_fail.msg' + +- name: Verify case-insensitive regex_replace + assert: + that: + - '"hElLo there"|regex_replace("hello", "hi", ignorecase=True) == "hi there"' + +- name: Verify case-insensitive regex_findall + assert: + that: + - '"hEllo there heLlo haha HELLO there"|regex_findall("h.... 
", ignorecase=True)|length == 3' + +- name: Verify ternary + assert: + that: + - 'True|ternary("seven", "eight") == "seven"' + - 'None|ternary("seven", "eight") == "eight"' + - 'None|ternary("seven", "eight", "nine") == "nine"' + - 'False|ternary("seven", "eight") == "eight"' + - '123|ternary("seven", "eight") == "seven"' + - '"haha"|ternary("seven", "eight") == "seven"' + +- name: Verify regex_escape raises on posix_extended (failure expected) + set_fact: + foo: '{{"]]^"|regex_escape(re_type="posix_extended")}}' + ignore_errors: yes + register: regex_escape_fail_1 + +- name: Verify regex_escape raises on other re_type (failure expected) + set_fact: + foo: '{{"]]^"|regex_escape(re_type="haha")}}' + ignore_errors: yes + register: regex_escape_fail_2 + +- name: Verify regex_escape with re_type other than 'python' + assert: + that: + - '"]]^"|regex_escape(re_type="posix_basic") == "\\]\\]\\^"' + - regex_escape_fail_1 is failed + - 'regex_escape_fail_1.msg == "Regex type (posix_extended) not yet implemented"' + - regex_escape_fail_2 is failed + - 'regex_escape_fail_2.msg == "Invalid regex type (haha)"' + +- name: Verify from_yaml and from_yaml_all + assert: + that: + - "'---\nbananas: yellow\napples: red'|from_yaml == {'bananas': 'yellow', 'apples': 'red'}" + - "2|from_yaml == 2" + - "'---\nbananas: yellow\n---\napples: red'|from_yaml_all|list == [{'bananas': 'yellow'}, {'apples': 'red'}]" + - "2|from_yaml_all == 2" + +- name: Verify random raises on non-iterable input (failure expected) + set_fact: + foo: '{{None|random}}' + ignore_errors: yes + register: random_fail_1 + +- name: Verify random raises on iterable input with start (failure expected) + set_fact: + foo: '{{[1,2,3]|random(start=2)}}' + ignore_errors: yes + register: random_fail_2 + +- name: Verify random raises on iterable input with step (failure expected) + set_fact: + foo: '{{[1,2,3]|random(step=2)}}' + ignore_errors: yes + register: random_fail_3 + +- name: Verify random + assert: + that: + - '2|random in [0,1]' + - '2|random(seed=1337) in [0,1]' + - '["a", "b"]|random in ["a", "b"]' + - '20|random(start=10) in range(10, 20)' + - '20|random(start=10, step=2) % 2 == 0' + - random_fail_1 is failure + - '"random can only be used on" in random_fail_1.msg' + - random_fail_2 is failure + - '"start and step can only be used" in random_fail_2.msg' + - random_fail_3 is failure + - '"start and step can only be used" in random_fail_3.msg' + +# It's hard to actually verify much here since the result is, well, random. +- name: Verify randomize_list + assert: + that: + - '[1,3,5,7,9]|shuffle|length == 5' + - '[1,3,5,7,9]|shuffle(seed=1337)|length == 5' + - '22|shuffle == 22' + +- name: Verify password_hash throws on weird salt_size type + set_fact: + foo: '{{"hey"|password_hash(salt_size=[999])}}' + ignore_errors: yes + register: password_hash_1 + +- name: Verify password_hash throws on weird hashtype + set_fact: + foo: '{{"hey"|password_hash(hashtype="supersecurehashtype")}}' + ignore_errors: yes + register: password_hash_2 + +- name: Verify password_hash + assert: + that: + - "'what in the WORLD is up?'|password_hash|length == 106" + # This throws a vastly different error on py2 vs py3, so we just check + # that it's a failure, not a substring of the exception. 
+ - password_hash_1 is failed + - password_hash_2 is failed + - "'not support' in password_hash_2.msg" + +- name: Verify to_uuid throws on weird namespace + set_fact: + foo: '{{"hey"|to_uuid(namespace=22)}}' + ignore_errors: yes + register: to_uuid_1 + +- name: Verify to_uuid + assert: + that: + - '"monkeys"|to_uuid == "0d03a178-da0f-5b51-934e-cda9c76578c3"' + - to_uuid_1 is failed + - '"Invalid value" in to_uuid_1.msg' + +- name: Verify mandatory throws on undefined variable + set_fact: + foo: '{{hey|mandatory}}' + ignore_errors: yes + register: mandatory_1 + +- name: Verify mandatory throws on undefined variable with custom message + set_fact: + foo: '{{hey|mandatory("You did not give me a variable. I am a sad wolf.")}}' + ignore_errors: yes + register: mandatory_2 + +- name: Set a variable + set_fact: + mandatory_demo: 123 + +- name: Verify mandatory + assert: + that: + - '{{mandatory_demo|mandatory}} == 123' + - mandatory_1 is failed + - "mandatory_1.msg == \"Mandatory variable 'hey' not defined.\"" + - mandatory_2 is failed + - "mandatory_2.msg == 'You did not give me a variable. I am a sad wolf.'" + +- name: Verify comment + assert: + that: + - '"boo!"|comment == "#\n# boo!\n#"' + - '"boo!"|comment(decoration="-- ") == "--\n-- boo!\n--"' + - '"boo!"|comment(style="cblock") == "/*\n *\n * boo!\n *\n */"' + - '"boo!"|comment(decoration="") == "boo!\n"' + - '"boo!"|comment(prefix="\n", prefix_count=20) == "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# boo!\n#"' + +- name: Verify subelements throws on invalid obj + set_fact: + foo: '{{True|subelements("foo")}}' + ignore_errors: yes + register: subelements_1 + +- name: Verify subelements throws on invalid subelements arg + set_fact: + foo: '{{{}|subelements(17)}}' + ignore_errors: yes + register: subelements_2 + +- name: Set demo data for subelements + set_fact: + subelements_demo: '{{ [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}] }}' + +- name: Verify subelements throws on bad key + set_fact: + foo: '{{subelements_demo | subelements("does not compute")}}' + ignore_errors: yes + register: subelements_3 + +- name: Verify subelements throws on key pointing to bad value + set_fact: + foo: '{{subelements_demo | subelements("name")}}' + ignore_errors: yes + register: subelements_4 + +- name: Verify subelements throws on list of keys ultimately pointing to bad value + set_fact: + foo: '{{subelements_demo | subelements(["groups", "authorized"])}}' + ignore_errors: yes + register: subelements_5 + +- name: Verify subelements + assert: + that: + - subelements_1 is failed + - 'subelements_1.msg == "obj must be a list of dicts or a nested dict"' + - subelements_2 is failed + - '"subelements must be a list or a string" in subelements_2.msg' + - 'subelements_demo|subelements("does not compute", skip_missing=True) == []' + - subelements_3 is failed + - '"could not find" in subelements_3.msg' + - subelements_4 is failed + - '"should point to a list" in subelements_4.msg' + - subelements_5 is failed + - '"should point to a dictionary" in subelements_5.msg' + - 'subelements_demo|subelements("groups") == [({"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}, "wheel")]' + - 'subelements_demo|subelements(["groups"]) == [({"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}, "wheel")]' + + +- name: Verify dict2items throws on non-Mapping + set_fact: + foo: '{{True|dict2items}}' + ignore_errors: yes + register: dict2items_fail + +- name: Verify dict2items + assert: + that: + 
- '{"foo": "bar", "banana": "fruit"}|dict2items == [{"key": "foo", "value": "bar"}, {"key": "banana", "value": "fruit"}]' + - dict2items_fail is failed + - '"dict2items requires a dictionary" in dict2items_fail.msg' + +- name: Verify items2dict throws on non-Mapping + set_fact: + foo: '{{True|items2dict}}' + ignore_errors: yes + register: items2dict_fail + +- name: Verify items2dict + assert: + that: + - '[{"key": "foo", "value": "bar"}, {"key": "banana", "value": "fruit"}]|items2dict == {"foo": "bar", "banana": "fruit"}' + - items2dict_fail is failed + - '"items2dict requires a list" in items2dict_fail.msg' + +- name: Verify path_join throws on non-string and non-sequence + set_fact: + foo: '{{True|path_join}}' + ignore_errors: yes + register: path_join_fail + +- name: Verify path_join + assert: + that: + - '"foo"|path_join == "foo"' + - '["foo", "bar"]|path_join in ["foo/bar", "foo\bar"]' + - path_join_fail is failed + - '"expects string or sequence" in path_join_fail.msg' + +- name: Verify type_debug + assert: + that: + - '"foo"|type_debug == "str"' + +- name: Assert that a jinja2 filter that produces a map is auto unrolled + assert: + that: + - thing|map(attribute="bar")|first == 123 + - thing_result|first == 123 + - thing_items|first|last == 123 + - thing_range == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + vars: + thing: + - bar: 123 + thing_result: '{{ thing|map(attribute="bar") }}' + thing_dict: + bar: 123 + thing_items: '{{ thing_dict.items() }}' + thing_range: '{{ range(10) }}' diff --git a/test/integration/targets/filter_core/templates/foo.j2 b/test/integration/targets/filter_core/templates/foo.j2 new file mode 100644 index 00000000..a69ba5ef --- /dev/null +++ b/test/integration/targets/filter_core/templates/foo.j2 @@ -0,0 +1,62 @@ +This is a test of various filter plugins found in Ansible (ex: core.py), and +not so much a test of the core filters in Jinja2. + +Dumping the same structure to YAML + +{{ some_structure | to_nice_yaml }} + +Dumping the same structure to JSON, but don't pretty print + +{{ some_structure | to_json(sort_keys=true) }} + +Dumping the same structure to YAML, but don't pretty print + +{{ some_structure | to_yaml }} + +From a recorded task, the changed, failed, success, and skipped +tests are shortcuts to ask if those tasks produced changes, failed, +succeeded, or skipped (as one might guess). + +Changed = {{ some_registered_var is changed }} +Failed = {{ some_registered_var is failed }} +Success = {{ some_registered_var is successful }} +Skipped = {{ some_registered_var is skipped }} + +The mandatory filter fails if a variable is not defined and returns the value. +To avoid breaking this test, this variable is already defined. + +a = {{ a | mandatory }} + +There are various casts available + +int = {{ a | int }} +bool = {{ 1 | bool }} + +String quoting + +quoted = {{ 'quoted' | quote }} + +The fileglob module returns the list of things matching a pattern. + +fileglob = {{ (playbook_dir + '/files/fileglob/*') | fileglob | map('basename') | sort | join(', ') }} + +There are also various string operations that work on paths. These do not require +files to exist and are passthrus to the python os.path functions + +/etc/motd with basename = {{ '/etc/motd' | basename }} +/etc/motd with dirname = {{ '/etc/motd' | dirname }} + +path_join_simple = {{ ('/etc', 'subdir', 'test') | path_join }} +path_join_with_slash = {{ ('/etc', 'subdir', '/test') | path_join }} +path_join_relative = {{ ('etc', 'subdir', 'test') | path_join }} + +TODO: realpath follows symlinks. 
There isn't a test for this just now. + +TODO: add tests for set theory operations like union + +regex_replace = {{ 'foo' | regex_replace('^foo', 'bar') }} +# Check regex_replace with multiline +{{ '#foo\n#foot' | regex_replace('^#foo', '#bar', multiline=True) }} +regex_search = {{ 'test_value_0001' | regex_search('([0-9]+)$')}} +regex_findall = {{ 'car\ntar\nfoo\nbar\n' | regex_findall('^.ar$', multiline=True)|to_json }} +regex_escape = {{ '^f.*o(.*)$' | regex_escape() }} diff --git a/test/integration/targets/filter_core/templates/py26json.j2 b/test/integration/targets/filter_core/templates/py26json.j2 new file mode 100644 index 00000000..dba62ad1 --- /dev/null +++ b/test/integration/targets/filter_core/templates/py26json.j2 @@ -0,0 +1,2 @@ +Provoke a python2.6 json bug +{{ hostvars[inventory_hostname] | to_nice_json }} diff --git a/test/integration/targets/filter_core/vars/main.yml b/test/integration/targets/filter_core/vars/main.yml new file mode 100644 index 00000000..aedecd8f --- /dev/null +++ b/test/integration/targets/filter_core/vars/main.yml @@ -0,0 +1,106 @@ +some_structure: + - "this is a list element" + - + this: "is a hash element in a list" + warp: 9 + where: endor + +other_data: + level1: + foo: bar + blip: baz + nested: + abc: def + ghi: xyz + alist: + - alpha + - beta + - charlie + - delta + level2: + asd: df + xc: dsdfsfsd + nested: + abc: foo + alist: + - zebra + - yellow + - xray + +# from https://github.com/ansible/ansible/issues/20379#issuecomment-280492883 +example_20379: { + "ApplicationVersions": [ + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": "test-npm-check-626-1313", + "Description": "bla", + "DateCreated": "2017-01-22T02:02:31.798Z", + "DateUpdated": "2017-01-22T02:02:31.798Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-626-1313.war" + } + }, + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": "terminate-611-1289", + "Description": "bla", + "DateCreated": "2017-01-20T00:34:29.864Z", + "DateUpdated": "2017-01-20T00:34:29.864Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-611-1289.war" + } + }, + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": "terminate-610-1286", + "Description": "bla", + "DateCreated": "2017-01-20T00:22:02.229Z", + "DateUpdated": "2017-01-20T00:22:02.229Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-610-1286.war" + } + }, + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": "master-609-1284", + "Description": "bla", + "DateCreated": "2017-01-19T23:54:32.902Z", + "DateUpdated": "2017-01-19T23:54:32.902Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-609-1284.war" + } + }, + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": "master-608-1282", + "Description": "bla", + "DateCreated": "2017-01-19T23:02:44.902Z", + "DateUpdated": "2017-01-19T23:02:44.902Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-608-1282.war" + } + }, + { + "ApplicationName": "gitlab_ci_elasticbeanstalk", + "Status": "UNPROCESSED", + "VersionLabel": 
"master-606-1278", + "Description": "bla'", + "DateCreated": "2017-01-19T22:47:57.741Z", + "DateUpdated": "2017-01-19T22:47:57.741Z", + "SourceBundle": { + "S3Bucket": "bla", + "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-606-1278.war" + } + } + ] +} diff --git a/test/integration/targets/filter_mathstuff/aliases b/test/integration/targets/filter_mathstuff/aliases new file mode 100644 index 00000000..1603f435 --- /dev/null +++ b/test/integration/targets/filter_mathstuff/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller +skip/aix diff --git a/test/integration/targets/filter_mathstuff/tasks/main.yml b/test/integration/targets/filter_mathstuff/tasks/main.yml new file mode 100644 index 00000000..2a708be1 --- /dev/null +++ b/test/integration/targets/filter_mathstuff/tasks/main.yml @@ -0,0 +1,288 @@ +- name: Verify unique's fallback's exception throwing for case_sensitive=True + set_fact: + unique_fallback_exc1: '{{ [{"foo": "bar", "moo": "cow"}]|unique(case_sensitive=True) }}' + ignore_errors: true + tags: unique + register: unique_fallback_exc1_res + +- name: Verify unique's fallback's exception throwing for a Hashable thing that triggers TypeError + set_fact: + unique_fallback_exc2: '{{ True|unique }}' + ignore_errors: true + tags: unique + register: unique_fallback_exc2_res + +- name: Verify unique + tags: unique + assert: + that: + - '[1,2,3,4,4,3,2,1]|unique == [1,2,3,4]' + - '["a", "b", "a", "b"]|unique == ["a", "b"]' + - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "cow"}, {"haha": "bar", "moo": "mar"}]|unique == [{"foo": "bar", "moo": "cow"}, {"haha": "bar", "moo": "mar"}]' + - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]|unique == [{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]' + - '{"foo": "bar", "moo": "cow"}|unique == ["foo", "moo"]' + - '"foo"|unique|sort|join == "fo"' + - '[1,2,3,4,5]|unique == [1,2,3,4,5]' + - unique_fallback_exc1_res is failed + - unique_fallback_exc2_res is failed + - "\"'bool' object is not iterable\" in unique_fallback_exc2_res.msg" + +# `unique` will fall back to a custom implementation if the Jinja2 version is +# too old to support `jinja2.filters.do_unique`. However, the built-in fallback +# is quite different by default. Namely, it ignores the case-sensitivity +# setting. This means running: +# ['a', 'b', 'A', 'B']|unique +# ... will give a different result for someone running Jinja 2.9 vs 2.10 when +# do_unique was added. So here, we do a test to see if we have `do_unique`. If +# we do, then we do another test to make sure attribute and case_sensitive +# work on it. 
+- name: Test for do_unique + shell: "{{ansible_python_interpreter}} -c 'from jinja2 import filters; print(\"do_unique\" in dir(filters))'" + tags: unique + register: do_unique_res + +- name: Verify unique some more + tags: unique + assert: + that: + - '["a", "b", "A", "B"]|unique(case_sensitive=True) == ["a", "b", "A", "B"]' + - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]|unique(attribute="foo") == [{"foo": "bar", "moo": "cow"}]' + - '["a", "b", "A", "B"]|unique == ["a", "b"]' # defaults to case_sensitive=False + - "'cannot fall back' in unique_fallback_exc1_res.msg" + when: do_unique_res.stdout == 'True' + +- name: Verify unique some more + tags: unique + assert: + that: + - "'does not support case_sensitive' in unique_fallback_exc1_res.msg" + when: do_unique_res.stdout == 'False' + +- name: Verify intersect + tags: intersect + assert: + that: + - '[1,2,3]|intersect([4,5,6]) == []' + - '[1,2,3]|intersect([3,4,5,6]) == [3]' + - '[1,2,3]|intersect([3,2,1]) == [1,2,3]' + - '(1,2,3)|intersect((4,5,6))|list == []' + - '(1,2,3)|intersect((3,4,5,6))|list == [3]' + +- name: Verify difference + tags: difference + assert: + that: + - '[1,2,3]|difference([4,5,6]) == [1,2,3]' + - '[1,2,3]|difference([3,4,5,6]) == [1,2]' + - '[1,2,3]|difference([3,2,1]) == []' + - '(1,2,3)|difference((4,5,6))|list == [1,2,3]' + - '(1,2,3)|difference((3,4,5,6))|list == [1,2]' + +- name: Verify symmetric_difference + tags: symmetric_difference + assert: + that: + - '[1,2,3]|symmetric_difference([4,5,6]) == [1,2,3,4,5,6]' + - '[1,2,3]|symmetric_difference([3,4,5,6]) == [1,2,4,5,6]' + - '[1,2,3]|symmetric_difference([3,2,1]) == []' + - '(1,2,3)|symmetric_difference((4,5,6))|list == [1,2,3,4,5,6]' + - '(1,2,3)|symmetric_difference((3,4,5,6))|list == [1,2,4,5,6]' + +- name: Verify union + tags: union + assert: + that: + - '[1,2,3]|union([4,5,6]) == [1,2,3,4,5,6]' + - '[1,2,3]|union([3,4,5,6]) == [1,2,3,4,5,6]' + - '[1,2,3]|union([3,2,1]) == [1,2,3]' + - '(1,2,3)|union((4,5,6))|list == [1,2,3,4,5,6]' + - '(1,2,3)|union((3,4,5,6))|list == [1,2,3,4,5,6]' + +- name: Verify min + tags: min + assert: + that: + - '[1000,-99]|min == -99' + - '[0,4]|min == 0' + +- name: Verify max + tags: max + assert: + that: + - '[1000,-99]|max == 1000' + - '[0,4]|max == 4' + +- name: Verify logarithm on a value of invalid type + set_fact: + logarithm_exc1: '{{ "yo"|log }}' + ignore_errors: true + tags: logarithm + register: logarithm_exc1_res + +- name: Verify logarithm (which is passed to Jinja as "log" because consistency is boring) + tags: logarithm + assert: + that: + - '1|log == 0.0' + - '100|log(10) == 2.0' + - '100|log(10) == 2.0' + - '21|log(21) == 1.0' + - '(2.3|log(42)|string).startswith("0.222841")' + - '(21|log(42)|string).startswith("0.814550")' + - logarithm_exc1_res is failed + - '"can only be used on numbers" in logarithm_exc1_res.msg' + +- name: Verify power on a value of invalid type + set_fact: + power_exc1: '{{ "yo"|pow(4) }}' + ignore_errors: true + tags: power + register: power_exc1_res + +- name: Verify power (which is passed to Jinja as "pow" because consistency is boring) + tags: power + assert: + that: + - '2|pow(4) == 16.0' + - power_exc1_res is failed + - '"can only be used on numbers" in power_exc1_res.msg' + +- name: Verify inversepower on a value of invalid type + set_fact: + inversepower_exc1: '{{ "yo"|root }}' + ignore_errors: true + tags: inversepower + register: inversepower_exc1_res + +- name: Verify inversepower (which is passed to Jinja as "root" because consistency is boring) + tags: 
inversepower + assert: + that: + - '4|root == 2.0' + - '4|root(2) == 2.0' + - '9|root(1) == 9.0' + - '(9|root(6)|string).startswith("1.4422495")' + - inversepower_exc1_res is failed + - '"can only be used on numbers" in inversepower_exc1_res.msg' + +- name: Verify human_readable on invalid input + set_fact: + human_readable_exc1: '{{ "monkeys"|human_readable }}' + ignore_errors: true + tags: human_readable + register: human_readable_exc1_res + +- name: Verify human_readable + tags: human_readable + assert: + that: + - '"1.00 Bytes" == 1|human_readable' + - '"1.00 bits" == 1|human_readable(isbits=True)' + - '"10.00 KB" == 10240|human_readable' + - '"97.66 MB" == 102400000|human_readable' + - '"0.10 GB" == 102400000|human_readable(unit="G")' + - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")' + - human_readable_exc1_res is failed + - '"failed on bad input" in human_readable_exc1_res.msg' + +- name: Verify human_to_bytes + tags: human_to_bytes + assert: + that: + - "{{'0'|human_to_bytes}} == 0" + - "{{'0.1'|human_to_bytes}} == 0" + - "{{'0.9'|human_to_bytes}} == 1" + - "{{'1'|human_to_bytes}} == 1" + - "{{'10.00 KB'|human_to_bytes}} == 10240" + - "{{ '11 MB'|human_to_bytes}} == 11534336" + - "{{ '1.1 GB'|human_to_bytes}} == 1181116006" + - "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240" + +- name: Verify human_to_bytes (bad string) + set_fact: + bad_string: "{{ '10.00 foo' | human_to_bytes }}" + ignore_errors: yes + tags: human_to_bytes + register: _human_bytes_test + +- name: Verify human_to_bytes (bad string) + tags: human_to_bytes + assert: + that: "{{_human_bytes_test.failed}}" + +- name: Verify that union can be chained + tags: union + vars: + unions: '{{ [1,2,3]|union([4,5])|union([6,7]) }}' + assert: + that: + - "unions|type_debug == 'list'" + - "unions|length == 7" + +- name: Test union with unhashable item + tags: union + vars: + unions: '{{ [1,2,3]|union([{}]) }}' + assert: + that: + - "unions|type_debug == 'list'" + - "unions|length == 4" + +- name: Verify rekey_on_member with invalid "duplicates" kwarg + set_fact: + rekey_on_member_exc1: '{{ []|rekey_on_member("asdf", duplicates="boo") }}' + ignore_errors: true + tags: rekey_on_member + register: rekey_on_member_exc1_res + +- name: Verify rekey_on_member with invalid data + set_fact: + rekey_on_member_exc2: '{{ "minkeys"|rekey_on_member("asdf") }}' + ignore_errors: true + tags: rekey_on_member + register: rekey_on_member_exc2_res + +- name: Verify rekey_on_member with partially invalid data (list item is not dict) + set_fact: + rekey_on_member_exc3: '{{ [True]|rekey_on_member("asdf") }}' + ignore_errors: true + tags: rekey_on_member + register: rekey_on_member_exc3_res + +- name: Verify rekey_on_member with partially invalid data (key not in all dicts) + set_fact: + rekey_on_member_exc4: '{{ [{"foo": "bar", "baz": "buzz"}, {"hello": 8, "different": "haha"}]|rekey_on_member("foo") }}' + ignore_errors: true + tags: rekey_on_member + register: rekey_on_member_exc4_res + +- name: Verify rekey_on_member with duplicates and duplicates=error + set_fact: + rekey_on_member_exc5: '{{ [{"proto": "eigrp", "state": "enabled"}, {"proto": "eigrp", "state": "enabled"}]|rekey_on_member("proto", duplicates="error") }}' + ignore_errors: true + tags: rekey_on_member + register: rekey_on_member_exc5_res + +- name: Verify rekey_on_member + tags: rekey_on_member + assert: + that: + - rekey_on_member_exc1_res is failed + - '"duplicates parameter to rekey_on_member has unknown value" in rekey_on_member_exc1_res.msg' + - '[{"proto": 
"eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}]|rekey_on_member("proto") == {"eigrp": {"proto": "eigrp", "state": "enabled"}, "ospf": {"proto": "ospf", "state": "enabled"}}' + - '{"a": {"proto": "eigrp", "state": "enabled"}, "b": {"proto": "ospf", "state": "enabled"}}|rekey_on_member("proto") == {"eigrp": {"proto": "eigrp", "state": "enabled"}, "ospf": {"proto": "ospf", "state": "enabled"}}' + - '[{"proto": "eigrp", "state": "enabled"}, {"proto": "eigrp", "state": "enabled"}]|rekey_on_member("proto", duplicates="overwrite") == {"eigrp": {"proto": "eigrp", "state": "enabled"}}' + - rekey_on_member_exc2_res is failed + - '"Type is not a valid list, set, or dict" in rekey_on_member_exc2_res.msg' + - rekey_on_member_exc3_res is failed + - '"List item is not a valid dict" in rekey_on_member_exc3_res.msg' + - rekey_on_member_exc4_res is failed + - '"was not found" in rekey_on_member_exc4_res.msg' + - rekey_on_member_exc5_res is failed + - '"is not unique, cannot correctly turn into dict" in rekey_on_member_exc5_res.msg' + +# TODO: For some reason, the coverage tool isn't accounting for the last test +# so add another "last test" to fake it... +- assert: + that: + - true diff --git a/test/integration/targets/filter_urls/aliases b/test/integration/targets/filter_urls/aliases new file mode 100644 index 00000000..1603f435 --- /dev/null +++ b/test/integration/targets/filter_urls/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller +skip/aix diff --git a/test/integration/targets/filter_urls/runme.sh b/test/integration/targets/filter_urls/runme.sh new file mode 100755 index 00000000..9362a385 --- /dev/null +++ b/test/integration/targets/filter_urls/runme.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook runme.yml "$@" + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh + +# This is necessary for installing Jinja 2.6. We need this because Jinja 2.6 +# won't install with newer setuptools, and because setuptools 45+ won't work +# with Python 2. +pip install 'setuptools<45' + +# Install Jinja 2.6 since we want to test the fallback to Ansible's custom +# urlencode functions. Jinja 2.6 does not have urlencode so we will trigger the +# fallback. 
+pip install 'jinja2 >= 2.6, < 2.7' + +# Run the playbook again in the venv with Jinja 2.6 +ansible-playbook runme.yml "$@" diff --git a/test/integration/targets/filter_urls/runme.yml b/test/integration/targets/filter_urls/runme.yml new file mode 100644 index 00000000..527a03e3 --- /dev/null +++ b/test/integration/targets/filter_urls/runme.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: false + roles: + - { role: filter_urls } diff --git a/test/integration/targets/filter_urls/tasks/main.yml b/test/integration/targets/filter_urls/tasks/main.yml new file mode 100644 index 00000000..935ed479 --- /dev/null +++ b/test/integration/targets/filter_urls/tasks/main.yml @@ -0,0 +1,31 @@ +- name: Get Jinja2 version + shell: "{{ ansible_python_interpreter }} -c 'import jinja2; print(jinja2.__version__)'" + register: jinja2_version + +- name: Print Jinja2 version + debug: var=jinja2_version.stdout + +- name: Test urldecode filter + set_fact: + urldecoded_string: key="@{}é&%£ foo bar '(;\<>""°) + +- name: Test urlencode filter + set_fact: + urlencoded_string: 'key%3D%22%40%7B%7D%C3%A9%26%25%C2%A3%20foo%20bar%20%27%28%3B%5C%3C%3E%22%22%C2%B0%29' + +- name: Verify urlencode / urldecode isomorphism + assert: + that: + - urldecoded_string == urlencoded_string|urldecode + - urlencoded_string == urldecoded_string|urlencode + +- name: Verify urlencode handles dicts properly + assert: + that: + - "{'foo': 'bar'}|urlencode == 'foo=bar'" + - "{'foo': 'bar', 'baz': 'buz'}|urlencode == 'foo=bar&baz=buz'" + - "()|urlencode == ''" + +# Needed (temporarily) due to coverage reports not including the last task. +- assert: + that: true diff --git a/test/integration/targets/filter_urlsplit/aliases b/test/integration/targets/filter_urlsplit/aliases new file mode 100644 index 00000000..1603f435 --- /dev/null +++ b/test/integration/targets/filter_urlsplit/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller +skip/aix diff --git a/test/integration/targets/filter_urlsplit/tasks/main.yml b/test/integration/targets/filter_urlsplit/tasks/main.yml new file mode 100644 index 00000000..c3ff3ec9 --- /dev/null +++ b/test/integration/targets/filter_urlsplit/tasks/main.yml @@ -0,0 +1,30 @@ +- debug: + var: "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit" + verbosity: 1 + tags: debug + +- name: Test urlsplit filter + assert: + that: + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('fragment') == 'fragment'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('hostname') == 'www.acme.com'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('netloc') == 'mary:MySecret@www.acme.com:9000'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('path') == '/dir/index.html'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('port') == 9000" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('query') == 'query=term'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('scheme') == 'http'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('username') == 'mary'" + - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('password') == 'MySecret'" + - 
"'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit == { 'fragment': 'fragment', 'hostname': 'www.acme.com', 'netloc': 'mary:MySecret@www.acme.com:9000', 'password': 'MySecret', 'path': '/dir/index.html', 'port': 9000, 'query': 'query=term', 'scheme': 'http', 'username': 'mary' }" + +- name: Test urlsplit filter bad argument + debug: + var: "'http://www.acme.com:9000/dir/index.html' | urlsplit('bad_filter')" + register: _bad_urlsplit_filter + ignore_errors: yes + +- name: Verify urlsplit filter showed an error message + assert: + that: + - _bad_urlsplit_filter is failed + - "'unknown URL component' in _bad_urlsplit_filter.msg" diff --git a/test/integration/targets/find/aliases b/test/integration/targets/find/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/find/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/find/meta/main.yml b/test/integration/targets/find/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/find/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/find/tasks/main.yml b/test/integration/targets/find/tasks/main.yml new file mode 100644 index 00000000..d38daf44 --- /dev/null +++ b/test/integration/targets/find/tasks/main.yml @@ -0,0 +1,163 @@ +# Test code for the find module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- set_fact: output_dir_test={{output_dir}}/test_find + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +## +## find +## + +- name: make some directories + file: + path: "{{ output_dir_test }}/{{ item }}" + state: directory + with_items: + - a/b/c/d + - e/f/g/h + +- name: make some files + copy: + dest: "{{ output_dir_test }}/{{ item }}" + content: 'data' + with_items: + - a/1.txt + - a/b/2.jpg + - a/b/c/3 + - a/b/c/d/4.xml + - e/5.json + - e/f/6.swp + - e/f/g/7.img + - e/f/g/h/8.ogg + +- name: find the directories + find: + paths: "{{ output_dir_test }}" + file_type: directory + recurse: yes + register: find_test0 +- debug: var=find_test0 +- name: validate directory results + assert: + that: + - 'find_test0.changed is defined' + - 'find_test0.examined is defined' + - 'find_test0.files is defined' + - 'find_test0.matched is defined' + - 'find_test0.msg is defined' + - 'find_test0.matched == 8' + - 'find_test0.files | length == 8' + - 'find_test0.examined == 16' + +- name: find the xml and img files + find: + paths: "{{ output_dir_test }}" + file_type: file + patterns: "*.xml,*.img" + recurse: yes + register: find_test1 +- debug: var=find_test1 +- name: validate file results + assert: + that: + - 'find_test1.matched == 2' + - 'find_test1.files | length == 2' + +- name: find the xml file + find: + paths: "{{ output_dir_test }}" + patterns: "*.xml" + recurse: yes + register: find_test2 +- debug: var=find_test2 +- name: validate gr_name and pw_name are defined + assert: + that: + - 'find_test2.matched == 1' + - 'find_test2.files[0].pw_name is defined' + - 'find_test2.files[0].gr_name is defined' + +- name: test number of examined directories/files + block: + - name: Get all files/directories in the path + find: + paths: "{{ output_dir_test }}" + recurse: yes + file_type: any + register: total_contents + + - assert: + that: + - total_contents.matched == 16 + - total_contents.examined == 16 + + - name: Get files and directories with depth + find: + paths: "{{ output_dir_test }}" + recurse: yes + file_type: any + depth: 2 + register: contents_with_depth + + - assert: + that: + - contents_with_depth.matched == 6 + # dir contents are considered until the depth exceeds the requested depth + # there are 6 files/directories in the requested depth and 4 that exceed it by 1 + - contents_with_depth.examined == 10 + + - name: Find files with depth + find: + paths: "{{ output_dir_test }}" + depth: 2 + recurse: yes + register: files_with_depth + + - assert: + that: + - files_with_depth.matched == 2 + # dir contents are considered until the depth exceeds the requested depth + # there are 6 files/directories in the requested depth and 4 that exceed it by 1 + - files_with_depth.examined == 10 + +- name: exclude with regex + find: + paths: "{{ output_dir_test }}" + recurse: yes + use_regex: true + exclude: .*\.ogg + register: find_test3 +# Note that currently sane ways of doing this with map() or +# selectattr() aren't available in centos6 era jinja2 ... 
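+# Illustration only (paths assumed): for the fixture files created above, the +# Jinja loop below renders a plain list of path strings, roughly +#   [ {{ output_dir_test }}/a/1.txt , {{ output_dir_test }}/a/b/2.jpg , ... ] +# minus anything matching the .*\.ogg exclude, which the assert then checks.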
+- set_fact: + find_test3_list: >- + [ {% for f in find_test3.files %} + {{ f.path }} + {% if not loop.last %},{% endif %} + {% endfor %} + ] +- debug: var=find_test3_list +- name: assert we skipped the ogg file + assert: + that: + - '"{{ output_dir_test }}/e/f/g/h/8.ogg" not in find_test3_list' diff --git a/test/integration/targets/gathering/aliases b/test/integration/targets/gathering/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/gathering/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/gathering/explicit.yml b/test/integration/targets/gathering/explicit.yml new file mode 100644 index 00000000..453dfb6a --- /dev/null +++ b/test/integration/targets/gathering/explicit.yml @@ -0,0 +1,14 @@ +- hosts: testhost + tasks: + - name: ensure facts have not been collected + assert: + that: + - ansible_facts is undefined or not 'fqdn' in ansible_facts + +- hosts: testhost + gather_facts: True + tasks: + - name: ensure facts have been collected + assert: + that: + - ansible_facts is defined and 'fqdn' in ansible_facts diff --git a/test/integration/targets/gathering/implicit.yml b/test/integration/targets/gathering/implicit.yml new file mode 100644 index 00000000..f1ea965d --- /dev/null +++ b/test/integration/targets/gathering/implicit.yml @@ -0,0 +1,23 @@ +- hosts: testhost + tasks: + - name: check that facts were gathered but no local facts exist + assert: + that: + - ansible_facts is defined and 'fqdn' in ansible_facts + - not 'uuid' in ansible_local + - name: create 'local facts' for next gathering + copy: + src: uuid.fact + dest: /etc/ansible/facts.d/ + mode: 0755 + +- hosts: testhost + tasks: + - name: ensure facts are gathered and include the new 'local facts' created above + assert: + that: + - ansible_facts is defined and 'fqdn' in ansible_facts + - "'uuid' in ansible_local" + + - name: cleanup 'local facts' from target + file: path=/etc/ansible/facts.d/uuid.fact state=absent diff --git a/test/integration/targets/gathering/runme.sh b/test/integration/targets/gathering/runme.sh new file mode 100755 index 00000000..1c0832c5 --- /dev/null +++ b/test/integration/targets/gathering/runme.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_GATHERING=smart ansible-playbook smart.yml --flush-cache -i ../../inventory -v "$@" +ANSIBLE_GATHERING=implicit ansible-playbook implicit.yml --flush-cache -i ../../inventory -v "$@" +ANSIBLE_GATHERING=explicit ansible-playbook explicit.yml --flush-cache -i ../../inventory -v "$@" diff --git a/test/integration/targets/gathering/smart.yml b/test/integration/targets/gathering/smart.yml new file mode 100644 index 00000000..735cb461 --- /dev/null +++ b/test/integration/targets/gathering/smart.yml @@ -0,0 +1,23 @@ +- hosts: testhost + tasks: + - name: ensure facts are gathered but no local facts exist + assert: + that: + - ansible_facts is defined and 'fqdn' in ansible_facts + - not 'uuid' in ansible_local + - name: create local facts for later test + copy: + src: uuid.fact + dest: /etc/ansible/facts.d/ + mode: 0755 + +- hosts: testhost + tasks: + - name: ensure we still have facts, but didn't pick up new local ones + assert: + that: + - ansible_facts is defined and 'fqdn' in ansible_facts + - not 'uuid' in ansible_local + + - name: remove local facts file + file: path=/etc/ansible/facts.d/uuid.fact state=absent diff --git a/test/integration/targets/gathering/uuid.fact b/test/integration/targets/gathering/uuid.fact new file mode 100644 index 00000000..79e3f626 --- /dev/null 
+++ b/test/integration/targets/gathering/uuid.fact @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +import json +import uuid + + +# return a random string +print(json.dumps(str(uuid.uuid4()))) diff --git a/test/integration/targets/gathering_facts/aliases b/test/integration/targets/gathering_facts/aliases new file mode 100644 index 00000000..0ee704e1 --- /dev/null +++ b/test/integration/targets/gathering_facts/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +needs/root diff --git a/test/integration/targets/gathering_facts/cache_plugins/none.py b/test/integration/targets/gathering_facts/cache_plugins/none.py new file mode 100644 index 00000000..5681dee0 --- /dev/null +++ b/test/integration/targets/gathering_facts/cache_plugins/none.py @@ -0,0 +1,50 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.cache import BaseCacheModule + +DOCUMENTATION = ''' + cache: none + short_description: write-only cache (no cache) + description: + - No caching at all + version_added: historical + author: core team (@ansible-core) +''' + + +class CacheModule(BaseCacheModule): + def __init__(self, *args, **kwargs): + self.empty = {} + + def get(self, key): + return self.empty.get(key) + + def set(self, key, value): + return value + + def keys(self): + return self.empty.keys() + + def contains(self, key): + return key in self.empty + + def delete(self, key): + del self.empty[key] + + def flush(self): + self.empty = {} + + def copy(self): + return self.empty.copy() + + def __getstate__(self): + return self.copy() + + def __setstate__(self, data): + self.empty = data diff --git a/test/integration/targets/gathering_facts/inventory b/test/integration/targets/gathering_facts/inventory new file mode 100644 index 00000000..e15ae780 --- /dev/null +++ b/test/integration/targets/gathering_facts/inventory @@ -0,0 +1,2 @@ +[local] +facthost[0:25] ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/gathering_facts/library/bogus_facts b/test/integration/targets/gathering_facts/library/bogus_facts new file mode 100644 index 00000000..a6aeede5 --- /dev/null +++ b/test/integration/targets/gathering_facts/library/bogus_facts @@ -0,0 +1,12 @@ +#!/bin/sh + +echo '{ + "changed": false, + "ansible_facts": { + "ansible_facts": { + "discovered_interpreter_python": "(touch /tmp/pwned-$(date -Iseconds)-$(whoami) ) 2>/dev/null >/dev/null && /usr/bin/python", + "bogus_overwrite": "yes" + }, + "dansible_iscovered_interpreter_python": "(touch /tmp/pwned-$(date -Iseconds)-$(whoami) ) 2>/dev/null >/dev/null && /usr/bin/python" + } +}' diff --git a/test/integration/targets/gathering_facts/library/facts_one b/test/integration/targets/gathering_facts/library/facts_one new file mode 100644 index 00000000..c74ab9a7 --- /dev/null +++ b/test/integration/targets/gathering_facts/library/facts_one @@ -0,0 +1,25 @@ +#!/bin/sh + +echo '{ + "changed": false, + "ansible_facts": { + "factsone": "from facts_one module", + "common_fact": "also from facts_one module", + "common_dict_fact": { + "key_one": "from facts_one", + "key_two": "from facts_one" + }, + "common_list_fact": [ + "one", + "three", + "five" + ], + "common_list_fact2": [ + "one", + "two", + "three", + "five", + "five" + ] + } +}' diff --git 
a/test/integration/targets/gathering_facts/library/facts_two b/test/integration/targets/gathering_facts/library/facts_two new file mode 100644 index 00000000..4e7c6684 --- /dev/null +++ b/test/integration/targets/gathering_facts/library/facts_two @@ -0,0 +1,24 @@ +#!/bin/sh + +echo '{ + "changed": false, + "ansible_facts": { + "factstwo": "from facts_two module", + "common_fact": "also from facts_two module", + "common_dict_fact": { + "key_two": "from facts_two", + "key_four": "from facts_two" + }, + "common_list_fact": [ + "one", + "two", + "four" + ], + "common_list_fact2": [ + "one", + "two", + "four", + "four" + ] + } +}' diff --git a/test/integration/targets/gathering_facts/library/file_utils.py b/test/integration/targets/gathering_facts/library/file_utils.py new file mode 100644 index 00000000..58538029 --- /dev/null +++ b/test/integration/targets/gathering_facts/library/file_utils.py @@ -0,0 +1,54 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts.utils import ( + get_file_content, + get_file_lines, + get_mount_size, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + test=dict(type='str', default='strip'), + touch_file=dict(type='str', default='/dev/null'), + line_sep_file=dict(type='str', default='/dev/null'), + line_sep_sep=dict(type='str', default='\n'), + ) + ) + + test = module.params['test'] + facts = {} + + if test == 'strip': + etc_passwd = get_file_content('/etc/passwd') + etc_passwd_unstripped = get_file_content('/etc/passwd', strip=False) + facts['etc_passwd_newlines'] = etc_passwd.count('\n') + facts['etc_passwd_newlines_unstripped'] = etc_passwd_unstripped.count('\n') + + elif test == 'default': + path = module.params['touch_file'] + facts['touch_default'] = get_file_content(path, default='i am a default') + + elif test == 'line_sep': + path = module.params['line_sep_file'] + sep = module.params['line_sep_sep'] + facts['line_sep'] = get_file_lines(path, line_sep=sep) + + elif test == 'invalid_mountpoint': + facts['invalid_mountpoint'] = get_mount_size('/doesnotexist') + + result = { + 'changed': False, + 'ansible_facts': facts, + } + + module.exit_json(**result) + + +main() diff --git a/test/integration/targets/gathering_facts/one_two.json b/test/integration/targets/gathering_facts/one_two.json new file mode 100644 index 00000000..ecc698c3 --- /dev/null +++ b/test/integration/targets/gathering_facts/one_two.json @@ -0,0 +1,27 @@ +{ + "_ansible_facts_gathered": true, + "common_dict_fact": { + "key_four": "from facts_two", + "key_one": "from facts_one", + "key_two": "from facts_two" + }, + "common_fact": "also from facts_two module", + "common_list_fact": [ + "three", + "five", + "one", + "two", + "four" + ], + "common_list_fact2": [ + "three", + "five", + "five", + "one", + "two", + "four", + "four" + ], + "factsone": "from facts_one module", + "factstwo": "from facts_two module" +} \ No newline at end of file diff --git a/test/integration/targets/gathering_facts/prevent_clobbering.yml b/test/integration/targets/gathering_facts/prevent_clobbering.yml new file mode 100644 index 00000000..94bb4512 --- /dev/null +++ b/test/integration/targets/gathering_facts/prevent_clobbering.yml @@ -0,0 +1,8 @@ +- name: Verify existing facts don't go undefined on unrelated new facts in loop + hosts: localhost + gather_facts: True + tasks: + - name: Ensure that 'virtualization_type' is not undefined after first loop 
iteration + bogus_facts: + loop: [1, 2, 3] + when: ansible_facts['virtualization_type'] != 'NotDocker' diff --git a/test/integration/targets/gathering_facts/runme.sh b/test/integration/targets/gathering_facts/runme.sh new file mode 100755 index 00000000..9904c9e7 --- /dev/null +++ b/test/integration/targets/gathering_facts/runme.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -eux + +#ANSIBLE_CACHE_PLUGINS=cache_plugins/ ANSIBLE_CACHE_PLUGIN=none ansible-playbook test_gathering_facts.yml -i inventory -v "$@" +ansible-playbook test_gathering_facts.yml -i inventory -e output_dir="$OUTPUT_DIR" -v "$@" +#ANSIBLE_CACHE_PLUGIN=base ansible-playbook test_gathering_facts.yml -i inventory -v "$@" + +ANSIBLE_GATHERING=smart ansible-playbook test_run_once.yml -i inventory -v "$@" + +# ensure clean_facts is working properly +ansible-playbook test_prevent_injection.yml -i inventory -v "$@" + +# ensure fact merging is working properly +ansible-playbook verify_merge_facts.yml -v "$@" -e 'ansible_facts_parallel: False' + +# ensure we don't clobber facts in loop +ansible-playbook prevent_clobbering.yml -v "$@" + +# ensure we don't fail module on bad subset +ansible-playbook verify_subset.yml "$@" diff --git a/test/integration/targets/gathering_facts/test_gathering_facts.yml b/test/integration/targets/gathering_facts/test_gathering_facts.yml new file mode 100644 index 00000000..d4364d29 --- /dev/null +++ b/test/integration/targets/gathering_facts/test_gathering_facts.yml @@ -0,0 +1,474 @@ +--- +- hosts: facthost7 + tags: [ 'fact_negation' ] + connection: local + gather_subset: "!hardware" + gather_facts: no + tasks: + - name: setup with not hardware + setup: + gather_subset: + - "!hardware" + register: not_hardware_facts + +- name: min and network test for platform added + hosts: facthost21 + tags: [ 'fact_network' ] + connection: local + gather_subset: ["!all", "network"] + gather_facts: yes + tasks: + - name: Test that retrieving network facts works and gets prereqs from platform and distribution + assert: + that: + - 'ansible_default_ipv4|default("UNDEF") != "UNDEF"' + - 'ansible_interfaces|default("UNDEF") != "UNDEF"' + # these are true for linux, but maybe not for other os + - 'ansible_system|default("UNDEF") != "UNDEF"' + - 'ansible_distribution|default("UNDEF") != "UNDEF"' + # we don't really require these but they are in the min set + # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"' + # - 'ansible_user_id|default("UNDEF") == "UNDEF"' + # - 'ansible_env|default("UNDEF") == "UNDEF"' + # - 'ansible_selinux|default("UNDEF") == "UNDEF"' + # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"' + +- name: min and hardware test for platform added + hosts: facthost22 + tags: [ 'fact_hardware' ] + connection: local + gather_subset: "hardware" + gather_facts: yes + tasks: + - name: debug stuff + debug: + var: hostvars['facthost22'] + # we should also collect platform, but not distribution + - name: Test that retrieving hardware facts works and gets prereqs from platform and distribution + when: ansible_system|default("UNDEF") == "Linux" + assert: + # LinuxHardwareCollector requires 'platform' facts + that: + - 'ansible_memory_mb|default("UNDEF") != "UNDEF"' + - 'ansible_default_ipv4|default("UNDEF") == "UNDEF"' + - 'ansible_interfaces|default("UNDEF") == "UNDEF"' + # these are true for linux, but maybe not for other os + # hardware requires 'platform' + - 'ansible_system|default("UNDEF") != "UNDEF"' + - 'ansible_machine|default("UNDEF") != "UNDEF"' + # hardware does not require 'distribution' but it is 
in the min set + # - 'ansible_distribution|default("UNDEF") == "UNDEF"' + # we don't really require these but they are in the min set + # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"' + # - 'ansible_user_id|default("UNDEF") == "UNDEF"' + # - 'ansible_env|default("UNDEF") == "UNDEF"' + # - 'ansible_selinux|default("UNDEF") == "UNDEF"' + # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"' + +- name: min and service_mgr test for platform added + hosts: facthost23 + tags: [ 'fact_service_mgr' ] + connection: local + gather_subset: ["!all", "service_mgr"] + gather_facts: yes + tasks: + - name: Test that retrieving service_mgr facts works and gets prereqs from platform and distribution + assert: + that: + - 'ansible_service_mgr|default("UNDEF") != "UNDEF"' + - 'ansible_default_ipv4|default("UNDEF") == "UNDEF"' + - 'ansible_interfaces|default("UNDEF") == "UNDEF"' + # these are true for linux, but maybe not for other os + - 'ansible_system|default("UNDEF") != "UNDEF"' + - 'ansible_distribution|default("UNDEF") != "UNDEF"' + # we don't really require these but they are in the min set + # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"' + # - 'ansible_user_id|default("UNDEF") == "UNDEF"' + # - 'ansible_env|default("UNDEF") == "UNDEF"' + # - 'ansible_selinux|default("UNDEF") == "UNDEF"' + # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"' + +- hosts: facthost0 + tags: [ 'fact_min' ] + connection: local + gather_subset: "all" + gather_facts: yes + tasks: + #- setup: + # register: facts + - name: Test that retrieving all facts works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + + +- hosts: facthost19 + tags: [ 'fact_min' ] + connection: local + gather_facts: no + tasks: + - setup: + filter: "*env*" + # register: fact_results + + - name: Test that retrieving all facts filtered to env works + assert: + that: + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"' + +- hosts: facthost13 + tags: [ 'fact_min' ] + connection: local + gather_facts: no + tasks: + - setup: + filter: "ansible_user_id" + # register: fact_results + + - name: Test that retrieving all facts filtered to specific fact ansible_user_id works + assert: + that: + - 'ansible_user_id|default("UNDEF_USER") != "UNDEF_USER"' + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"' + - 'ansible_pkg_mgr|default("UNDEF_PKG_MGR") == "UNDEF_PKG_MGR"' + +- hosts: facthost11 + tags: [ 'fact_min' ] + connection: local + gather_facts: no + tasks: + - setup: + filter: "*" + # register: fact_results + + - name: Test that retrieving all facts filtered to splat works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + +- hosts: facthost12 + tags: [ 'fact_min' ] + 
connection: local + gather_facts: no + tasks: + - setup: + filter: "" + # register: fact_results + + - name: Test that retrieving all facts filtered to empty filter_spec works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + +- hosts: facthost1 + tags: [ 'fact_min' ] + connection: local + gather_subset: "!all" + gather_facts: yes + tasks: + - name: Test that only retrieving minimal facts works + assert: + that: + # from the min set, which should still collect + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"' + # non min facts that are not collected + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + +- hosts: facthost2 + tags: [ 'fact_network' ] + connection: local + gather_subset: ["!all", "!min", "network"] + gather_facts: yes + tasks: + - name: Test that retrieving network facts works + assert: + that: + - 'ansible_user_id|default("UNDEF") == "UNDEF"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF") == "UNDEF"' + - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"' + +- hosts: facthost3 + tags: [ 'fact_hardware' ] + connection: local + gather_subset: "hardware" + gather_facts: yes + tasks: + - name: Test that retrieving hardware facts works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + +- hosts: facthost4 + tags: [ 'fact_virtual' ] + connection: local + gather_subset: "virtual" + gather_facts: yes + tasks: + - name: Test that retrieving virtualization facts works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + +- hosts: facthost5 + tags: [ 'fact_comma_string' ] + connection: local + gather_subset: ["virtual", "network"] + gather_facts: yes + tasks: + - name: Test that retrieving virtualization and network as a string works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + +- hosts: facthost6 + tags: [ 'fact_yaml_list' ] + connection: local + gather_subset: + - virtual + - network + gather_facts: yes + tasks: + - name: Test that retrieving virtualization and network as a YAML list works + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"' + + +- hosts: facthost7 + tags: [ 'fact_negation' ] + connection: local + gather_subset: "!hardware" + gather_facts: yes + tasks: + - name: 
Test that negation of fact subsets works + assert: + that: + # network, not collected since it is not in min + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + # not collecting virt, should be undef + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + # mounts/devices are collected by hardware, so should be not collected and undef + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_devices|default("UNDEF_DEVICES") == "UNDEF_DEVICES"' + # from the min set, which should still collect + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"' + +- hosts: facthost8 + tags: [ 'fact_mixed_negation_addition' ] + connection: local + gather_subset: ["!hardware", "network"] + gather_facts: yes + tasks: + - name: Test that negation and additional subsets work together + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + +- hosts: facthost14 + tags: [ 'fact_mixed_negation_addition_min' ] + connection: local + gather_subset: ["!all", "!min", "network"] + gather_facts: yes + tasks: + - name: Test that negation and additional subsets work together for min subset + assert: + that: + - 'ansible_user_id|default("UNDEF_MIN") == "UNDEF_MIN"' + - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"' + - 'ansible_default_ipv4|default("UNDEF_DEFAULT_IPV4") != "UNDEF_DEFAULT_IPV4"' + - 'ansible_all_ipv4_addresses|default("UNDEF_ALL_IPV4") != "UNDEF_ALL_IPV4"' + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"' + +- hosts: facthost15 + tags: [ 'fact_negate_all_min_add_pkg_mgr' ] + connection: local + gather_subset: ["!all", "!min", "pkg_mgr"] + gather_facts: yes + tasks: + - name: Test that negation and additional subsets work together for min subset + assert: + that: + # network, not collected since it is not in min + - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"' + # not collecting virt, should be undef + - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"' + # mounts/devices are collected by hardware, so should be not collected and undef + - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"' + - 'ansible_devices|default("UNDEF_DEVICES") == "UNDEF_DEVICES"' + # from the min set, which should not collect + - 'ansible_user_id|default("UNDEF_MIN") == "UNDEF_MIN"' + - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"' + # the pkg_mgr fact we requested explicitly + - 'ansible_pkg_mgr|default("UNDEF_PKG_MGR") != "UNDEF_PKG_MGR"' + + +- hosts: facthost9 + tags: [ 'fact_local'] + connection: local + gather_facts: no + tasks: + - name: Create fact directories + become: true + with_items: + - /etc/ansible/facts.d + - /tmp/custom_facts.d + file: + state: directory + path: "{{ item }}" + mode: '0777' + - name: Deploy local facts + with_items: + - path: /etc/ansible/facts.d/testfact.fact + content: '{ "fact_dir": "default" }' + - path: /tmp/custom_facts.d/testfact.fact + content: '{ "fact_dir": "custom" }' + copy: + dest: "{{ item.path }}" + content: "{{ item.content }}" + +- hosts: facthost9 + tags: [ 'fact_local'] + connection: local + gather_facts: yes + tasks: + - name: Test reading facts from default fact_path + assert: + that: + - 
'"{{ ansible_local.testfact.fact_dir }}" == "default"' + +- hosts: facthost9 + tags: [ 'fact_local'] + connection: local + gather_facts: yes + fact_path: /tmp/custom_facts.d + tasks: + - name: Test reading facts from custom fact_path + assert: + that: + - '"{{ ansible_local.testfact.fact_dir }}" == "custom"' + +- hosts: facthost20 + tags: [ 'fact_facter_ohai' ] + connection: local + gather_subset: + - facter + - ohai + gather_facts: yes + tasks: + - name: Test that retrieving facter and ohai doesnt fail + assert: + # not much to assert here, aside from not crashing, since test images dont have + # facter/ohai + that: + - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"' + +- hosts: facthost9 + tags: [ 'fact_file_utils' ] + connection: local + gather_facts: false + tasks: + - block: + - name: Ensure get_file_content works when strip=False + file_utils: + test: strip + + - assert: + that: + - ansible_facts.get('etc_passwd_newlines', 0) + 1 == ansible_facts.get('etc_passwd_newlines_unstripped', 0) + + - name: Make an empty file + file: + path: "{{ output_dir }}/empty_file" + state: touch + + - name: Ensure get_file_content gives default when file is empty + file_utils: + test: default + touch_file: "{{ output_dir }}/empty_file" + + - assert: + that: + - ansible_facts.get('touch_default') == 'i am a default' + + - copy: + dest: "{{ output_dir }}/1charsep" + content: "foo:bar:baz:buzz:" + + - copy: + dest: "{{ output_dir }}/2charsep" + content: "foo::bar::baz::buzz::" + + - name: Ensure get_file_lines works as expected with specified 1-char line_sep + file_utils: + test: line_sep + line_sep_file: "{{ output_dir }}/1charsep" + line_sep_sep: ":" + + - assert: + that: + - ansible_facts.get('line_sep') == ['foo', 'bar', 'baz', 'buzz'] + + - name: Ensure get_file_lines works as expected with specified 1-char line_sep + file_utils: + test: line_sep + line_sep_file: "{{ output_dir }}/2charsep" + line_sep_sep: "::" + + - assert: + that: + - ansible_facts.get('line_sep') == ['foo', 'bar', 'baz', 'buzz', ''] + + - name: Ensure get_mount_size fails gracefully + file_utils: + test: invalid_mountpoint + + - assert: + that: + - ansible_facts['invalid_mountpoint']|length == 0 + + always: + - name: Remove test files + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ output_dir }}/empty_file" + - "{{ output_dir }}/1charsep" + - "{{ output_dir }}/2charsep" diff --git a/test/integration/targets/gathering_facts/test_prevent_injection.yml b/test/integration/targets/gathering_facts/test_prevent_injection.yml new file mode 100644 index 00000000..f304fe88 --- /dev/null +++ b/test/integration/targets/gathering_facts/test_prevent_injection.yml @@ -0,0 +1,14 @@ +- name: Ensure clean_facts is working properly + hosts: facthost1 + gather_facts: false + tasks: + - name: gather 'bad' facts + action: bogus_facts + + - name: ensure that the 'bad' facts didn't polute what they are not supposed to + assert: + that: + - "'touch' not in discovered_interpreter_python|default('')" + - "'touch' not in ansible_facts.get('discovered_interpreter_python', '')" + - "'touch' not in ansible_facts.get('ansible_facts', {}).get('discovered_interpreter_python', '')" + - bogus_overwrite is undefined diff --git a/test/integration/targets/gathering_facts/test_run_once.yml b/test/integration/targets/gathering_facts/test_run_once.yml new file mode 100644 index 00000000..37023b24 --- /dev/null +++ b/test/integration/targets/gathering_facts/test_run_once.yml @@ -0,0 +1,32 @@ +--- +- hosts: facthost1 + gather_facts: no + tasks: + - 
+- hosts: facthost1 + gather_facts: no + tasks: + - name: check that smart gathering is enabled + fail: + msg: 'smart gathering must be enabled' + when: 'lookup("env", "ANSIBLE_GATHERING") != "smart"' + - name: install test local facts + copy: + src: uuid.fact + dest: /etc/ansible/facts.d/ + mode: 0755 + +- hosts: facthost1,facthost2 + gather_facts: yes + run_once: yes + tasks: + - block: + - name: 'Check the same host is used' + assert: + that: 'hostvars.facthost1.ansible_fqdn == hostvars.facthost2.ansible_fqdn' + msg: 'This test requires 2 inventory hosts referring to the same host.' + - name: "Check that run_once doesn't prevent fact gathering (#39453)" + assert: + that: 'hostvars.facthost1.ansible_local.uuid != hostvars.facthost2.ansible_local.uuid' + msg: "{{ 'Same value for ansible_local.uuid on both hosts: ' ~ hostvars.facthost1.ansible_local.uuid }}" + always: + - name: remove test local facts + file: + path: /etc/ansible/facts.d/uuid.fact + state: absent diff --git a/test/integration/targets/gathering_facts/two_one.json b/test/integration/targets/gathering_facts/two_one.json new file mode 100644 index 00000000..4b34a2d5 --- /dev/null +++ b/test/integration/targets/gathering_facts/two_one.json @@ -0,0 +1,27 @@ +{ + "_ansible_facts_gathered": true, + "common_dict_fact": { + "key_four": "from facts_two", + "key_one": "from facts_one", + "key_two": "from facts_one" + }, + "common_fact": "also from facts_one module", + "common_list_fact": [ + "two", + "four", + "one", + "three", + "five" + ], + "common_list_fact2": [ + "four", + "four", + "one", + "two", + "three", + "five", + "five" + ], + "factsone": "from facts_one module", + "factstwo": "from facts_two module" +} \ No newline at end of file diff --git a/test/integration/targets/gathering_facts/uuid.fact b/test/integration/targets/gathering_facts/uuid.fact new file mode 100644 index 00000000..79e3f626 --- /dev/null +++ b/test/integration/targets/gathering_facts/uuid.fact @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +import json +import uuid + + +# return a random string +print(json.dumps(str(uuid.uuid4()))) diff --git a/test/integration/targets/gathering_facts/verify_merge_facts.yml b/test/integration/targets/gathering_facts/verify_merge_facts.yml new file mode 100644 index 00000000..d2144024 --- /dev/null +++ b/test/integration/targets/gathering_facts/verify_merge_facts.yml @@ -0,0 +1,41 @@ +- name: run one and two, verify merge is as expected + hosts: localhost + vars: + ansible_facts_modules: + - facts_one + - facts_two + tasks: + + - name: populate original + include_vars: + name: original + file: one_two.json + + - name: fail if ref file is updated + assert: + msg: '{{ansible_facts}} vs {{original}}' + that: + - ansible_facts|to_json(indent=4, sort_keys=True) == original|to_json(indent=4, sort_keys=True) + + - name: clear existing facts for next play + meta: clear_facts
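+# When ansible_facts_modules lists several modules, their results are merged: +# dict facts combine key by key, and a scalar key set by both modules keeps +# the value from the module that runs last; one_two.json and two_one.json +# record the expected merge for each ordering.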
+ + +- name: run two and one, verify merge is as expected + hosts: localhost + vars: + ansible_facts_modules: + - facts_two + - facts_one + tasks: + + - name: populate original + include_vars: + name: original + file: two_one.json + + - name: fail if ref file is updated + assert: + msg: '{{ansible_facts}} vs {{original}}' + that: + - ansible_facts|to_json(indent=4, sort_keys=True) == original|to_json(indent=4, sort_keys=True) diff --git a/test/integration/targets/gathering_facts/verify_subset.yml b/test/integration/targets/gathering_facts/verify_subset.yml new file mode 100644 index 00000000..89132756 --- /dev/null +++ b/test/integration/targets/gathering_facts/verify_subset.yml @@ -0,0 +1,13 @@ +- hosts: localhost + gather_facts: false + tasks: + - name: bad subset used + setup: gather_subset=nonsense + register: bad_sub + ignore_errors: true + + - name: verify we fail the right way + assert: + that: + - bad_sub is failed + - "'MODULE FAILURE' not in bad_sub['msg']" diff --git a/test/integration/targets/get_url/aliases b/test/integration/targets/get_url/aliases new file mode 100644 index 00000000..f82a267b --- /dev/null +++ b/test/integration/targets/get_url/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group1 +needs/httptester +skip/aix diff --git a/test/integration/targets/get_url/files/testserver.py b/test/integration/targets/get_url/files/testserver.py new file mode 100644 index 00000000..81043b66 --- /dev/null +++ b/test/integration/targets/get_url/files/testserver.py @@ -0,0 +1,20 @@ +import sys + +if __name__ == '__main__': + if sys.version_info[0] >= 3: + import http.server + import socketserver + PORT = int(sys.argv[1]) + + class Handler(http.server.SimpleHTTPRequestHandler): + pass + + Handler.extensions_map['.json'] = 'application/json' + httpd = socketserver.TCPServer(("", PORT), Handler) + httpd.serve_forever() + else: + import mimetypes + mimetypes.init() + mimetypes.add_type('application/json', '.json') + import SimpleHTTPServer + SimpleHTTPServer.test() diff --git a/test/integration/targets/get_url/meta/main.yml b/test/integration/targets/get_url/meta/main.yml new file mode 100644 index 00000000..2c2155ab --- /dev/null +++ b/test/integration/targets/get_url/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - prepare_tests + - prepare_http_tests + - setup_remote_tmp_dir diff --git a/test/integration/targets/get_url/tasks/main.yml b/test/integration/targets/get_url/tasks/main.yml new file mode 100644 index 00000000..052bde22 --- /dev/null +++ b/test/integration/targets/get_url/tasks/main.yml @@ -0,0 +1,463 @@ +# Test code for the get_url module +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
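+# The first task below probes for ssl.SSLContext, which only exists on Python +# 2.7.9+/3.2+ and is what enables SNI in the stdlib; roughly equivalent to +# running (illustrative): python -c 'from ssl import SSLContext'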
+ +- name: Determine if python looks like it will support modern ssl features like SNI + command: "{{ ansible_python.executable }} -c 'from ssl import SSLContext'" + ignore_errors: True + register: python_test + +- name: Set python_has_ssl_context if we have it + set_fact: + python_has_ssl_context: True + when: python_test.rc == 0 + +- name: Set python_has_ssl_context False if we don't have it + set_fact: + python_has_ssl_context: False + when: python_test.rc != 0 + +- name: Define test files for file schema + set_fact: + geturl_srcfile: "{{ remote_tmp_dir }}/aurlfile.txt" + geturl_dstfile: "{{ remote_tmp_dir }}/aurlfile_copy.txt" + +- name: Create source file + copy: + dest: "{{ geturl_srcfile }}" + content: "foobar" + register: source_file_copied + +- name: test file fetch + get_url: + url: "file://{{ source_file_copied.dest }}" + dest: "{{ geturl_dstfile }}" + register: result + +- name: assert success and change + assert: + that: + - result is changed + - '"OK" in result.msg' + +- name: test nonexistent file fetch + get_url: + url: "file://{{ source_file_copied.dest }}NOFILE" + dest: "{{ geturl_dstfile }}NOFILE" + register: result + ignore_errors: True + +- name: assert failure for the nonexistent file + assert: + that: + - result is failed + +- name: test HTTP HEAD request for file in check mode + get_url: + url: "https://{{ httpbin_host }}/get" + dest: "{{ remote_tmp_dir }}/get_url_check.txt" + force: yes + check_mode: True + register: result + +- name: assert that the HEAD request was successful in check mode + assert: + that: + - result is changed + - '"OK" in result.msg' + +- name: test HTTP HEAD for nonexistent URL in check mode + get_url: + url: "https://{{ httpbin_host }}/DOESNOTEXIST" + dest: "{{ remote_tmp_dir }}/shouldnotexist.html" + force: yes + check_mode: True + register: result + ignore_errors: True + +- name: assert that HEAD request for nonexistent URL failed + assert: + that: + - result is failed + +- name: test https fetch + get_url: url="https://{{ httpbin_host }}/get" dest={{remote_tmp_dir}}/get_url.txt force=yes + register: result + +- name: assert the get_url call was successful + assert: + that: + - result is changed + - '"OK" in result.msg' + +- name: test https fetch to a site with mismatched hostname and certificate + get_url: + url: "https://{{ badssl_host }}/" + dest: "{{ remote_tmp_dir }}/shouldnotexist.html" + ignore_errors: True + register: result + +- stat: + path: "{{ remote_tmp_dir }}/shouldnotexist.html" + register: stat_result + +- name: Assert that the file was not downloaded + assert: + that: + - "result is failed" + - "'Failed to validate the SSL certificate' in result.msg or 'Hostname mismatch' in result.msg or ( result.msg is match('hostname .* doesn.t match .*'))" + - "stat_result.stat.exists == false" + +- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no + get_url: + url: "https://{{ badssl_host }}/" + dest: "{{ remote_tmp_dir }}/get_url_no_validate.html" + validate_certs: no + register: result + +- stat: + path: "{{ remote_tmp_dir }}/get_url_no_validate.html" + register: stat_result + +- name: Assert that the file was downloaded + assert: + that: + - result is changed + - "stat_result.stat.exists == true" + +# SNI Tests +# SNI is only built into the stdlib from python-2.7.9 onwards +- name: Test that SNI works + get_url: + url: 'https://{{ sni_host }}/' + dest: "{{ remote_tmp_dir }}/sni.html" + register: get_url_result + ignore_errors: True
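+# SNI (Server Name Indication) sends the requested hostname in the TLS +# ClientHello so a shared endpoint can present the matching certificate; +# clients without it receive the wrong certificate and fail validation, which +# is why the assertions below branch on python_has_ssl_context.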
+ +- command: "grep '{{ sni_host }}' {{ remote_tmp_dir }}/sni.html" + register: data_result + when: python_has_ssl_context + +- debug: + var: get_url_result + +- name: Assert that SNI works with this python version + assert: + that: + - 'data_result.rc == 0' + when: python_has_ssl_context + +# If the client doesn't support SNI then get_url should have failed with a certificate mismatch +- name: Assert that hostname verification failed because SNI is not supported on this version of python + assert: + that: + - 'get_url_result is failed' + when: not python_has_ssl_context + +# These tests are just side effects of how the site is hosted. It's not +# specifically a test site, so the tests may break if the hosting changes. +- name: Test that SNI works + get_url: + url: 'https://{{ sni_host }}/' + dest: "{{ remote_tmp_dir }}/sni.html" + register: get_url_result + ignore_errors: True + +- command: "grep '{{ sni_host }}' {{ remote_tmp_dir }}/sni.html" + register: data_result + when: python_has_ssl_context + +- debug: + var: get_url_result + +- name: Assert that SNI works with this python version + assert: + that: + - 'data_result.rc == 0' + - 'get_url_result is not failed' + when: python_has_ssl_context + +# If the client doesn't support SNI then get_url should have failed with a certificate mismatch +- name: Assert that hostname verification failed because SNI is not supported on this version of python + assert: + that: + - 'get_url_result is failed' + when: not python_has_ssl_context +# End hacky SNI test section + +- name: Test get_url with redirect + get_url: + url: 'https://{{ httpbin_host }}/redirect/6' + dest: "{{ remote_tmp_dir }}/redirect.json" + +- name: Test that setting file modes work + get_url: + url: 'https://{{ httpbin_host }}/' + dest: '{{ remote_tmp_dir }}/test' + mode: '0707' + register: result + +- stat: + path: "{{ remote_tmp_dir }}/test" + register: stat_result + +- name: Assert that the file has the right permissions + assert: + that: + - result is changed + - "stat_result.stat.mode == '0707'" + +- name: Test that setting file modes on an already downloaded file work + get_url: + url: 'https://{{ httpbin_host }}/' + dest: '{{ remote_tmp_dir }}/test' + mode: '0070' + register: result + +- stat: + path: "{{ remote_tmp_dir }}/test" + register: stat_result + +- name: Assert that the file has the right permissions + assert: + that: + - result is changed + - "stat_result.stat.mode == '0070'" + +# https://github.com/ansible/ansible/pull/65307/ +- name: Test that on http status 304, we get a status_code field. + get_url: + url: 'https://{{ httpbin_host }}/status/304' + dest: '{{ remote_tmp_dir }}/test' + register: result + +- name: Assert that we get the appropriate status_code + assert: + that: + - "'status_code' in result" + - "result.status_code == 304" + +# https://github.com/ansible/ansible/issues/29614 +- name: Change mode on an already downloaded file and specify checksum + get_url: + url: 'https://{{ httpbin_host }}/get' + dest: '{{ remote_tmp_dir }}/test' + checksum: 'sha256:7036ede810fad2b5d2e7547ec703cae8da61edbba43c23f9d7203a0239b765c4' + mode: '0775' + register: result + +- stat: + path: "{{ remote_tmp_dir }}/test" + register: stat_result + +- name: Assert that file permissions on already downloaded file were changed + assert: + that: + - result is changed + - "stat_result.stat.mode == '0775'"
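+# get_url's checksum takes '<algorithm>:<hex digest>'; when dest already +# exists and its digest matches, the download is skipped and the task reports +# no change, e.g. (digest reused from the task above): +#   checksum: 'sha256:7036ede810fad2b5d2e7547ec703cae8da61edbba43c23f9d7203a0239b765c4'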
+ +- name: test checksum match in check mode + get_url: + url: 'https://{{ httpbin_host }}/get' + dest: '{{ remote_tmp_dir }}/test' + checksum: 'sha256:7036ede810fad2b5d2e7547ec703cae8da61edbba43c23f9d7203a0239b765c4' + check_mode: True + register: result + +- name: Assert that check mode was green + assert: + that: + - result is not changed + +- name: Get a file that already exists with a checksum + get_url: + url: 'https://{{ httpbin_host }}/cache' + dest: '{{ remote_tmp_dir }}/test' + checksum: 'sha1:{{ stat_result.stat.checksum }}' + register: result + +- name: Assert that the file was not downloaded + assert: + that: + - result.msg == 'file already exists' + +- name: Get a file that already exists + get_url: + url: 'https://{{ httpbin_host }}/cache' + dest: '{{ remote_tmp_dir }}/test' + register: result + +- name: Assert that we didn't re-download unnecessarily + assert: + that: + - result is not changed + - "'304' in result.msg" + +- name: get a file that doesn't respond to If-Modified-Since without checksum + get_url: + url: 'https://{{ httpbin_host }}/get' + dest: '{{ remote_tmp_dir }}/test' + register: result + +- name: Assert that we downloaded the file + assert: + that: + - result is changed + +# https://github.com/ansible/ansible/issues/27617 + +- name: set role facts + set_fact: + http_port: 27617 + files_dir: '{{ remote_tmp_dir }}/files' + +- name: create files_dir + file: + dest: "{{ files_dir }}" + state: directory + +- name: create src file + copy: + dest: '{{ files_dir }}/27617.txt' + content: "ptux" + +- name: create sha1 checksum file of src + copy: + dest: '{{ files_dir }}/sha1sum.txt' + content: | + a97e6837f60cec6da4491bab387296bbcd72bdba 27617.txt + 3911340502960ca33aece01129234460bfeb2791 not_target1.txt + 1b4b6adf30992cedb0f6edefd6478ff0a593b2e4 not_target2.txt + +- name: create sha256 checksum file of src + copy: + dest: '{{ files_dir }}/sha256sum.txt' + content: | + b1b6ce5073c8fac263a8fc5edfffdbd5dec1980c784e09c5bc69f8fb6056f006 27617.txt + 30949cc401e30ac494d695ab8764a9f76aae17c5d73c67f65e9b558f47eff892 not_target1.txt + d0dbfc1945bc83bf6606b770e442035f2c4e15c886ee0c22fb3901ba19900b5b not_target2.txt
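+# checksum can also point at a checksum file by URL; get_url fetches it and +# uses the line whose filename matches dest, based on the "<digest> <name>" +# layout written above, e.g.: +#   checksum: 'sha256:http://localhost:{{ http_port }}/sha256sum.txt'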
+ +- name: create sha256 checksum file of src with a dot leading path + copy: + dest: '{{ files_dir }}/sha256sum_with_dot.txt' + content: | + b1b6ce5073c8fac263a8fc5edfffdbd5dec1980c784e09c5bc69f8fb6056f006 ./27617.txt + 30949cc401e30ac494d695ab8764a9f76aae17c5d73c67f65e9b558f47eff892 ./not_target1.txt + d0dbfc1945bc83bf6606b770e442035f2c4e15c886ee0c22fb3901ba19900b5b ./not_target2.txt + +- copy: + src: "testserver.py" + dest: "{{ remote_tmp_dir }}/testserver.py" + +- name: start SimpleHTTPServer for issue 27617 + shell: cd {{ files_dir }} && {{ ansible_python.executable }} {{ remote_tmp_dir }}/testserver.py {{ http_port }} + async: 90 + poll: 0 + +- name: Wait for SimpleHTTPServer to come up online + wait_for: + host: 'localhost' + port: '{{ http_port }}' + state: started + +- name: download src with sha1 checksum url + get_url: + url: 'http://localhost:{{ http_port }}/27617.txt' + dest: '{{ remote_tmp_dir }}' + checksum: 'sha1:http://localhost:{{ http_port }}/sha1sum.txt' + register: result_sha1 + +- stat: + path: "{{ remote_tmp_dir }}/27617.txt" + register: stat_result_sha1 + +- name: download src with sha256 checksum url + get_url: + url: 'http://localhost:{{ http_port }}/27617.txt' + dest: '{{ remote_tmp_dir }}/27617sha256.txt' + checksum: 'sha256:http://localhost:{{ http_port }}/sha256sum.txt' + register: result_sha256 + +- stat: + path: "{{ remote_tmp_dir }}/27617.txt" + register: stat_result_sha256 + +- name: download src with sha256 checksum url with dot leading paths + get_url: + url: 'http://localhost:{{ http_port }}/27617.txt' + dest: '{{ remote_tmp_dir }}/27617sha256_with_dot.txt' + checksum: 'sha256:http://localhost:{{ http_port }}/sha256sum_with_dot.txt' + register: result_sha256_with_dot + +- stat: + path: "{{ remote_tmp_dir }}/27617sha256_with_dot.txt" + register: stat_result_sha256_with_dot + +- name: Assert that the file was downloaded + assert: + that: + - result_sha1 is changed + - result_sha256 is changed + - result_sha256_with_dot is changed + - "stat_result_sha1.stat.exists == true" + - "stat_result_sha256.stat.exists == true" + - "stat_result_sha256_with_dot.stat.exists == true" + +# https://github.com/ansible/ansible/issues/16191 +- name: Test url split with no filename + get_url: + url: https://{{ httpbin_host }} + dest: "{{ remote_tmp_dir }}" + +- name: Test headers dict + get_url: + url: https://{{ httpbin_host }}/headers + headers: + Foo: bar + Baz: qux + dest: "{{ remote_tmp_dir }}/headers_dict.json" + +- name: Get downloaded file + slurp: + src: "{{ remote_tmp_dir }}/headers_dict.json" + register: result + +- name: Assert that the headers dict was sent + assert: + that: + - (result.content | b64decode | from_json).headers.get('Foo') == 'bar' + - (result.content | b64decode | from_json).headers.get('Baz') == 'qux' + +- name: Test client cert auth, with certs + get_url: + url: "https://ansible.http.tests/ssl_client_verify" + client_cert: "{{ remote_tmp_dir }}/client.pem" + client_key: "{{ remote_tmp_dir }}/client.key" + dest: "{{ remote_tmp_dir }}/ssl_client_verify" + when: has_httptester + +- name: Get downloaded file + slurp: + src: "{{ remote_tmp_dir }}/ssl_client_verify" + register: result + when: has_httptester + +- name: Assert that the ssl_client_verify file contains the correct content + assert: + that: + - '(result.content | b64decode) == "ansible.http.tests:SUCCESS"' + when: has_httptester diff --git a/test/integration/targets/getent/aliases b/test/integration/targets/getent/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/getent/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/getent/meta/main.yml b/test/integration/targets/getent/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/getent/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/getent/tasks/main.yml b/test/integration/targets/getent/tasks/main.yml new file mode 100644 index 00000000..825ad5ea --- /dev/null +++ b/test/integration/targets/getent/tasks/main.yml @@ -0,0 +1,46 @@ +# Test code for the getent module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
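+# getent wraps the getent(1) lookup and registers getent_<database> facts; +# for a passwd query keyed on root, getent_passwd.root holds the six +# remaining colon-separated fields (password, uid, gid, gecos, home, shell), +# which is what the length assertion below checks.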
+- name: check for getent command + shell: which getent + failed_when: False + register: getent_check +## +## getent +## +- block: + - name: run getent with specified service + getent: + database: passwd + key: root + service: files + register: getent_test0 + when: ansible_system != 'FreeBSD' + - name: run getent w/o specified service (FreeBSD) + getent: + database: passwd + key: root + register: getent_test0 + when: ansible_system == 'FreeBSD' + - debug: var=getent_test0 + - name: validate results + assert: + that: + - 'getent_passwd is defined' + - 'getent_passwd.root is defined' + - 'getent_passwd.root|length == 6' + when: getent_check.rc == 0 diff --git a/test/integration/targets/git/aliases b/test/integration/targets/git/aliases new file mode 100644 index 00000000..f71c8117 --- /dev/null +++ b/test/integration/targets/git/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +skip/aix diff --git a/test/integration/targets/git/handlers/cleanup-default.yml b/test/integration/targets/git/handlers/cleanup-default.yml new file mode 100644 index 00000000..02a79882 --- /dev/null +++ b/test/integration/targets/git/handlers/cleanup-default.yml @@ -0,0 +1,6 @@ +# TODO remove everything we'd installed (see git_required_packages), not just git +# problem is that we should not remove what we hadn't installed +- name: remove git + package: + name: git + state: absent diff --git a/test/integration/targets/git/handlers/cleanup-freebsd.yml b/test/integration/targets/git/handlers/cleanup-freebsd.yml new file mode 100644 index 00000000..1ee35013 --- /dev/null +++ b/test/integration/targets/git/handlers/cleanup-freebsd.yml @@ -0,0 +1,5 @@ +- name: remove git from FreeBSD + pkgng: + name: git + state: absent + autoremove: yes diff --git a/test/integration/targets/git/handlers/main.yml b/test/integration/targets/git/handlers/main.yml new file mode 100644 index 00000000..875f513a --- /dev/null +++ b/test/integration/targets/git/handlers/main.yml @@ -0,0 +1,7 @@ +- name: cleanup + include_tasks: "{{ cleanup_filename }}" + with_first_found: + - "cleanup-{{ ansible_distribution | lower }}.yml" + - "cleanup-default.yml" + loop_control: + loop_var: cleanup_filename diff --git a/test/integration/targets/git/meta/main.yml b/test/integration/targets/git/meta/main.yml new file mode 100644 index 00000000..34a77cb7 --- /dev/null +++ b/test/integration/targets/git/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_gnutar diff --git 
a/test/integration/targets/git/tasks/ambiguous-ref.yml b/test/integration/targets/git/tasks/ambiguous-ref.yml new file mode 100644 index 00000000..f06112e5 --- /dev/null +++ b/test/integration/targets/git/tasks/ambiguous-ref.yml @@ -0,0 +1,37 @@ +# test for https://github.com/ansible/ansible-modules-core/pull/3386 + +- name: AMBIGUOUS-REF | clone repo + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + +- name: AMBIGUOUS-REF | rename remote to be ambiguous + command: git remote rename origin v0.1 + args: + chdir: "{{ checkout_dir }}" + +- name: AMBIGUOUS-REF | switch to HEAD + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + remote: v0.1 + +- name: AMBIGUOUS-REF | rev-parse remote HEAD + command: git rev-parse v0.1/HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_remote_head + +- name: AMBIGUOUS-REF | rev-parse local HEAD + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_local_head + +- assert: + that: git_remote_head.stdout == git_local_head.stdout + +- name: AMBIGUOUS-REF | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" diff --git a/test/integration/targets/git/tasks/archive.yml b/test/integration/targets/git/tasks/archive.yml new file mode 100644 index 00000000..574559ef --- /dev/null +++ b/test/integration/targets/git/tasks/archive.yml @@ -0,0 +1,135 @@ +- name: ARCHIVE | Clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: ARCHIVE | Archive repo using various archival formats + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + archive: '{{ checkout_dir }}/test_role.{{ item }}' + register: git_archive + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +# The map filter was added in Jinja2 2.7, which is newer than the version on RHEL/CentOS 6, +# so we skip this validation on those hosts +- name: ARCHIVE | Assert that archives were downloaded + assert: + that: (git_archive.results | map(attribute='changed') | unique | list)[0] + when: + - "ansible_os_family == 'RedHat'" + - ansible_distribution_major_version is version('7', '>=') + +- name: ARCHIVE | Check if archive file is created or not + stat: + path: '{{ checkout_dir }}/test_role.{{ item }}' + register: archive_check + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Assert that archive files exist + assert: + that: (archive_check.results | map(attribute='stat.exists') | unique | list)[0] + when: + - "ansible_os_family == 'RedHat'" + - ansible_distribution_major_version is version('7', '>=') + +- name: ARCHIVE | Clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: ARCHIVE | Clone clean repo + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + +# Check git archive functionality without update +- name: ARCHIVE | Archive repo using various archival formats and without update + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + update: no + archive: '{{ checkout_dir }}/test_role.{{ item }}' + register: git_archive + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +# The map filter was added in Jinja2 2.7, which is newer than the version on RHEL/CentOS 6, +# so we skip this 
validation on those hosts +- name: ARCHIVE | Assert that archives were downloaded + assert: + that: (git_archive.results | map(attribute='changed') | unique | list)[0] + when: + - "ansible_os_family == 'RedHat'" + - ansible_distribution_major_version is version('7', '>=') + +- name: ARCHIVE | Check if archive file is created or not + stat: + path: '{{ checkout_dir }}/test_role.{{ item }}' + register: archive_check + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Assert that archive files exist + assert: + that: (archive_check.results | map(attribute='stat.exists') | unique | list)[0] + when: + - "ansible_os_family == 'RedHat'" + - ansible_distribution_major_version is version('7', '>=') + +- name: ARCHIVE | Inspect archive file + command: + cmd: "{{ git_list_commands[item] }} {{ checkout_dir }}/test_role.{{ item }}" + warn: no + register: archive_content + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +# Does not work on RedHat6 (jinja2 too old?) +- name: ARCHIVE | Ensure archive content is correct + assert: + that: + - item.stdout_lines | sort | first == 'defaults/' + with_items: "{{ archive_content.results }}" + when: + - ansible_os_family ~ ansible_distribution_major_version != 'RedHat6' + +- name: ARCHIVE | Clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: ARCHIVE | Generate an archive prefix + set_fact: + git_archive_prefix: '{{ range(2 ** 31, 2 ** 32) | random }}' # Generate some random archive prefix + +- name: ARCHIVE | Archive repo using various archival formats and with an archive prefix + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + archive: '{{ checkout_dir }}/test_role.{{ item }}' + archive_prefix: '{{ git_archive_prefix }}/' + register: git_archive + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Prepare the target for archive(s) extraction + file: + state: directory + path: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}' + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Extract the archive(s) into that target + unarchive: + src: '{{ checkout_dir }}/test_role.{{ item }}' + dest: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}' + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Check if prefix directory exists in what's extracted + find: + path: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}' + patterns: '{{ git_archive_prefix }}' + file_type: directory + register: archive_check + with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}" + +- name: ARCHIVE | Assert that prefix directory is found + assert: + that: '{{ item.matched == 1 }}' + with_items: "{{ archive_check.results }}" diff --git a/test/integration/targets/git/tasks/change-repo-url.yml b/test/integration/targets/git/tasks/change-repo-url.yml new file mode 100644 index 
00000000..b12fca1f --- /dev/null +++ b/test/integration/targets/git/tasks/change-repo-url.yml @@ -0,0 +1,132 @@ +# test change of repo url +# see https://github.com/ansible/ansible-modules-core/pull/721 + +- name: CHANGE-REPO-URL | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: CHANGE-REPO-URL | Clone example git repo + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}" + +- name: CHANGE-REPO-URL | Clone repo with changed url to the same place + git: + repo: "{{ repo_update_url_2 }}" + dest: "{{ checkout_dir }}" + register: clone2 + +- assert: + that: "clone2 is successful" + +- name: CHANGE-REPO-URL | check url updated + shell: git remote show origin | grep Fetch + register: remote_url + args: + chdir: "{{ checkout_dir }}" + environment: + LC_ALL: C + +- assert: + that: + - "'git-test-new' in remote_url.stdout" + - "'git-test-old' not in remote_url.stdout" + +- name: CHANGE-REPO-URL | check for new content in git-test-new + stat: path={{ checkout_dir }}/newfilename + register: repo_content + +- name: CHANGE-REPO-URL | assert presence of new file in repo (i.e. working copy updated) + assert: + that: "repo_content.stat.exists" + +# Make sure 'changed' result is accurate in check mode. +# See https://github.com/ansible/ansible-modules-core/pull/4243 + +- name: CHANGE-REPO-URL | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: CHANGE-REPO-URL | clone repo + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}" + +- name: CHANGE-REPO-URL | clone repo with same url to same destination + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}" + register: checkout_same_url + +- name: CHANGE-REPO-URL | check repo not changed + assert: + that: + - checkout_same_url is not changed + + +- name: CHANGE-REPO-URL | clone repo with new url to same destination + git: + repo: "{{ repo_update_url_2 }}" + dest: "{{ checkout_dir }}" + register: checkout_new_url + +- name: CHANGE-REPO-URL | check repo changed + assert: + that: + - checkout_new_url is changed + + +- name: CHANGE-REPO-URL | clone repo with new url in check mode + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}" + register: checkout_new_url_check_mode + check_mode: True + +- name: CHANGE-REPO-URL | check repo reported changed in check mode + assert: + that: + - checkout_new_url_check_mode is changed + when: git_version.stdout is version(git_version_supporting_ls_remote, '>=') + +- name: CHANGE-REPO-URL | clone repo with new url after check mode + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}" + register: checkout_new_url_after_check_mode + +- name: CHANGE-REPO-URL | check repo still changed after check mode + assert: + that: + - checkout_new_url_after_check_mode is changed + + +# Test that checkout by branch works when the branch is not in our current repo but the sha is + +- name: CHANGE-REPO-URL | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: CHANGE-REPO-URL | "Clone example git repo that we're going to modify" + git: + repo: "{{ repo_update_url_1 }}" + dest: "{{ checkout_dir }}/repo" + +- name: CHANGE-REPO-URL | Clone the repo again - this is what we test + git: + repo: "{{ checkout_dir }}/repo" + dest: "{{ checkout_dir }}/checkout" + +- name: CHANGE-REPO-URL | Add a branch to the repo + command: git branch new-branch + args: + chdir: "{{ checkout_dir }}/repo" + +- name: CHANGE-REPO-URL | Checkout the new branch in the checkout + git: + repo: "{{ 
checkout_dir}}/repo" + version: 'new-branch' + dest: "{{ checkout_dir }}/checkout" diff --git a/test/integration/targets/git/tasks/checkout-new-tag.yml b/test/integration/targets/git/tasks/checkout-new-tag.yml new file mode 100644 index 00000000..eac73f67 --- /dev/null +++ b/test/integration/targets/git/tasks/checkout-new-tag.yml @@ -0,0 +1,54 @@ +# test for https://github.com/ansible/ansible-modules-core/issues/527 +# clone a repo, add a tag to the same commit and try to checkout the new commit + + +- name: clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: checkout example repo + git: + repo: "{{ repo_dir }}/format1" + dest: "{{ checkout_dir }}" + +- name: get tags of head + command: git tag --contains + args: + chdir: "{{ checkout_dir }}" + register: listoftags + +- name: make sure the tag does not yet exist + assert: + that: + - "'newtag' not in listoftags.stdout_lines" + +- name: add tag in orig repo + command: git tag newtag + args: + chdir: "{{ repo_dir }}/format1" + +- name: update copy with new tag + git: + repo: "{{ repo_dir }}/format1" + dest: "{{checkout_dir}}" + version: newtag + register: update_new_tag + +- name: get tags of new head + command: git tag --contains + args: + chdir: "{{ checkout_dir }}" + register: listoftags + +- name: check new head + assert: + that: + - update_new_tag is not changed + - "'newtag' in listoftags.stdout_lines" + + +- name: clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" diff --git a/test/integration/targets/git/tasks/depth.yml b/test/integration/targets/git/tasks/depth.yml new file mode 100644 index 00000000..547f84f7 --- /dev/null +++ b/test/integration/targets/git/tasks/depth.yml @@ -0,0 +1,229 @@ +# Test the depth option and fetching revisions that were ignored first + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: DEPTH | Clone example git repo with depth 1 + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + +- name: DEPTH | try to access earlier commit + command: "git checkout {{git_shallow_head_1.stdout}}" + register: checkout_early + failed_when: False + args: + chdir: '{{ checkout_dir }}' + +- name: DEPTH | make sure the old commit was not fetched + assert: + that: 'checkout_early.rc != 0' + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +# tests https://github.com/ansible/ansible/issues/14954 +- name: DEPTH | fetch repo again with depth=1 + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + register: checkout2 + +- assert: + that: "checkout2 is not changed" + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: DEPTH | again try to access earlier commit + shell: "git checkout {{git_shallow_head_1.stdout}}" + register: checkout_early + failed_when: False + args: + chdir: '{{ checkout_dir }}' + +- name: DEPTH | again make sure the old commit was not fetched + assert: + that: 'checkout_early.rc != 0' + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +# make sure we are still able to fetch other versions +- name: DEPTH | Clone same repo with older version + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: earlytag + register: cloneold + +- assert: + that: cloneold is successful + +- name: DEPTH | try to access earlier commit + shell: "git checkout {{git_shallow_head_1.stdout}}" + args: + chdir: '{{ 
checkout_dir }}' + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +# Test for https://github.com/ansible/ansible/issues/21316 +- name: DEPTH | Shallow clone with tag + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: earlytag + register: cloneold + +- assert: + that: cloneold is successful + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + + + # Test for https://github.com/ansible/ansible-modules-core/issues/3456 + # clone a repo with depth and version specified + +- name: DEPTH | clone repo with both version and depth specified + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: master + +- name: DEPTH | run a second time (now fetch, not clone) + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: master + register: git_fetch + +- name: DEPTH | ensure the fetch succeeded + assert: + that: git_fetch is successful + + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: DEPTH | clone repo with both version and depth specified + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: master + +- name: DEPTH | switch to older branch with depth=1 (uses fetch) + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + version: earlybranch + register: git_fetch + +- name: DEPTH | ensure the fetch succeeded + assert: + that: git_fetch is successful + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +# test for https://github.com/ansible/ansible-modules-core/issues/3782 +# make sure shallow fetch works when no version is specified + +- name: DEPTH | checkout old repo + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + +- name: DEPTH | "update repo" + shell: echo "3" > a; git commit -a -m "3" + args: + chdir: "{{ repo_dir }}/shallow" + +- name: DEPTH | fetch updated repo + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow' + dest: '{{ checkout_dir }}' + depth: 1 + register: git_fetch + ignore_errors: yes + +- name: DEPTH | get "a" file + slurp: + src: '{{ checkout_dir }}/a' + register: a_file + +- name: DEPTH | check update arrived + assert: + that: + - "{{ a_file.content | b64decode | trim }} == 3" + - git_fetch is changed + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +# +# Make sure shallow fetch works when switching to (fetching) a new branch +# + +- name: DEPTH | clone from branch with depth specified + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow_branches' + dest: '{{ checkout_dir }}' + depth: 1 + version: test_branch + +- name: DEPTH | check if clone is shallow + stat: path={{ checkout_dir }}/.git/shallow + register: is_shallow + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: DEPTH | assert that clone is shallow + assert: + that: + - is_shallow.stat.exists + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: DEPTH | switch to new branch (fetch) with the shallow clone + git: + repo: 'file://{{ repo_dir|expanduser }}/shallow_branches' + dest: '{{ checkout_dir }}' + depth: 1 + version: new_branch + register: git_fetch + +- name: DEPTH | assert if switching a shallow clone to a new branch 
worked + assert: + that: + - git_fetch is changed + +- name: DEPTH | check if clone is still shallow + stat: path={{ checkout_dir }}/.git/shallow + register: is_shallow + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: DEPTH | assert that clone still is shallow + assert: + that: + - is_shallow.stat.exists + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: DEPTH | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" diff --git a/test/integration/targets/git/tasks/forcefully-fetch-tag.yml b/test/integration/targets/git/tasks/forcefully-fetch-tag.yml new file mode 100644 index 00000000..47c37478 --- /dev/null +++ b/test/integration/targets/git/tasks/forcefully-fetch-tag.yml @@ -0,0 +1,38 @@ +# Tests against https://github.com/ansible/ansible/issues/67972 + +# Do our first clone manually; there are no commits yet and Ansible doesn't like +# that. +- name: FORCEFULLY-FETCH-TAG | Clone the bare repo in a non-bare clone + shell: git clone {{ repo_dir }}/tag_force_push {{ repo_dir }}/tag_force_push_clone1 + +- name: FORCEFULLY-FETCH-TAG | Prepare repo with a tag + shell: | + echo 1337 > leet; + git add leet; + git commit -m uh-oh; + git tag -f herewego; + git push --tags origin master + args: + chdir: "{{ repo_dir }}/tag_force_push_clone1" + +- name: FORCEFULLY-FETCH-TAG | clone the repo for the second time + git: + repo: "{{ repo_dir }}/tag_force_push" + dest: "{{ repo_dir }}/tag_force_push_clone2" + +- name: FORCEFULLY-FETCH-TAG | Forcefully overwrite the tag in clone1 + shell: | + echo 1338 > leet; + git add leet; + git commit -m uh-oh; + git tag -f herewego; + git push -f --tags origin master + args: + chdir: "{{ repo_dir }}/tag_force_push_clone1" + +- name: FORCEFULLY-FETCH-TAG | Try to update the second clone + git: + repo: "{{ repo_dir }}/tag_force_push" + dest: "{{ repo_dir }}/tag_force_push_clone2" + force: yes + register: git_res diff --git a/test/integration/targets/git/tasks/formats.yml b/test/integration/targets/git/tasks/formats.yml new file mode 100644 index 00000000..e5fcda72 --- /dev/null +++ b/test/integration/targets/git/tasks/formats.yml @@ -0,0 +1,40 @@ +- name: FORMATS | initial checkout + git: + repo: "{{ repo_format1 }}" + dest: "{{ repo_dir }}/format1" + register: git_result + +- name: FORMATS | verify information about the initial clone + assert: + that: + - "'before' in git_result" + - "'after' in git_result" + - "not git_result.before" + - "git_result.changed" + +- name: FORMATS | repeated checkout + git: + repo: "{{ repo_format1 }}" + dest: "{{ repo_dir }}/format1" + register: git_result2 + +- name: FORMATS | check for tags + stat: + path: "{{ repo_dir }}/format1/.git/refs/tags" + register: tags + +- name: FORMATS | check for HEAD + stat: + path: "{{ repo_dir }}/format1/.git/HEAD" + register: head + +- name: FORMATS | assert presence of tags/trunk/branches + assert: + that: + - "tags.stat.isdir" + - "head.stat.isreg" + +- name: FORMATS | verify on a reclone things are marked unchanged + assert: + that: + - "not git_result2.changed" diff --git a/test/integration/targets/git/tasks/gpg-verification.yml b/test/integration/targets/git/tasks/gpg-verification.yml new file mode 100644 index 00000000..8c8834a9 --- /dev/null +++ b/test/integration/targets/git/tasks/gpg-verification.yml @@ -0,0 +1,212 @@ +# Test for verification of GnuPG signatures + +- name: GPG-VERIFICATION | Create GnuPG verification workdir + tempfile: + state: directory + register: git_gpg_workdir + +- name: 
GPG-VERIFICATION | Define variables based on workdir + set_fact: + git_gpg_keyfile: "{{ git_gpg_workdir.path }}/testkey.asc" + git_gpg_source: "{{ git_gpg_workdir.path }}/source" + git_gpg_dest: "{{ git_gpg_workdir.path }}/dest" + git_gpg_gpghome: "{{ git_gpg_workdir.path }}/gpg" + +- name: GPG-VERIFICATION | Temporarily store GnuPG test key + copy: + content: "{{ git_gpg_testkey }}" + dest: "{{ git_gpg_keyfile }}" + +- name: GPG-VERIFICATION | Create temporary GNUPGHOME directory + file: + path: "{{ git_gpg_gpghome }}" + state: directory + mode: 0700 + +- name: GPG-VERIFICATION | Import GnuPG test key + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + command: gpg --import {{ git_gpg_keyfile }} + +- name: GPG-VERIFICATION | Create local GnuPG signed repository directory + file: + path: "{{ git_gpg_source }}" + state: directory + +- name: GPG-VERIFICATION | Generate local GnuPG signed repository + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + shell: | + set -e + git init + touch an_empty_file + git add an_empty_file + git commit --no-gpg-sign --message "Commit, and don't sign" + git tag lightweight_tag/unsigned_commit HEAD + git commit --allow-empty --gpg-sign --message "Commit, and sign" + git tag lightweight_tag/signed_commit HEAD + git tag --annotate --message "This is not a signed tag" unsigned_annotated_tag HEAD + git commit --allow-empty --gpg-sign --message "Commit, and sign" + git tag --sign --message "This is a signed tag" signed_annotated_tag HEAD + git checkout -b some_branch/signed_tip master + git commit --allow-empty --gpg-sign --message "Commit, and sign" + git checkout -b another_branch/unsigned_tip master + git commit --allow-empty --no-gpg-sign --message "Commit, and don't sign" + git checkout master + args: + chdir: "{{ git_gpg_source }}" + +- name: GPG-VERIFICATION | Get hash of an unsigned commit + command: git show-ref --hash --verify refs/tags/lightweight_tag/unsigned_commit + args: + chdir: "{{ git_gpg_source }}" + register: git_gpg_unsigned_commit + +- name: GPG-VERIFICATION | Get hash of a signed commit + command: git show-ref --hash --verify refs/tags/lightweight_tag/signed_commit + args: + chdir: "{{ git_gpg_source }}" + register: git_gpg_signed_commit + +- name: GPG-VERIFICATION | Clone repo and verify signed HEAD + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + verify_commit: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify a signed lightweight tag + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: lightweight_tag/signed_commit + verify_commit: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify an unsigned lightweight tag (should fail) + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: lightweight_tag/unsigned_commit + verify_commit: yes + register: git_verify + ignore_errors: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Check that unsigned lightweight tag verification failed + assert: + that: + - git_verify is failed + - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+") + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify a signed commit + environment: + - GNUPGHOME: 
"{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: "{{ git_gpg_signed_commit.stdout }}" + verify_commit: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify an unsigned commit + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: "{{ git_gpg_unsigned_commit.stdout }}" + verify_commit: yes + register: git_verify + ignore_errors: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Check that unsigned commit verification failed + assert: + that: + - git_verify is failed + - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+") + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify a signed annotated tag + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: signed_annotated_tag + verify_commit: yes + +- name: GPG-VERIFICATION | Clone repo and verify an unsigned annotated tag (should fail) + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: unsigned_annotated_tag + verify_commit: yes + register: git_verify + ignore_errors: yes + +- name: GPG-VERIFICATION | Check that unsigned annotated tag verification failed + assert: + that: + - git_verify is failed + - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+") + +- name: GPG-VERIFICATION | Clone repo and verify a signed branch + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: some_branch/signed_tip + verify_commit: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Clone repo and verify an unsigned branch (should fail) + environment: + - GNUPGHOME: "{{ git_gpg_gpghome }}" + git: + repo: "{{ git_gpg_source }}" + dest: "{{ git_gpg_dest }}" + version: another_branch/unsigned_tip + verify_commit: yes + register: git_verify + ignore_errors: yes + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Check that unsigned branch verification failed + assert: + that: + - git_verify is failed + - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+") + when: + - git_version.stdout is version("2.1.0", '>=') + +- name: GPG-VERIFICATION | Stop gpg-agent so we can remove any locks on the GnuPG dir + command: gpgconf --kill gpg-agent + environment: + GNUPGHOME: "{{ git_gpg_gpghome }}" + ignore_errors: yes + +- name: GPG-VERIFICATION | Remove GnuPG verification workdir + file: + path: "{{ git_gpg_workdir.path }}" + state: absent diff --git a/test/integration/targets/git/tasks/localmods.yml b/test/integration/targets/git/tasks/localmods.yml new file mode 100644 index 00000000..09a1326d --- /dev/null +++ b/test/integration/targets/git/tasks/localmods.yml @@ -0,0 +1,112 @@ +# test for https://github.com/ansible/ansible-modules-core/pull/5505 +- name: LOCALMODS | prepare old git repo + shell: rm -rf localmods; mkdir localmods; cd localmods; git init; echo "1" > a; git add a; git commit -m "1" + args: + chdir: "{{repo_dir}}" + +- name: LOCALMODS | checkout old repo + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + +- name: LOCALMODS | "update repo" + shell: echo "2" > a; git commit -a -m "2" + args: + 
chdir: "{{repo_dir}}/localmods" + +- name: LOCALMODS | "add local mods" + shell: echo "3" > a + args: + chdir: "{{ checkout_dir }}" + +- name: LOCALMODS | fetch with local mods without force (should fail) + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + register: git_fetch + ignore_errors: yes + +- name: LOCALMODS | check fetch with localmods failed + assert: + that: + - git_fetch is failed + +- name: LOCALMODS | fetch with local mods with force + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + force: True + register: git_fetch_force + ignore_errors: yes + +- name: LOCALMODS | get "a" file + slurp: + src: '{{ checkout_dir }}/a' + register: a_file + +- name: LOCALMODS | check update arrived + assert: + that: + - "{{ a_file.content | b64decode | trim }} == 2" + - git_fetch_force is changed + +- name: LOCALMODS | clear checkout_dir + file: state=absent path={{ checkout_dir }} + +# localmods and shallow clone +- name: LOCALMODS | prepare old git repo + shell: rm -rf localmods; mkdir localmods; cd localmods; git init; echo "1" > a; git add a; git commit -m "1" + args: + chdir: "{{repo_dir}}" + +- name: LOCALMODS | checkout old repo + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + depth: 1 + +- name: LOCALMODS | "update repo" + shell: echo "2" > a; git commit -a -m "2" + args: + chdir: "{{repo_dir}}/localmods" + +- name: LOCALMODS | "add local mods" + shell: echo "3" > a + args: + chdir: "{{ checkout_dir }}" + +- name: LOCALMODS | fetch with local mods without force (should fail) + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + depth: 1 + register: git_fetch + ignore_errors: yes + +- name: LOCALMODS | check fetch with localmods failed + assert: + that: + - git_fetch is failed + +- name: LOCALMODS | fetch with local mods with force + git: + repo: '{{ repo_dir }}/localmods' + dest: '{{ checkout_dir }}' + depth: 1 + force: True + register: git_fetch_force + ignore_errors: yes + +- name: LOCALMODS | get "a" file + slurp: + src: '{{ checkout_dir }}/a' + register: a_file + +- name: LOCALMODS | check update arrived + assert: + that: + - "{{ a_file.content | b64decode | trim }} == 2" + - git_fetch_force is changed + +- name: LOCALMODS | clear checkout_dir + file: state=absent path={{ checkout_dir }} diff --git a/test/integration/targets/git/tasks/main.yml b/test/integration/targets/git/tasks/main.yml new file mode 100644 index 00000000..722713bf --- /dev/null +++ b/test/integration/targets/git/tasks/main.yml @@ -0,0 +1,40 @@ +# test code for the git module +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- include_tasks: setup.yml +- include_tasks: setup-local-repos.yml + +- include_tasks: formats.yml +- include_tasks: missing_hostkey.yml +- include_tasks: no-destination.yml +- include_tasks: specific-revision.yml +- include_tasks: submodules.yml +- include_tasks: change-repo-url.yml +- include_tasks: depth.yml +- include_tasks: checkout-new-tag.yml +- include_tasks: gpg-verification.yml + when: + - not gpg_version.stderr + - gpg_version.stdout + - not (ansible_os_family == 'RedHat' and ansible_distribution_major_version is version('7', '<')) +- include_tasks: localmods.yml +- include_tasks: reset-origin.yml +- include_tasks: ambiguous-ref.yml +- include_tasks: archive.yml +- include_tasks: separate-git-dir.yml +- include_tasks: forcefully-fetch-tag.yml diff --git a/test/integration/targets/git/tasks/missing_hostkey.yml b/test/integration/targets/git/tasks/missing_hostkey.yml new file mode 100644 index 00000000..02d5be35 --- /dev/null +++ b/test/integration/targets/git/tasks/missing_hostkey.yml @@ -0,0 +1,48 @@ +- name: MISSING-HOSTKEY | checkout ssh://git@github.com repo without accept_hostkey (expected fail) + git: + repo: '{{ repo_format2 }}' + dest: '{{ checkout_dir }}' + ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts' + register: git_result + ignore_errors: true + +- assert: + that: + - git_result is failed + +- name: MISSING-HOSTKEY | checkout git@github.com repo with accept_hostkey (expected pass) + git: + repo: '{{ repo_format2 }}' + dest: '{{ checkout_dir }}' + accept_hostkey: true + key_file: '{{ github_ssh_private_key }}' + ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts' + register: git_result + when: github_ssh_private_key is defined + +- assert: + that: + - git_result is changed + when: github_ssh_private_key is defined + +- name: MISSING-HOSTKEY | clear checkout_dir + file: + state: absent + path: '{{ checkout_dir }}' + when: github_ssh_private_key is defined + +- name: MISSING-HOSTKEY | checkout ssh://git@github.com repo with accept_hostkey (expected pass) + git: + repo: '{{ repo_format3 }}' + dest: '{{ checkout_dir }}' + version: 'master' + accept_hostkey: false # should already have been accepted + key_file: '{{ github_ssh_private_key }}' + ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts' + register: git_result + when: github_ssh_private_key is defined + +- assert: + that: + - git_result is changed + when: github_ssh_private_key is defined diff --git a/test/integration/targets/git/tasks/no-destination.yml b/test/integration/targets/git/tasks/no-destination.yml new file mode 100644 index 00000000..1ef7f2fd --- /dev/null +++ b/test/integration/targets/git/tasks/no-destination.yml @@ -0,0 +1,13 @@ +# Test a non-updating repo query with no destination specified + +- name: NO-DESTINATION | get info on a repo without updating and with no destination specified + git: + repo: '{{ repo_dir }}/minimal' + update: no + clone: no + accept_hostkey: yes + register: git_result + +- assert: + that: + - git_result is changed diff --git a/test/integration/targets/git/tasks/reset-origin.yml b/test/integration/targets/git/tasks/reset-origin.yml new file mode 100644 index 00000000..8fddd4b1 --- /dev/null +++ b/test/integration/targets/git/tasks/reset-origin.yml @@ -0,0 +1,25 @@ +- name: RESET-ORIGIN | Clean up the directories + file: + state: absent + path: "{{ item }}" + with_items: + - "{{ repo_dir }}/origin" + - "{{ checkout_dir }}" + +- name: RESET-ORIGIN | Create a directory + file: + name: "{{ repo_dir }}/origin" + state: directory + +- name: 
RESET-ORIGIN | Initialise the repo with a file named origin, see github.com/ansible/ansible/pull/22502 + shell: git init; echo "PR 22502" > origin; git add origin; git commit -m "PR 22502" + args: + chdir: "{{ repo_dir }}/origin" + +- name: RESET-ORIGIN | Clone a git repo with a file named origin + git: + repo: "{{ repo_dir }}/origin" + dest: "{{ checkout_dir }}" + remote: origin + update: no + register: status diff --git a/test/integration/targets/git/tasks/separate-git-dir.yml b/test/integration/targets/git/tasks/separate-git-dir.yml new file mode 100644 index 00000000..5b874043 --- /dev/null +++ b/test/integration/targets/git/tasks/separate-git-dir.yml @@ -0,0 +1,132 @@ +# test code for repositories with separate git dir updating +# see https://github.com/ansible/ansible/pull/38016 +# see https://github.com/ansible/ansible/issues/30034 + +- name: SEPARATE-GIT-DIR | clear checkout_dir + file: + state: absent + path: '{{ checkout_dir }}' + +- name: SEPARATE-GIT-DIR | make a pre-existing repo dir + file: + state: directory + path: '{{ separate_git_dir }}' + +- name: SEPARATE-GIT-DIR | clone with a separate git dir + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + separate_git_dir: '{{ separate_git_dir }}' + ignore_errors: yes + register: result + +- name: SEPARATE-GIT-DIR | the clone will fail due to the pre-existing dir + assert: + that: 'result is failed' + +- name: SEPARATE-GIT-DIR | delete the pre-existing dir + file: + state: absent + path: '{{ separate_git_dir }}' + +- name: SEPARATE-GIT-DIR | clone again with a separate git dir + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + separate_git_dir: '{{ separate_git_dir }}' + +- name: SEPARATE-GIT-DIR | check the stat of git dir + stat: + path: '{{ separate_git_dir }}' + register: stat_result + +- name: SEPARATE-GIT-DIR | the git dir should exist + assert: + that: 'stat_result.stat.exists == True' + +- name: SEPARATE-GIT-DIR | update repo the usual way + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + separate_git_dir: '{{ separate_git_dir }}' + register: result + +- name: SEPARATE-GIT-DIR | update should not fail + assert: + that: + - result is not failed + +- name: SEPARATE-GIT-DIR | move the git dir to new place + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + separate_git_dir: '{{ separate_git_dir }}_new' + register: result + +- name: SEPARATE-GIT-DIR | the move should not fail + assert: + that: 'result is not failed' + +- name: SEPARATE-GIT-DIR | check the stat of new git dir + stat: + path: '{{ separate_git_dir }}_new' + register: stat_result + +- name: SEPARATE-GIT-DIR | the new git dir should exist + assert: + that: 'stat_result.stat.exists == True' + +- name: SEPARATE-GIT-DIR | test the update + git: + repo: '{{ repo_format1 }}' + dest: '{{ checkout_dir }}' + register: result + +- name: SEPARATE-GIT-DIR | the update should not fail + assert: + that: + - result is not failed + +- name: SEPARATE-GIT-DIR | set git dir to non-existent dir + shell: "echo gitdir: /dev/null/non-existent-dir > .git" + args: + chdir: "{{ checkout_dir }}" + +- name: SEPARATE-GIT-DIR | update repo the usual way + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + ignore_errors: yes + register: result + +- name: SEPARATE-GIT-DIR | check update has failed + assert: + that: + - result is failed + +- name: SEPARATE-GIT-DIR | set .git file to bad format + shell: "echo some text gitdir: {{ checkout_dir }} > .git" + args: + chdir: "{{ checkout_dir }}" + +- name: SEPARATE-GIT-DIR | update repo 
the usual way + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + ignore_errors: yes + register: result + +- name: SEPARATE-GIT-DIR | check update has failed + assert: + that: + - result is failed + +- name: SEPARATE-GIT-DIR | clear separate git dir + file: + state: absent + path: "{{ separate_git_dir }}_new" + +- name: SEPARATE-GIT-DIR | clear checkout_dir + file: + state: absent + path: '{{ checkout_dir }}' diff --git a/test/integration/targets/git/tasks/setup-local-repos.yml b/test/integration/targets/git/tasks/setup-local-repos.yml new file mode 100644 index 00000000..584a1693 --- /dev/null +++ b/test/integration/targets/git/tasks/setup-local-repos.yml @@ -0,0 +1,45 @@ +- name: SETUP-LOCAL-REPOS | create dirs + file: + name: "{{ item }}" + state: directory + with_items: + - "{{ repo_dir }}/minimal" + - "{{ repo_dir }}/shallow" + - "{{ repo_dir }}/shallow_branches" + - "{{ repo_dir }}/tag_force_push" + +- name: SETUP-LOCAL-REPOS | prepare minimal git repo + shell: git init; echo "1" > a; git add a; git commit -m "1" + args: + chdir: "{{ repo_dir }}/minimal" + +- name: SETUP-LOCAL-REPOS | prepare git repo for shallow clone + shell: | + git init; + echo "1" > a; git add a; git commit -m "1"; git tag earlytag; git branch earlybranch; + echo "2" > a; git add a; git commit -m "2"; + args: + chdir: "{{ repo_dir }}/shallow" + +- name: SETUP-LOCAL-REPOS | set old hash var for shallow test + command: 'git rev-parse HEAD~1' + register: git_shallow_head_1 + args: + chdir: "{{ repo_dir }}/shallow" + +- name: SETUP-LOCAL-REPOS | prepare tmp git repo with two branches + shell: | + git init + echo "1" > a; git add a; git commit -m "1" + git checkout -b test_branch; echo "2" > a; git commit -m "2 on branch" a + git checkout -b new_branch; echo "3" > a; git commit -m "3 on new branch" a + args: + chdir: "{{ repo_dir }}/shallow_branches" + +# Make this a bare repo; we need to be able to push to it from clones. +# We make the repo here for consistency with the other repos, +# but we finish setting it up in forcefully-fetch-tag.yml. 
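+#
+# An illustrative sketch of the moved-tag flow this bare repo supports
+# (hypothetical commands; the real steps live in forcefully-fetch-tag.yml):
+#
+#   git clone "$REPO_DIR/tag_force_push" work && cd work
+#   git commit --allow-empty -m "1"; git tag mytag; git push --tags origin master
+#   git commit --allow-empty -m "2"; git tag -f mytag; git push -f --tags origin master
+#
+# A consumer that already fetched mytag must then force-fetch to see it move.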
+- name: SETUP-LOCAL-REPOS | prepare tag_force_push git repo + shell: git init --bare + args: + chdir: "{{ repo_dir }}/tag_force_push" diff --git a/test/integration/targets/git/tasks/setup.yml b/test/integration/targets/git/tasks/setup.yml new file mode 100644 index 00000000..16c56904 --- /dev/null +++ b/test/integration/targets/git/tasks/setup.yml @@ -0,0 +1,43 @@ +- name: SETUP | clean out the output_dir + file: + path: "{{ output_dir }}" + state: absent + +- name: SETUP | create clean output_dir + file: + path: "{{ output_dir }}" + state: directory + +- name: SETUP | install git + package: + name: '{{ item }}' + when: ansible_distribution != "MacOSX" + notify: + - cleanup + with_items: "{{ git_required_packages[ansible_os_family | default('default') ] | default(git_required_packages.default) }}" + +- name: SETUP | verify that git is installed so this test can continue + shell: which git + +- name: SETUP | get git version; only versions newer than {{ git_version_supporting_depth }} have fixed depth handling + shell: git --version | grep 'git version' | sed 's/git version //' + register: git_version + +- name: SETUP | get gpg version + shell: gpg --version 2>&1 | head -1 | sed -e 's/gpg (GnuPG) //' + register: gpg_version + +- name: SETUP | set git global user.email if not already set + shell: git config --global user.email || git config --global user.email "noreply@example.com" + +- name: SETUP | set git global user.name if not already set + shell: git config --global user.name || git config --global user.name "Ansible Test Runner" + +- name: SETUP | create repo_dir + file: + path: "{{ repo_dir }}" + state: directory + +- name: SETUP | show git version + debug: + msg: "Running test with git {{ git_version.stdout }}" diff --git a/test/integration/targets/git/tasks/specific-revision.yml b/test/integration/targets/git/tasks/specific-revision.yml new file mode 100644 index 00000000..26fa7cf3 --- /dev/null +++ b/test/integration/targets/git/tasks/specific-revision.yml @@ -0,0 +1,238 @@ +# Test that a specific revision can be checked out + +- name: SPECIFIC-REVISION | clear checkout_dir + file: + state: absent + path: '{{ checkout_dir }}' + +- name: SPECIFIC-REVISION | clone to specific revision + git: + repo: "{{ repo_dir }}/format1" + dest: "{{ checkout_dir }}" + version: df4612ba925fbc1b3c51cbb006f51a0443bd2ce9 + +- name: SPECIFIC-REVISION | check HEAD after clone to revision + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "df4612ba925fbc1b3c51cbb006f51a0443bd2ce9"' + +- name: SPECIFIC-REVISION | update to specific revision + git: + repo: "{{ repo_dir }}/format1" + dest: "{{ checkout_dir }}" + version: 4e739a34719654db7b04896966e2354e1256ea5d + register: git_result + +- assert: + that: + - git_result is changed + +- name: SPECIFIC-REVISION | check HEAD after update to revision + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "4e739a34719654db7b04896966e2354e1256ea5d"' + +- name: SPECIFIC-REVISION | update to HEAD from detached HEAD state + git: + repo: "{{ repo_dir }}/format1" + dest: "{{ checkout_dir }}" + version: HEAD + register: git_result + +- assert: + that: + - git_result is changed + +# Test a revision not available under refs/heads/ or refs/tags/ + +- name: SPECIFIC-REVISION | attempt to get unavailable revision + git: + repo: "{{ repo_dir }}/format1" + dest: "{{ checkout_dir }}" + version: 
5473e343e33255f2da0b160f53135c56921d875c + ignore_errors: true + register: git_result + +- assert: + that: + - git_result is failed + +# Same as the previous test, but this time we specify which ref +# contains the SHA1 +- name: SPECIFIC-REVISION | update to revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 5473e343e33255f2da0b160f53135c56921d875c + refspec: refs/pull/7/merge + +- name: SPECIFIC-REVISION | check HEAD after update with refspec + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"' + +# try out combination of refspec and depth +- name: SPECIFIC-REVISION | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | update to revision by specifying the refspec with depth=1 + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 5473e343e33255f2da0b160f53135c56921d875c + refspec: refs/pull/7/merge + depth: 1 + +- name: SPECIFIC-REVISION | check HEAD after update with refspec + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"' + +- name: SPECIFIC-REVISION | try to access other commit + shell: git checkout 0ce1096 + register: checkout_shallow + failed_when: False + args: + chdir: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | "make sure the old commit was not fetched, task is 'forced success'" + assert: + that: + - checkout_shallow.rc != 0 + - checkout_shallow is successful + when: git_version.stdout is version(git_version_supporting_depth, '>=') + +- name: SPECIFIC-REVISION | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | clone to revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: "{{ checkout_dir }}" + version: 5473e343e33255f2da0b160f53135c56921d875c + refspec: refs/pull/7/merge + +- name: SPECIFIC-REVISION | check HEAD after update with refspec + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"' + +# Test that a forced shallow checkout referencing a branch only always fetches the latest head + +- name: SPECIFIC-REVISION | clear checkout_dir + file: + state: absent + path: "{{ item }}" + with_items: + - "{{ checkout_dir }}" + - "{{ checkout_dir }}.copy" + +- name: SPECIFIC-REVISION | create original repo dir + file: + state: directory + path: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | prepare original repo + shell: git init; echo "1" > a; git add a; git commit -m "1" + args: + chdir: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | clone example repo locally + git: + repo: "{{ checkout_dir }}" + dest: "{{ checkout_dir }}.copy" + +- name: SPECIFIC-REVISION | create branch in original + command: git checkout -b test/branch + args: + chdir: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | get commit for HEAD on new branch + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}.copy" + register: originaltip0 + +- name: SPECIFIC-REVISION | shallow force checkout new branch in copy + git: + repo: "{{ checkout_dir }}" + dest: "{{ checkout_dir }}.copy" + version: test/branch + depth: 1 + force: yes 
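+
+# At this point the copy is a depth-1 clone pinned at the tip of test/branch.
+# Conceptually (an illustrative sketch, not executed by the test):
+#
+#   cd "{{ checkout_dir }}.copy"
+#   git rev-parse HEAD          # == originaltip0
+#   git rev-list --count HEAD   # == 1, because of depth=1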
+ +- name: SPECIFIC-REVISION | create new commit in original + shell: git init; echo "2" > b; git add b; git commit -m "2" + args: + chdir: "{{ checkout_dir }}" + +- name: SPECIFIC-REVISION | get commit for new HEAD on original branch + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}" + register: originaltip1 + +- name: SPECIFIC-REVISION | get commit for HEAD on new branch + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}.copy" + register: newtip + +- name: SPECIFIC-REVISION | assert that copy is still pointing at previous tip + assert: + that: + - newtip.stdout == originaltip0.stdout + +- name: SPECIFIC-REVISION | create a local modification in the copy + shell: echo "3" > c + args: + chdir: "{{ checkout_dir }}.copy" + +- name: SPECIFIC-REVISION | shallow force checkout new branch in copy (again) + git: + repo: "{{ checkout_dir }}" + dest: "{{ checkout_dir }}.copy" + version: test/branch + depth: 1 + force: yes + +- name: SPECIFIC-REVISION | get commit for HEAD on new branch + command: git rev-parse HEAD + args: + chdir: "{{ checkout_dir }}.copy" + register: newtip + +- name: SPECIFIC-REVISION | make sure copy tip is not pointing at previous sha and that new tips match + assert: + that: + - newtip.stdout != originaltip0.stdout + - newtip.stdout == originaltip1.stdout diff --git a/test/integration/targets/git/tasks/submodules.yml b/test/integration/targets/git/tasks/submodules.yml new file mode 100644 index 00000000..647d1e23 --- /dev/null +++ b/test/integration/targets/git/tasks/submodules.yml @@ -0,0 +1,124 @@ +# +# Submodule tests +# + +# Repository A with submodules defined (repo_submodules) +# .gitmodules file points to Repository I +# Repository B forked from A that has newer commits (repo_submodules_newer) +# .gitmodules file points to Repository II instead of I +# .gitmodules file also points to Repository III +# Repository I for submodule1 (repo_submodule1) +# Has 1 file checked in +# Repository II forked from I that has newer commits (repo_submodule1_newer) +# Has 2 files checked in +# Repository III for a second submodule (repo_submodule2) +# Has 1 file checked in + +- name: SUBMODULES | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: SUBMODULES | Test that clone without recursive does not retrieve submodules + git: + repo: "{{ repo_submodules }}" + version: 45c6c07ef10fd9e453d90207e63da1ce5bd3ae1e + dest: "{{ checkout_dir }}" + recursive: no + +- name: SUBMODULES | List submodule1 + command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- name: SUBMODULES | Ensure submodule1 is at the appropriate commit + assert: + that: '{{ submodule1.stdout_lines | length }} == 2' + +- name: SUBMODULES | clear checkout_dir + file: + state: absent + path: "{{ checkout_dir }}" + + +- name: SUBMODULES | Test that clone with recursive retrieves submodules + git: + repo: "{{ repo_submodules }}" + dest: "{{ checkout_dir }}" + version: 45c6c07ef10fd9e453d90207e63da1ce5bd3ae1e + recursive: yes + +- name: SUBMODULES | List submodule1 + command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- name: SUBMODULES | Ensure submodule1 is at the appropriate commit + assert: + that: '{{ submodule1.stdout_lines | length }} == 4' + +- name: SUBMODULES | Copy the checkout so we can run several different tests on it + command: 'cp -pr {{ checkout_dir }} {{ checkout_dir }}.bak' + + +- name: SUBMODULES | Test that update without recursive does not change submodules + git: + repo: "{{ repo_submodules }}" + 
version: d2974e4bbccdb59368f1d5eff2205f0fa863297e + dest: "{{ checkout_dir }}" + recursive: no + update: yes + track_submodules: yes + +- name: SUBMODULES | List submodule1 + command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- name: SUBMODULES | Stat submodule2 + stat: + path: "{{ checkout_dir }}/submodule2" + register: submodule2 + +- name: SUBMODULES | List submodule2 + command: ls -1a {{ checkout_dir }}/submodule2 + register: submodule2 + +- name: SUBMODULES | Ensure both submodules are at the appropriate commit + assert: + that: + - '{{ submodule1.stdout_lines|length }} == 4' + - '{{ submodule2.stdout_lines|length }} == 2' + + +- name: SUBMODULES | Remove checkout dir + file: + state: absent + path: "{{ checkout_dir }}" + +- name: SUBMODULES | Restore checkout to prior state + command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}' + + +- name: SUBMODULES | Test that update with recursive updated existing submodules + git: + repo: "{{ repo_submodules }}" + version: d2974e4bbccdb59368f1d5eff2205f0fa863297e + dest: "{{ checkout_dir }}" + update: yes + recursive: yes + track_submodules: yes + +- name: SUBMODULES | List submodule1 + command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- name: SUBMODULES | Ensure submodule1 is at the appropriate commit + assert: + that: '{{ submodule1.stdout_lines | length }} == 5' + + +- name: SUBMODULES | Test that update with recursive found new submodules + command: 'ls -1a {{ checkout_dir }}/submodule2' + register: submodule2 + +- name: SUBMODULES | Ensure submodule2 is at the appropriate commit + assert: + that: '{{ submodule2.stdout_lines | length }} == 4' diff --git a/test/integration/targets/git/vars/main.yml b/test/integration/targets/git/vars/main.yml new file mode 100644 index 00000000..a5bae5ba --- /dev/null +++ b/test/integration/targets/git/vars/main.yml @@ -0,0 +1,97 @@ +git_archive_extensions: + default: + - tar.gz + - tar + - tgz + - zip + RedHat6: + - tar + - zip + +git_required_packages: + default: + - git + - gzip + - tar + - unzip + - zip + FreeBSD: + - git + - gzip + - unzip + - zip + +git_list_commands: + tar.gz: tar -tf + tar: tar -tf + tgz: tar -tf + zip: unzip -Z1 + +checkout_dir: '{{ output_dir }}/git' +repo_dir: '{{ output_dir }}/local_repos' +separate_git_dir: '{{ output_dir }}/sep_git_dir' +repo_format1: 'https://github.com/jimi-c/test_role' +repo_format2: 'git@github.com:jimi-c/test_role.git' +repo_format3: 'ssh://git@github.com/jimi-c/test_role.git' +repo_submodules: 'https://github.com/abadger/test_submodules_newer.git' +repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git' +repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git' +repo_update_url_1: 'https://github.com/ansible-test-robinro/git-test-old' +repo_update_url_2: 'https://github.com/ansible-test-robinro/git-test-new' +known_host_files: + - "{{ lookup('env','HOME') }}/.ssh/known_hosts" + - '/etc/ssh/ssh_known_hosts' +git_version_supporting_depth: 1.9.1 +git_version_supporting_ls_remote: 1.7.5 +# path to an SSH private key for use with github.com (tests skipped if undefined) +# github_ssh_private_key: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa" +git_gpg_testkey: | + -----BEGIN PGP PRIVATE KEY BLOCK----- + + lQOYBFlkmX0BCACtE81Xj/351nnvwnAWMf8ZUP9B1YOPe9ohqNsCQY1DxODVJc9y + ljCoh9fTdoHXuaUMUFistozxCMP81RuZxfbfsGePnl8OAOgWT5Sln6yEG45oClJ0 + RmJJZdDT1lF3VaVwK9NQ5E1oqmk1IOjISi7iFa9TmMn1h7ISP/p+/xtMxQhzUXt8 + APAEhRdc9FfwxaxCHKZBiM7ND+pAm6vpom07ZUgxSppsrXZAxDncTwAeCumDpeOL + 
LAcSBsw02swOIHFfqHNrkELLr4KJqws+zeAk6R2nq0k16AVdNX+Rb7T3OKmuLawx + HXe8rKpaw0RC+JCogZK4tz0KDNuZPLW2Y5JJABEBAAEAB/4zkKpFk79p35YNskLd + wgCMRN7/+MKNDavUCnBRsEELt0z7BBxVudx+YZaSSITvxj4fuJJqxqqgJ2no2n8y + JdJjG7YHCnqse+WpvAUAAV4PL/ySD704Kj4fOwfoDTrRUIGNNWlseNB9RgQ5UXg5 + MCzeq/JD+En3bnnFySzzCENUcAQfu2FVYgKEiKaKL5Djs6p5w/jTm+Let3EsIczb + ykJ8D4/G/tSrNdp/g10DDy+VclWMhMFqmFesedvytE8jzCVxPKOoRkFTGrX76gIK + eMVxHIYxdCfSTHLjBykMGO9gxfk9lf18roNYs0VV2suyi4fVFxEozSAxwWlwKrXn + 0arvBADPsm5NjlZ5uR06YKbpUUwPTYcwLbasic0qHuUWgNsTVv8dd2il/jbha77m + StU7qRJ1jwbFEFxx7HnTmeGfPbdyKe2qyLJUyD/rpQSC5YirisUchtG8nZsHlnzn + k10SIeB480tkgkdMQx1Eif40aiuQb09/TxaaXAEFKttZhEO4RwQA1VQ8a0IrMBI2 + i4WqaIDNDl3x61JvvFD74v43I0AHKmZUPwcgAd6q2IvCDaKH0hIuBKu6BGq6DPvx + Oc/4r3iRn/xccconxRop2A9ffa00B/eQXrBq+uLBQfyiFL9UfkU8eTAAgbDKRxjY + ScaevoBbbYxkpgJUCL6VnoSdXlbNOO8EAL2ypsVkDmXNgR8ZT8cKSUft47di5T+9 + mhT1qmD62B+D86892y2QAohmUDadYRK9m9WD91Y7gOMeNhYj9qbxyPprPYUL0aPt + L8KS1H73C5WQMOsl2RyIw81asss30LWghsFIJ1gz8gVEjXhV+YC6W9XQ42iabmRR + A67f5sqK1scuO0q0KUFuc2libGUgVGVzdCBSdW5uZXIgPG5vcmVwbHlAZXhhbXBs + ZS5jb20+iQE3BBMBCAAhBQJZZJl9AhsDBQsJCAcCBhUICQoLAgQWAgMBAh4BAheA + AAoJEK0vcLBcXpbYi/kH/R0xk42MFpGd4pndTAsVIjRk/VhmhFc1v6sBeR40GXlt + hyEeOQQnIeHKLhsVT6YnfFZa8b4JwgTD6NeIiibOAlLgaKOWNwZu8toixMPVAzfQ + cRei+/gFXNil0FmBwWreVBDppuIn6XiSEPik0C7eCcw4lD+A+BbL3WGkp+OSQPho + hodIU02hgkrgs/6YJPats8Rgzw9hICsa2j0MjnG6P2z9atMz6tw2SiE5iBl7mZ2Z + zG/HiplleMhf/G8OZOskrWkKiLbpSPfQSKdOFkw1C6yqOlQ+HmuCZ56oyxtpItET + R11uAKt+ABdi4DX3FQQ+A+bGJ1+aKrcorZ8Z8s0XhPo= + =tV71 + -----END PGP PRIVATE KEY BLOCK----- + -----BEGIN PGP PUBLIC KEY BLOCK----- + + mQENBFlkmX0BCACtE81Xj/351nnvwnAWMf8ZUP9B1YOPe9ohqNsCQY1DxODVJc9y + ljCoh9fTdoHXuaUMUFistozxCMP81RuZxfbfsGePnl8OAOgWT5Sln6yEG45oClJ0 + RmJJZdDT1lF3VaVwK9NQ5E1oqmk1IOjISi7iFa9TmMn1h7ISP/p+/xtMxQhzUXt8 + APAEhRdc9FfwxaxCHKZBiM7ND+pAm6vpom07ZUgxSppsrXZAxDncTwAeCumDpeOL + LAcSBsw02swOIHFfqHNrkELLr4KJqws+zeAk6R2nq0k16AVdNX+Rb7T3OKmuLawx + HXe8rKpaw0RC+JCogZK4tz0KDNuZPLW2Y5JJABEBAAG0KUFuc2libGUgVGVzdCBS + dW5uZXIgPG5vcmVwbHlAZXhhbXBsZS5jb20+iQE3BBMBCAAhBQJZZJl9AhsDBQsJ + CAcCBhUICQoLAgQWAgMBAh4BAheAAAoJEK0vcLBcXpbYi/kH/R0xk42MFpGd4pnd + TAsVIjRk/VhmhFc1v6sBeR40GXlthyEeOQQnIeHKLhsVT6YnfFZa8b4JwgTD6NeI + iibOAlLgaKOWNwZu8toixMPVAzfQcRei+/gFXNil0FmBwWreVBDppuIn6XiSEPik + 0C7eCcw4lD+A+BbL3WGkp+OSQPhohodIU02hgkrgs/6YJPats8Rgzw9hICsa2j0M + jnG6P2z9atMz6tw2SiE5iBl7mZ2ZzG/HiplleMhf/G8OZOskrWkKiLbpSPfQSKdO + Fkw1C6yqOlQ+HmuCZ56oyxtpItETR11uAKt+ABdi4DX3FQQ+A+bGJ1+aKrcorZ8Z + 8s0XhPo= + =mUYY + -----END PGP PUBLIC KEY BLOCK----- diff --git a/test/integration/targets/group/aliases b/test/integration/targets/group/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/group/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/group/files/gidget.py b/test/integration/targets/group/files/gidget.py new file mode 100644 index 00000000..128985e7 --- /dev/null +++ b/test/integration/targets/group/files/gidget.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python + +import grp + +gids = [g.gr_gid for g in grp.getgrall()] + +i = 0 +while True: + if i not in gids: + print(i) + break + i += 1 diff --git a/test/integration/targets/group/files/grouplist.sh b/test/integration/targets/group/files/grouplist.sh new file mode 100644 index 00000000..d3129dfe --- /dev/null +++ b/test/integration/targets/group/files/grouplist.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +#- name: make a list of groups +# shell: | +# cat /etc/group | cut -d: -f1 +# register: group_names +# when: 
'ansible_distribution != "MacOSX"' + +#- name: make a list of groups [mac] +# shell: dscl localhost -list /Local/Default/Groups +# register: group_names +# when: 'ansible_distribution == "MacOSX"' + +DISTRO="$*" + +if [[ "$DISTRO" == "MacOSX" ]]; then + dscl localhost -list /Local/Default/Groups +else + grep -E -v ^\# /etc/group | cut -d: -f1 +fi diff --git a/test/integration/targets/group/meta/main.yml b/test/integration/targets/group/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/group/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/group/tasks/main.yml b/test/integration/targets/group/tasks/main.yml new file mode 100644 index 00000000..eb8126dd --- /dev/null +++ b/test/integration/targets/group/tasks/main.yml @@ -0,0 +1,40 @@ +# Test code for the group module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: ensure test groups are deleted before the test + group: + name: '{{ item }}' + state: absent + loop: + - ansibullgroup + - ansibullgroup2 + - ansibullgroup3 + +- block: + - name: run tests + include_tasks: tests.yml + + always: + - name: remove test groups after test + group: + name: '{{ item }}' + state: absent + loop: + - ansibullgroup + - ansibullgroup2 + - ansibullgroup3 \ No newline at end of file diff --git a/test/integration/targets/group/tasks/tests.yml b/test/integration/targets/group/tasks/tests.yml new file mode 100644 index 00000000..e35b23c9 --- /dev/null +++ b/test/integration/targets/group/tasks/tests.yml @@ -0,0 +1,329 @@ +--- +## +## group add +## + +- name: create group (check mode) + group: + name: ansibullgroup + state: present + register: create_group_check + check_mode: True + +- name: get result of create group (check mode) + script: 'grouplist.sh "{{ ansible_distribution }}"' + register: create_group_actual_check + +- name: assert create group (check mode) + assert: + that: + - create_group_check is changed + - '"ansibullgroup" not in create_group_actual_check.stdout_lines' + +- name: create group + group: + name: ansibullgroup + state: present + register: create_group + +- name: get result of create group + script: 'grouplist.sh "{{ ansible_distribution }}"' + register: create_group_actual + +- name: assert create group + assert: + that: + - create_group is changed + - create_group.gid is defined + - '"ansibullgroup" in create_group_actual.stdout_lines' + +- name: create group (idempotent) + group: + name: ansibullgroup + state: present + register: create_group_again + +- name: assert create group (idempotent) + assert: + that: + - not create_group_again is changed + +## +## group check +## + +- name: run existing group check tests + group: + name: "{{ create_group_actual.stdout_lines|random }}" + state: present + with_sequence: start=1 end=5 + register: group_test1 + +- name: validate results for testcase 1 + assert: + that: + - 
group_test1.results is defined + - group_test1.results|length == 5 + +- name: validate change results for testcase 1 + assert: + that: + - not group_test1 is changed + +## +## group add with gid +## + +- name: get the next available gid + script: gidget.py + args: + executable: '{{ ansible_python_interpreter }}' + register: gid + +- name: create a group with a gid (check mode) + group: + name: ansibullgroup2 + gid: '{{ gid.stdout_lines[0] }}' + state: present + register: create_group_gid_check + check_mode: True + +- name: get result of create a group with a gid (check mode) + script: 'grouplist.sh "{{ ansible_distribution }}"' + register: create_group_gid_actual_check + +- name: assert create group with a gid (check mode) + assert: + that: + - create_group_gid_check is changed + - '"ansibullgroup2" not in create_group_gid_actual_check.stdout_lines' + +- name: create a group with a gid + group: + name: ansibullgroup2 + gid: '{{ gid.stdout_lines[0] }}' + state: present + register: create_group_gid + +- name: get gid of created group + command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('ansibullgroup2').gr_gid)\"" + register: create_group_gid_actual + +- name: assert create group with a gid + assert: + that: + - create_group_gid is changed + - create_group_gid.gid | int == gid.stdout_lines[0] | int + - create_group_gid_actual.stdout | trim | int == gid.stdout_lines[0] | int + +- name: create a group with a gid (idempotent) + group: + name: ansibullgroup2 + gid: '{{ gid.stdout_lines[0] }}' + state: present + register: create_group_gid_again + +- name: assert create group with a gid (idempotent) + assert: + that: + - not create_group_gid_again is changed + - create_group_gid_again.gid | int == gid.stdout_lines[0] | int + +- block: + - name: create a group with a non-unique gid + group: + name: ansibullgroup3 + gid: '{{ gid.stdout_lines[0] }}' + non_unique: true + state: present + register: create_group_gid_non_unique + + - name: assert create group with a non unique gid + assert: + that: + - create_group_gid_non_unique is changed + - create_group_gid_non_unique.gid | int == gid.stdout_lines[0] | int + when: ansible_facts.distribution not in ['MacOSX', 'Alpine'] + +## +## group remove +## + +- name: delete group (check mode) + group: + name: ansibullgroup + state: absent + register: delete_group_check + check_mode: True + +- name: get result of delete group (check mode) + script: grouplist.sh "{{ ansible_distribution }}" + register: delete_group_actual_check + +- name: assert delete group (check mode) + assert: + that: + - delete_group_check is changed + - '"ansibullgroup" in delete_group_actual_check.stdout_lines' + +- name: delete group + group: + name: ansibullgroup + state: absent + register: delete_group + +- name: get result of delete group + script: grouplist.sh "{{ ansible_distribution }}" + register: delete_group_actual + +- name: assert delete group + assert: + that: + - delete_group is changed + - '"ansibullgroup" not in delete_group_actual.stdout_lines' + +- name: delete group (idempotent) + group: + name: ansibullgroup + state: absent + register: delete_group_again + +- name: assert delete group (idempotent) + assert: + that: + - not delete_group_again is changed + +- name: Ensure lgroupadd is present + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: libuser + state: present + when: ansible_facts.system in ['Linux'] + tags: + - user_test_local_mode + +# https://github.com/ansible/ansible/issues/56481 +- block: + - name: Test duplicate GID 
with local=yes + group: + name: "{{ item }}" + gid: 1337 + local: yes + loop: + - group1_local_test + - group2_local_test + ignore_errors: yes + register: local_duplicate_gid_result + + - assert: + that: + - local_duplicate_gid_result['results'][0] is success + - local_duplicate_gid_result['results'][1]['msg'] == "GID '1337' already exists with group 'group1_local_test'" + always: + - name: Cleanup + group: + name: group1_local_test + state: absent + # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed + when: ansible_distribution == 'CentOS' + +# https://github.com/ansible/ansible/pull/59769 +- block: + - name: create a local group with a gid + group: + name: group1_local_test + gid: 1337 + local: yes + state: present + register: create_local_group_gid + + - name: get gid of created local group + command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('group1_local_test').gr_gid)\"" + register: create_local_group_gid_actual + + - name: assert create local group with a gid + assert: + that: + - create_local_group_gid is changed + - create_local_group_gid.gid | int == 1337 | int + - create_local_group_gid_actual.stdout | trim | int == 1337 | int + + - name: create a local group with a gid (idempotent) + group: + name: group1_local_test + gid: 1337 + state: present + register: create_local_group_gid_again + + - name: assert create local group with a gid (idempotent) + assert: + that: + - not create_local_group_gid_again is changed + - create_local_group_gid_again.gid | int == 1337 | int + always: + - name: Cleanup create local group with a gid + group: + name: group1_local_test + state: absent + # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed + when: ansible_distribution == 'CentOS' + +# https://github.com/ansible/ansible/pull/59772 +- block: + - name: create group with a gid + group: + name: group1_test + gid: 1337 + local: no + state: present + register: create_group_gid + + - name: get gid of created group + command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('group1_test').gr_gid)\"" + register: create_group_gid_actual + + - name: assert create group with a gid + assert: + that: + - create_group_gid is changed + - create_group_gid.gid | int == 1337 | int + - create_group_gid_actual.stdout | trim | int == 1337 | int + + - name: create local group with the same gid + group: + name: group1_test + gid: 1337 + local: yes + state: present + register: create_local_group_gid + + - name: assert create local group with a gid + assert: + that: + - create_local_group_gid.gid | int == 1337 | int + always: + - name: Cleanup create group with a gid + group: + name: group1_test + local: no + state: absent + - name: Cleanup create local group with the same gid + group: + name: group1_test + local: yes + state: absent + # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed + when: ansible_distribution == 'CentOS' + +# create system group + +- name: remove group + group: + name: ansibullgroup + state: absent + +- name: create system group + group: + name: ansibullgroup + state: present + system: yes diff --git a/test/integration/targets/group_by/aliases b/test/integration/targets/group_by/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/group_by/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/group_by/create_groups.yml 
b/test/integration/targets/group_by/create_groups.yml new file mode 100644 index 00000000..3494a20f --- /dev/null +++ b/test/integration/targets/group_by/create_groups.yml @@ -0,0 +1,39 @@ +# test code for the group_by module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- debug: var=genus + +- name: group by genus + group_by: key={{ genus }} + register: grouped_by_genus + +- debug: var=grouped_by_genus + +- name: ensure we reflect 'changed' on change + assert: + that: + - grouped_by_genus is changed + +- name: group by first three letters of genus with key in quotes + group_by: key="{{ genus[:3] }}" + +- name: group by first two letters of genus with key not in quotes + group_by: key={{ genus[:2] }} + +- name: group by genus in uppercase using complex args + group_by: { key: "{{ genus | upper() }}" } diff --git a/test/integration/targets/group_by/group_vars/all b/test/integration/targets/group_by/group_vars/all new file mode 100644 index 00000000..0b674e00 --- /dev/null +++ b/test/integration/targets/group_by/group_vars/all @@ -0,0 +1,3 @@ +uno: 1 +dos: 2 +tres: 3 diff --git a/test/integration/targets/group_by/group_vars/camelus b/test/integration/targets/group_by/group_vars/camelus new file mode 100644 index 00000000..b214ad69 --- /dev/null +++ b/test/integration/targets/group_by/group_vars/camelus @@ -0,0 +1 @@ +dos: 'two' diff --git a/test/integration/targets/group_by/group_vars/vicugna b/test/integration/targets/group_by/group_vars/vicugna new file mode 100644 index 00000000..8feb93fc --- /dev/null +++ b/test/integration/targets/group_by/group_vars/vicugna @@ -0,0 +1 @@ +tres: 'three' diff --git a/test/integration/targets/group_by/inventory.group_by b/test/integration/targets/group_by/inventory.group_by new file mode 100644 index 00000000..9c7fe7ee --- /dev/null +++ b/test/integration/targets/group_by/inventory.group_by @@ -0,0 +1,9 @@ +# ungrouped +camel genus=camelus ansible_connection=local + +[lamini] +alpaca genus=vicugna +llama genus=lama + +[lamini:vars] +ansible_connection=local diff --git a/test/integration/targets/group_by/runme.sh b/test/integration/targets/group_by/runme.sh new file mode 100755 index 00000000..d1192681 --- /dev/null +++ b/test/integration/targets/group_by/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_group_by.yml -i inventory.group_by -v "$@" +ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook test_group_by_skipped.yml -i inventory.group_by -v "$@" diff --git a/test/integration/targets/group_by/test_group_by.yml b/test/integration/targets/group_by/test_group_by.yml new file mode 100644 index 00000000..07368dfe --- /dev/null +++ b/test/integration/targets/group_by/test_group_by.yml @@ -0,0 +1,187 @@ +# test code for the group_by module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public 
License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: Create overall groups + hosts: all + gather_facts: false + tasks: + - include_tasks: create_groups.yml + +- name: Vicugna group validation + hosts: vicugna + gather_facts: false + tasks: + - name: verify that only the alpaca is in this group + assert: { that: "inventory_hostname == 'alpaca'" } + - name: set a fact to check that we ran this play + set_fact: genus_vicugna=true + +- name: Lama group validation + hosts: lama + gather_facts: false + tasks: + - name: verify that only the llama is in this group + assert: { that: "inventory_hostname == 'llama'" } + - name: set a fact to check that we ran this play + set_fact: genus_lama=true + +- name: Camelus group validation + hosts: camelus + gather_facts: false + tasks: + - name: verify that only the camel is in this group + assert: { that: "inventory_hostname == 'camel'" } + - name: set a fact to check that we ran this play + set_fact: genus_camelus=true + +- name: Vic group validation + hosts: vic + gather_facts: false + tasks: + - name: verify that only the alpaca is in this group + assert: { that: "inventory_hostname == 'alpaca'" } + - name: set a fact to check that we ran this play + set_fact: genus_vic=true + +- name: Lam group validation + hosts: lam + gather_facts: false + tasks: + - name: verify that only the llama is in this group + assert: { that: "inventory_hostname == 'llama'" } + - name: set a fact to check that we ran this play + set_fact: genus_lam=true + +- name: Cam group validation + hosts: cam + gather_facts: false + tasks: + - name: verify that only the camel is in this group + assert: { that: "inventory_hostname == 'camel'" } + - name: set a fact to check that we ran this play + set_fact: genus_cam=true + +- name: Vi group validation + hosts: vi + gather_facts: false + tasks: + - name: verify that only the alpaca is in this group + assert: { that: "inventory_hostname == 'alpaca'" } + - name: set a fact to check that we ran this play + set_fact: genus_vi=true + +- name: La group validation + hosts: la + gather_facts: false + tasks: + - name: verify that only the llama is in this group + assert: { that: "inventory_hostname == 'llama'" } + - name: set a fact to check that we ran this play + set_fact: genus_la=true + +- name: Ca group validation + hosts: ca + gather_facts: false + tasks: + - name: verify that only the camel is in this group + assert: { that: "inventory_hostname == 'camel'" } + - name: set a fact to check that we ran this play + set_fact: genus_ca=true + +- name: VICUGNA group validation + hosts: VICUGNA + gather_facts: false + tasks: + - name: verify that only the alpaca is in this group + assert: { that: "inventory_hostname == 'alpaca'" } + - name: set a fact to check that we ran this play + set_fact: genus_VICUGNA=true + +- name: LAMA group validation + hosts: LAMA + gather_facts: false + tasks: + - name: verify that only the llama is in this group + assert: { that: "inventory_hostname == 'llama'" } + - name: set a fact to check that we ran this play + set_fact: genus_LAMA=true + +- name: CAMELUS group validation + hosts: 
CAMELUS + gather_facts: false + tasks: + - name: verify that only the camel is in this group + assert: { that: "inventory_hostname == 'camel'" } + - name: set a fact to check that we ran this play + set_fact: genus_CAMELUS=true + +- name: alpaca validation of groups + hosts: alpaca + gather_facts: false + tasks: + - name: check that alpaca matched all four groups + assert: { that: ["genus_vicugna", "genus_vic", "genus_vi", "genus_VICUGNA"] } + +- name: llama validation of groups + hosts: llama + gather_facts: false + tasks: + - name: check that llama matched all four groups + assert: { that: ["genus_lama", "genus_lam", "genus_la", "genus_LAMA"] } + +- hosts: camel + gather_facts: false + tasks: + - name: check that camel matched all four groups + assert: { that: ["genus_camelus", "genus_cam", "genus_ca", "genus_CAMELUS"] } + +- hosts: vicugna + gather_facts: false + tasks: + - name: check group_vars variable overrides for vicugna + assert: { that: ["uno == 1", "dos == 2", "tres == 'three'"] } + +- hosts: lama + gather_facts: false + tasks: + - name: check group_vars variable overrides for lama + assert: { that: ["uno == 1", "dos == 2", "tres == 3"] } + +- hosts: camelus + gather_facts: false + tasks: + - name: check group_vars variable overrides for camelus + assert: { that: ["uno == 1", "dos == 'two'", "tres == 3"] } + +- name: Nested group validation + hosts: lama + gather_facts: false + tasks: + - name: group by genus with parent + group_by: key=vicugna-{{ genus }} parents=vicugna + - name: check group_vars variable overrides for vicugna-lama + assert: { that: ["uno == 1", "dos == 2", "tres == 'three'"] } + + - name: group by genus with nonexistent parent + group_by: + key: "{{ genus }}" + parents: + - oxydactylus + - stenomylus + - name: check parent groups + assert: { that: ["'oxydactylus' in group_names", "'stenomylus' in group_names"] } diff --git a/test/integration/targets/group_by/test_group_by_skipped.yml b/test/integration/targets/group_by/test_group_by_skipped.yml new file mode 100644 index 00000000..6c18b4e8 --- /dev/null +++ b/test/integration/targets/group_by/test_group_by_skipped.yml @@ -0,0 +1,30 @@ +# test code for the group_by module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
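+
+# Invocation sketch (mirrors runme.sh): after create_groups.yml runs, no
+# group is literally named 'genus', so the second play below is skipped;
+# runme.sh sets ANSIBLE_HOST_PATTERN_MISMATCH=warning so the unmatched
+# pattern is reported as a warning rather than an error:
+#
+#   ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook test_group_by_skipped.yml -i inventory.group_by -v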
+ +- name: Create overall groups + hosts: all + gather_facts: false + tasks: + - include_tasks: create_groups.yml + +- name: genus group validation (expect skipped) + hosts: 'genus' + gather_facts: false + tasks: + - name: no hosts should match this group + fail: msg="should never get here" diff --git a/test/integration/targets/groupby_filter/aliases b/test/integration/targets/groupby_filter/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/groupby_filter/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/groupby_filter/runme.sh b/test/integration/targets/groupby_filter/runme.sh new file mode 100755 index 00000000..07894b0f --- /dev/null +++ b/test/integration/targets/groupby_filter/runme.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh + +pip install -U jinja2==2.9.4 + +ansible-playbook -i ../../inventory test_jinja2_groupby.yml -v "$@" + +pip install -U "jinja2<2.9.0" + +ansible-playbook -i ../../inventory test_jinja2_groupby.yml -v "$@" diff --git a/test/integration/targets/groupby_filter/test_jinja2_groupby.yml b/test/integration/targets/groupby_filter/test_jinja2_groupby.yml new file mode 100644 index 00000000..3cd02959 --- /dev/null +++ b/test/integration/targets/groupby_filter/test_jinja2_groupby.yml @@ -0,0 +1,29 @@ +--- +- name: Test jinja2 groupby + hosts: localhost + gather_facts: True + connection: local + vars: + fruits: + - name: apple + enjoy: yes + - name: orange + enjoy: no + - name: strawberry + enjoy: yes + expected: [[false, [{"enjoy": false, "name": "orange"}]], [true, [{"enjoy": true, "name": "apple"}, {"enjoy": true, "name": "strawberry"}]]] + tasks: + - name: show python interpreter + debug: + msg: "{{ ansible_python['executable'] }}" + + - name: show jinja2 version + debug: + msg: "{{ lookup('pipe', '{{ ansible_python[\"executable\"] }} -c \"import jinja2; print(jinja2.__version__)\"') }}" + + - set_fact: + result: "{{ fruits | groupby('enjoy') }}" + + - assert: + that: + - result == expected diff --git a/test/integration/targets/handler_race/aliases b/test/integration/targets/handler_race/aliases new file mode 100644 index 00000000..68d6d978 --- /dev/null +++ b/test/integration/targets/handler_race/aliases @@ -0,0 +1,3 @@ +shippable/posix/group5 +handler_race +skip/aix diff --git a/test/integration/targets/handler_race/inventory b/test/integration/targets/handler_race/inventory new file mode 100644 index 00000000..87879294 --- /dev/null +++ b/test/integration/targets/handler_race/inventory @@ -0,0 +1,30 @@ +host001 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host002 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host003 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host004 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host005 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host006 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host007 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host008 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host009 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host010 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" 
+host011 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host012 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host013 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host014 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host015 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host016 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host017 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host018 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host019 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host020 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host021 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host022 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host023 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host024 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host025 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host026 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host027 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host028 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host029 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host030 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml b/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml new file mode 100644 index 00000000..4c43df8c --- /dev/null +++ b/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml @@ -0,0 +1,4 @@ +--- +# handlers file for do_handlers +- name: My Handler + shell: sleep 5 diff --git a/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml b/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml new file mode 100644 index 00000000..028e9a55 --- /dev/null +++ b/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml @@ -0,0 +1,9 @@ +--- +# tasks file for do_handlers +- name: Invoke handler + shell: sleep 1 + notify: + - My Handler + +- name: Flush handlers + meta: flush_handlers diff --git a/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml b/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml new file mode 100644 index 00000000..aefbce26 --- /dev/null +++ b/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for more_sleep +- name: Random more sleep + set_fact: + more_sleep_time: "{{ 5 | random }}" + +- name: Moar sleep + shell: sleep "{{ more_sleep_time }}" diff --git a/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml b/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml new file mode 100644 index 00000000..607318bb --- /dev/null +++ b/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for random_sleep +- name: 
Generate sleep time + set_fact: + sleep_time: "{{ 60 | random }}" + +- name: Do random sleep + shell: sleep "{{ sleep_time }}" diff --git a/test/integration/targets/handler_race/runme.sh b/test/integration/targets/handler_race/runme.sh new file mode 100755 index 00000000..ba0f9873 --- /dev/null +++ b/test/integration/targets/handler_race/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_handler_race.yml -i inventory -v "$@" + diff --git a/test/integration/targets/handler_race/test_handler_race.yml b/test/integration/targets/handler_race/test_handler_race.yml new file mode 100644 index 00000000..ef713829 --- /dev/null +++ b/test/integration/targets/handler_race/test_handler_race.yml @@ -0,0 +1,10 @@ +- hosts: all + gather_facts: no + strategy: free + tasks: + - include_role: + name: random_sleep + - include_role: + name: do_handlers + - include_role: + name: more_sleep diff --git a/test/integration/targets/handlers/aliases b/test/integration/targets/handlers/aliases new file mode 100644 index 00000000..30bb677a --- /dev/null +++ b/test/integration/targets/handlers/aliases @@ -0,0 +1,3 @@ +shippable/posix/group5 +handlers +skip/aix diff --git a/test/integration/targets/handlers/from_handlers.yml b/test/integration/targets/handlers/from_handlers.yml new file mode 100644 index 00000000..7b2dea2d --- /dev/null +++ b/test/integration/targets/handlers/from_handlers.yml @@ -0,0 +1,39 @@ +- name: verify handlers_from on include_role + hosts: A + gather_facts: False + tags: ['scenario1'] + tasks: + - name: test include_role + include_role: name=test_handlers_meta handlers_from=alternate.yml + + - name: force handler run + meta: flush_handlers + + - name: verify handlers ran + assert: + that: + - "'handler1_alt_called' in hostvars[inventory_hostname]" + - "'handler2_alt_called' in hostvars[inventory_hostname]" + tags: ['scenario1'] + + +- name: verify handlers_from on import_role + hosts: A + gather_facts: False + tasks: + - name: set facts to false + set_fact: + handler1_alt_called: False + handler2_alt_called: False + + - import_role: name=test_handlers_meta handlers_from=alternate.yml + + - name: force handler run + meta: flush_handlers + + - name: verify handlers ran + assert: + that: + - handler1_alt_called|bool + - handler2_alt_called|bool + tags: ['scenario1'] diff --git a/test/integration/targets/handlers/handlers.yml b/test/integration/targets/handlers/handlers.yml new file mode 100644 index 00000000..aed75bd2 --- /dev/null +++ b/test/integration/targets/handlers/handlers.yml @@ -0,0 +1,2 @@ +- name: test handler + debug: msg="handler called" diff --git a/test/integration/targets/handlers/inventory.handlers b/test/integration/targets/handlers/inventory.handlers new file mode 100644 index 00000000..268cf657 --- /dev/null +++ b/test/integration/targets/handlers/inventory.handlers @@ -0,0 +1,10 @@ +[testgroup] +A +B +C +D +E + +[testgroup:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml new file mode 100644 index 00000000..962d7561 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml @@ -0,0 +1,2 @@ +- name: echoing handler + command: echo CALLED_HANDLER_{{ inventory_hostname }} diff --git a/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml 
b/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml new file mode 100644 index 00000000..f5d78c73 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml @@ -0,0 +1,26 @@ +--- + +# We notify for A and B, and hosts A and C fail. +# When forcing, we expect A and B to run handlers +# When not forcing, we expect only B to run handlers + +- name: notify the handler for host A and B + shell: echo + notify: + - echoing handler + when: inventory_hostname == 'A' or inventory_hostname == 'B' + +- name: EXPECTED FAILURE fail task for all + fail: msg="Fail All" + when: fail_all is defined and fail_all + +- name: EXPECTED FAILURE fail task for A + fail: msg="Fail A" + when: inventory_hostname == 'A' + +- name: EXPECTED FAILURE fail task for C + fail: msg="Fail C" + when: inventory_hostname == 'C' + +- name: echo after A and C have failed + command: echo CALLED_TASK_{{ inventory_hostname }} diff --git a/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml new file mode 100644 index 00000000..0261f935 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml @@ -0,0 +1,5 @@ +- name: set handler fact + set_fact: + handler_called: True +- name: test handler + debug: msg="handler called" diff --git a/test/integration/targets/handlers/roles/test_handlers/meta/main.yml b/test/integration/targets/handlers/roles/test_handlers/meta/main.yml new file mode 100644 index 00000000..32cf5dda --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml new file mode 100644 index 00000000..a857dacf --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml @@ -0,0 +1,52 @@ +# test code for handlers +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
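+
+# A miniature of the notify/flush pattern exercised below (an illustrative
+# sketch only, not one of the test tasks):
+#
+#   - shell: echo            # reports 'changed', so its notify fires
+#     notify: set handler fact
+#   - meta: flush_handlers   # run any pending handlers now, mid-play
+#
+# Hosts that never notified the handler must not run it.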
+ + +- name: reset handler_called variable to false for all hosts + set_fact: + handler_called: False + tags: scenario1 + +- name: notify the handler for host A only + shell: echo + notify: + - set handler fact + when: inventory_hostname == 'A' + tags: scenario1 + +- name: force handler execution now + meta: "flush_handlers" + tags: scenario1 + +- debug: var=handler_called + tags: scenario1 + +- name: validate the handler only ran on one host + assert: + that: + - "inventory_hostname == 'A' and handler_called == True or handler_called == False" + tags: scenario1 + +- name: 'test notify with loop' + debug: msg='a task' + changed_when: item == 1 + notify: test handler + with_items: + - 1 + - 2 + tags: scenario2 diff --git a/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml new file mode 100644 index 00000000..abe01be4 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml @@ -0,0 +1 @@ +- include: handlers.yml diff --git a/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml new file mode 100644 index 00000000..84f0a583 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml @@ -0,0 +1,4 @@ +- name: 'main task' + debug: msg='main task' + changed_when: True + notify: test handler diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml new file mode 100644 index 00000000..0261f935 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml @@ -0,0 +1,5 @@ +- name: set handler fact + set_fact: + handler_called: True +- name: test handler + debug: msg="handler called" diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml new file mode 100644 index 00000000..32cf5dda --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml new file mode 100644 index 00000000..fbc3d1c5 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml @@ -0,0 +1,47 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +- name: reset handler_called variable to false for all hosts + set_fact: + handler_called: False + tags: scenario1 + +- name: notify the handler for host A only + shell: echo + notify: + - set handler fact + when: inventory_hostname == 'A' + tags: scenario1 + +- name: force handler execution now + meta: "flush_handlers" + tags: scenario1 + +- debug: var=handler_called + tags: scenario1 + +- name: validate the handler only ran on one host + assert: + that: + - "inventory_hostname == 'A' and handler_called == True or handler_called == False" + tags: scenario1 + +# item below is passed in by the playbook that calls this +- name: 'test notify with loop' + debug: msg='a task' + changed_when: item == 1 + notify: test handler + tags: scenario2 diff --git a/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml new file mode 100644 index 00000000..3bfd82a2 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: notify_listen_ran_4_3 + set_fact: + notify_listen_ran_4_3: True + listen: notify_listen + +- name: notify_listen_in_role_4 + set_fact: + notify_listen_in_role_4: True + listen: notify_listen_in_role diff --git a/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml new file mode 100644 index 00000000..bac9b71e --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: notify some handlers from a role + command: uptime + notify: + - notify_listen_from_role + - notify_listen_in_role diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml new file mode 100644 index 00000000..9268ce51 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml @@ -0,0 +1,12 @@ +- name: set_handler_fact_1 + set_fact: + handler1_called: True + handler1_alt_called: True + +- name: set_handler_fact_2 + set_fact: + handler2_called: True + handler2_alt_called: True + +- name: count_handler + shell: echo . >> {{ handler_countpath }} diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml new file mode 100644 index 00000000..0dd408b7 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml @@ -0,0 +1,10 @@ +- name: set_handler_fact_1 + set_fact: + handler1_called: True + +- name: set_handler_fact_2 + set_fact: + handler2_called: True + +- name: count_handler + shell: echo . >> {{ handler_countpath }} diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml new file mode 100644 index 00000000..d9f5c574 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml @@ -0,0 +1,75 @@ +# test code for handlers +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: notify the first handler + shell: echo + notify: + - set_handler_fact_1 + +- name: force handler execution now + meta: "flush_handlers" + +- name: assert handler1 ran and not handler2 + assert: + that: + - "handler1_called is defined" + - "handler2_called is not defined" + +- name: make a tempfile for counting + shell: mktemp + register: mktemp_out + +- name: register tempfile path + set_fact: + handler_countpath: "{{ mktemp_out.stdout }}" + +- name: notify the counting handler + shell: echo + notify: + - count_handler + +- name: notify the counting handler again + shell: echo + notify: + - count_handler + +- name: force handler execution now + meta: flush_handlers + +- name: get handler execution count + shell: cat {{ handler_countpath }} | grep -o . | wc -l + register: exec_count_out + +- debug: var=exec_count_out.stdout + +- name: ensure single execution + assert: + that: + - exec_count_out.stdout | int == 1 + +- name: cleanup tempfile + file: path={{ handler_countpath }} state=absent + +- name: reset handler1_called + set_fact: + handler1_called: False + +- name: notify the second handler + shell: echo + notify: + - set_handler_fact_2 diff --git a/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml new file mode 100644 index 00000000..7dbf3347 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml @@ -0,0 +1,21 @@ +--- +- name: name1 + set_fact: + role_non_templated_name: True +- name: "{{ handler2 }}" + set_fact: + role_templated_name: True +- name: testlistener1 + set_fact: + role_non_templated_listener: True + listen: name3 +- name: testlistener2 + set_fact: + role_templated_listener: True + listen: "{{ handler4 }}" +- name: name5 + set_fact: + role_handler5: True +- set_fact: + role_handler6: True + listen: name6 diff --git a/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml b/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml new file mode 100644 index 00000000..54174172 --- /dev/null +++ b/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- command: echo Hello World + notify: + - "{{ handler1 }}" + - "{{ handler2 }}" + - "{{ handler3 }}" + - "{{ handler4 }}" + +- meta: flush_handlers + +- assert: + that: + - role_non_templated_name is defined + - role_templated_name is defined + - role_non_templated_listener is defined + - role_templated_listener is undefined + +- command: echo + notify: "{{ handler_list }}" + +- meta: flush_handlers + +- assert: + that: + - role_handler5 is defined + - role_handler6 is defined diff --git a/test/integration/targets/handlers/runme.sh b/test/integration/targets/handlers/runme.sh new file mode 100755 index 00000000..59c81bce --- /dev/null +++ b/test/integration/targets/handlers/runme.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_FORCE_HANDLERS + +ANSIBLE_FORCE_HANDLERS=false + +# simple handler test +ansible-playbook test_handlers.yml -i inventory.handlers -v "$@" --tags scenario1 
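+ +# A note on the environment handling above (standard shell semantics, spelled +# out for clarity): the bare `export ANSIBLE_FORCE_HANDLERS` marks the variable +# as exported once, the plain assignment sets its default value, and individual +# invocations below can still override it inline per run, as in +# `ANSIBLE_FORCE_HANDLERS=true ansible-playbook ...`.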
+ +# simple from_handlers test +ansible-playbook from_handlers.yml -i inventory.handlers -v "$@" --tags scenario1 + +ansible-playbook test_listening_handlers.yml -i inventory.handlers -v "$@" + +[ "$(ansible-playbook test_handlers.yml -i inventory.handlers -v "$@" --tags scenario2 -l A \ +| grep -E -o 'RUNNING HANDLER \[test_handlers : .*?]')" = "RUNNING HANDLER [test_handlers : test handler]" ] + +# Test forcing handlers using the linear and free strategy +for strategy in linear free; do + + export ANSIBLE_STRATEGY=$strategy + + # Not forcing, should only run on successful host + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ] + + # Forcing from command line + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + + # Forcing from command line, should only run later tasks on unfailed hosts + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers \ + | grep -E -o CALLED_TASK_. | sort | uniq | xargs)" = "CALLED_TASK_B CALLED_TASK_D CALLED_TASK_E" ] + + # Forcing from command line, should call handlers even if all hosts fail + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers -e fail_all=yes \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + + # Forcing via the ANSIBLE_FORCE_HANDLERS environment variable (equivalent to the ansible.cfg setting) + [ "$(ANSIBLE_FORCE_HANDLERS=true ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + + # Forcing true in play + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags force_true_in_play \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + + # Forcing false in play, which overrides command line + [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags force_false_in_play --force-handlers \ + | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ] + + unset ANSIBLE_STRATEGY + +done + +[ "$(ansible-playbook test_handlers_include.yml -i ../../inventory -v "$@" --tags playbook_include_handlers \ +| grep -E -o 'RUNNING HANDLER \[.*?]')" = "RUNNING HANDLER [test handler]" ] + +[ "$(ansible-playbook test_handlers_include.yml -i ../../inventory -v "$@" --tags role_include_handlers \ +| grep -E -o 'RUNNING HANDLER \[test_handlers_include : .*?]')" = "RUNNING HANDLER [test_handlers_include : test handler]" ] + +[ "$(ansible-playbook test_handlers_include_role.yml -i ../../inventory -v "$@" \ +| grep -E -o 'RUNNING HANDLER \[test_handlers_include_role : .*?]')" = "RUNNING HANDLER [test_handlers_include_role : test handler]" ] + +# Notify handler listen +ansible-playbook test_handlers_listen.yml -i inventory.handlers -v "$@" + +# Notifying a nonexistent handler results in an error +set +e +result="$(ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers "$@" 2>&1)" +set -e +grep -q "ERROR!
The requested handler 'notify_inexistent_handler' was not found in either the main handlers list nor in the listening handlers list" <<< "$result" + +# Notify inexistent handlers without errors when ANSIBLE_ERROR_ON_MISSING_HANDLER=false +ANSIBLE_ERROR_ON_MISSING_HANDLER=false ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers -v "$@" + +ANSIBLE_ERROR_ON_MISSING_HANDLER=false ansible-playbook test_templating_in_handlers.yml -v "$@" + +# https://github.com/ansible/ansible/issues/36649 +output_dir=/tmp +set +e +result="$(ansible-playbook test_handlers_any_errors_fatal.yml -e output_dir=$output_dir -i inventory.handlers -v "$@" 2>&1)" +set -e +[ ! -f $output_dir/should_not_exist_B ] || (rm -f $output_dir/should_not_exist_B && exit 1) + +# https://github.com/ansible/ansible/issues/47287 +[ "$(ansible-playbook test_handlers_including_task.yml -i ../../inventory -v "$@" | grep -E -o 'failed=[0-9]+')" = "failed=0" ] + +# https://github.com/ansible/ansible/issues/27237 +set +e +result="$(ansible-playbook test_handlers_template_run_once.yml -i inventory.handlers "$@" 2>&1)" +set -e +grep -q "handler A" <<< "$result" +grep -q "handler B" <<< "$result" diff --git a/test/integration/targets/handlers/test_force_handlers.yml b/test/integration/targets/handlers/test_force_handlers.yml new file mode 100644 index 00000000..9cff7729 --- /dev/null +++ b/test/integration/targets/handlers/test_force_handlers.yml @@ -0,0 +1,27 @@ +--- + +- name: test force handlers (default) + tags: normal + hosts: testgroup + gather_facts: False + roles: + - { role: test_force_handlers } + tasks: + - debug: msg="you should see this with --tags=normal" + +- name: test force handlers (set to true) + tags: force_true_in_play + hosts: testgroup + gather_facts: False + force_handlers: True + roles: + - { role: test_force_handlers, tags: force_true_in_play } + + +- name: test force handlers (set to false) + tags: force_false_in_play + hosts: testgroup + gather_facts: False + force_handlers: False + roles: + - { role: test_force_handlers, tags: force_false_in_play } diff --git a/test/integration/targets/handlers/test_handlers.yml b/test/integration/targets/handlers/test_handlers.yml new file mode 100644 index 00000000..ae9847ba --- /dev/null +++ b/test/integration/targets/handlers/test_handlers.yml @@ -0,0 +1,47 @@ +--- +- name: run handlers + hosts: A + gather_facts: False + roles: + - { role: test_handlers_meta, tags: ['scenario1'] } + +- name: verify final handler was run + hosts: A + gather_facts: False + tasks: + - name: verify handler2 ran + assert: + that: + - "not hostvars[inventory_hostname]['handler1_called']" + - "'handler2_called' in hostvars[inventory_hostname]" + tags: ['scenario1'] + +- name: verify listening handlers + hosts: A + gather_facts: False + tasks: + - name: notify some handlers + command: echo foo + notify: + - notify_listen + post_tasks: + - name: assert all defined handlers ran without error + assert: + that: + - "notify_listen_ran_1 is defined" + - "notify_listen_ran_2 is defined" + handlers: + - name: first listening handler has a name + set_fact: + notify_listen_ran_1: True + listen: notify_listen + # second listening handler does not + - set_fact: + notify_listen_ran_2: True + listen: notify_listen + +- name: test handlers + hosts: testgroup + gather_facts: False + roles: + - { role: test_handlers } diff --git a/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml b/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml new file mode 100644 index 
00000000..6b791a3b --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml @@ -0,0 +1,24 @@ +- hosts: + - A + - B + gather_facts: no + any_errors_fatal: yes + vars: + output_dir: /tmp + tasks: + - name: Task one + debug: + msg: 'task 1' + changed_when: yes + notify: EXPECTED FAILURE failed_handler + + - meta: flush_handlers + + - name: This task should never happen + file: + path: "{{ output_dir }}/should_not_exist_{{ inventory_hostname }}" + state: touch + handlers: + - name: EXPECTED FAILURE failed_handler + fail: + when: 'inventory_hostname == "A"' diff --git a/test/integration/targets/handlers/test_handlers_include.yml b/test/integration/targets/handlers/test_handlers_include.yml new file mode 100644 index 00000000..5514fc10 --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_include.yml @@ -0,0 +1,14 @@ +- name: verify that play can include handler + hosts: testhost + tasks: + - debug: msg="main task" + changed_when: True + notify: test handler + tags: ['playbook_include_handlers'] + handlers: + - include: handlers.yml + +- name: verify that role can include handler + hosts: testhost + roles: + - { role: test_handlers_include, tags: ['role_include_handlers'] } diff --git a/test/integration/targets/handlers/test_handlers_include_role.yml b/test/integration/targets/handlers/test_handlers_include_role.yml new file mode 100644 index 00000000..77e6b53a --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_include_role.yml @@ -0,0 +1,8 @@ +- name: verify that include_role in a loop can notify handlers + hosts: testhost + tasks: + - include_role: + name: test_handlers_include_role + with_items: + - 1 + - 2 diff --git a/test/integration/targets/handlers/test_handlers_including_task.yml b/test/integration/targets/handlers/test_handlers_including_task.yml new file mode 100644 index 00000000..8f7933ab --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_including_task.yml @@ -0,0 +1,16 @@ +--- +- name: Verify handler can include other tasks (#47287) + hosts: testhost + tasks: + - name: include a task from the tasks section + include_tasks: handlers.yml + + - name: notify a handler + debug: + msg: notifying handler + changed_when: yes + notify: include a task from the handlers section + + handlers: + - name: include a task from the handlers section + include_tasks: handlers.yml diff --git a/test/integration/targets/handlers/test_handlers_inexistent_notify.yml b/test/integration/targets/handlers/test_handlers_inexistent_notify.yml new file mode 100644 index 00000000..15de38aa --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_inexistent_notify.yml @@ -0,0 +1,10 @@ +--- +- name: notify inexistent handler + hosts: localhost + gather_facts: false + tasks: + - name: test notify an inexistent handler + command: uptime + notify: + - notify_inexistent_handler + register: result diff --git a/test/integration/targets/handlers/test_handlers_listen.yml b/test/integration/targets/handlers/test_handlers_listen.yml new file mode 100644 index 00000000..dd2cd87d --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_listen.yml @@ -0,0 +1,128 @@ +--- +- name: test listen with named handlers + hosts: localhost + gather_facts: false + tasks: + - name: test notify handlers listen + command: uptime + notify: + - notify_listen + - meta: flush_handlers + - name: verify test notify handlers listen + assert: + that: + - "notify_listen_ran_1_1 is defined" + - "notify_listen_ran_1_2 is defined" + - "notify_listen_ran_1_3 is undefined"
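+  # The handlers below all subscribe to the notify_listen topic via `listen`: +  # a single notification of that topic is expected to run each subscriber +  # exactly once, while the handler listening on notify_listen2 must never +  # fire -- which is what the assert above verifies.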
+ handlers: + - name: notify_handler_ran_1_1 + set_fact: + notify_listen_ran_1_1: True + listen: notify_listen + - name: notify_handler_ran_1_2 + set_fact: + notify_listen_ran_1_2: True + listen: notify_listen + - name: notify_handler_ran_1_3 + set_fact: + notify_listen_ran_1_3: True + listen: notify_listen2 + +- name: test listen unnamed handlers + hosts: localhost + gather_facts: false + pre_tasks: + - name: notify some handlers + command: echo foo + notify: + - notify_listen + tasks: + - meta: flush_handlers + - name: assert all defined handlers ran without error + assert: + that: + - "notify_listen_ran_1 is defined" + - "notify_listen_ran_2 is defined" + - "notify_listen_ran_3 is undefined" + handlers: + - set_fact: + notify_listen_ran_1: True + listen: notify_listen + - set_fact: + notify_listen_ran_2: True + listen: notify_listen + - set_fact: + notify_listen_ran_3: True + listen: notify_listen2 + +- name: test with mixed notify by name and listen + hosts: localhost + gather_facts: false + tasks: + - name: test notify handlers names and identical listen + command: uptime + notify: + - notify_listen + - meta: flush_handlers + - name: verify test notify handlers names and identical listen + assert: + that: + - "notify_handler_name_ran_3 is defined" + - "notify_handler_name_ran_3_1 is not defined" + - "notify_listen_ran_3_2 is defined" + - "notify_listen_ran_3_3 is defined" + - "not_notify_listen_3_4 is not defined" + handlers: + - name: notify_listen + set_fact: + notify_handler_name_ran_3: True + # this will not run as we have a handler with an identical name notified first + - name: notify_listen + set_fact: + notify_handler_name_ran_3_1: True + - name: notify_handler_ran_3_2 + set_fact: + notify_listen_ran_3_2: True + listen: notify_listen + - name: notify_handler_ran_3_3 + set_fact: + notify_listen_ran_3_3: True + listen: notify_listen + # this one is not notified + - name: not_notify_listen_3_4 + set_fact: + not_notify_listen_3_4: True + listen: not_notified + +- name: test listen in roles + hosts: localhost + gather_facts: false + roles: + - role: test_handlers_listen + tasks: + - name: test notify handlers listen in roles + command: uptime + notify: + - notify_listen + - meta: flush_handlers + - name: verify test notify handlers listen in roles + assert: + that: + - "notify_listen_ran_4_1 is defined" + - "notify_listen_ran_4_2 is defined" + - "notify_listen_ran_4_3 is defined" + - "notify_listen_in_role_4 is defined" + - "notify_listen_from_role_4 is defined" + handlers: + - name: notify_listen_ran_4_1 + set_fact: + notify_listen_ran_4_1: True + listen: notify_listen + - name: notify_listen_ran_4_2 + set_fact: + notify_listen_ran_4_2: True + listen: notify_listen + - name: notify_listen_from_role_4 + set_fact: + notify_listen_from_role_4: True + listen: notify_listen_from_role diff --git a/test/integration/targets/handlers/test_handlers_template_run_once.yml b/test/integration/targets/handlers/test_handlers_template_run_once.yml new file mode 100644 index 00000000..6edc32e2 --- /dev/null +++ b/test/integration/targets/handlers/test_handlers_template_run_once.yml @@ -0,0 +1,12 @@ +- hosts: A,B + gather_facts: no + tasks: + - debug: + changed_when: true + notify: + - handler + handlers: + - name: handler + debug: + msg: "handler {{ inventory_hostname }}" + run_once: "{{ testvar | default(False) }}" diff --git a/test/integration/targets/handlers/test_listening_handlers.yml b/test/integration/targets/handlers/test_listening_handlers.yml new file mode 100644 index 00000000..67bdad9a
--- /dev/null +++ b/test/integration/targets/handlers/test_listening_handlers.yml @@ -0,0 +1,24 @@ +--- +- name: verify listening handlers + hosts: A + gather_facts: False + tasks: + - name: notify some handlers + command: echo foo + notify: + - notify_listen + post_tasks: + - name: assert all defined handlers ran without error + assert: + that: + - "notify_listen_ran_1 is defined" + - "notify_listen_ran_2 is defined" + handlers: + - name: first listening handler has a name + set_fact: + notify_listen_ran_1: True + listen: notify_listen + # second listening handler does not + - set_fact: + notify_listen_ran_2: True + listen: notify_listen diff --git a/test/integration/targets/handlers/test_templating_in_handlers.yml b/test/integration/targets/handlers/test_templating_in_handlers.yml new file mode 100644 index 00000000..662b8c1e --- /dev/null +++ b/test/integration/targets/handlers/test_templating_in_handlers.yml @@ -0,0 +1,62 @@ +- name: test templated values in handlers + hosts: localhost + gather_facts: no + vars: + handler1: name1 + handler2: name2 + handler3: name3 + handler4: name4 + handler_list: + - name5 + - name6 + + handlers: + - name: name1 + set_fact: + non_templated_name: True + - name: "{{ handler2 }}" + set_fact: + templated_name: True + - name: testlistener1 + set_fact: + non_templated_listener: True + listen: name3 + - name: testlistener2 + set_fact: + templated_listener: True + listen: "{{ handler4 }}" + - name: name5 + set_fact: + handler5: True + - set_fact: + handler6: True + listen: name6 + + tasks: + - command: echo Hello World + notify: + - "{{ handler1 }}" + - "{{ handler2 }}" + - "{{ handler3 }}" + - "{{ handler4 }}" + + - meta: flush_handlers + + - assert: + that: + - non_templated_name is defined + - templated_name is defined + - non_templated_listener is defined + - templated_listener is undefined + + - command: echo + notify: "{{ handler_list }}" + + - meta: flush_handlers + + - assert: + that: + - handler5 is defined + - handler6 is defined + + - include_role: name=test_templating_in_handlers diff --git a/test/integration/targets/hash/aliases b/test/integration/targets/hash/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/hash/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/hash/group_vars/all b/test/integration/targets/hash/group_vars/all new file mode 100644 index 00000000..805ac26a --- /dev/null +++ b/test/integration/targets/hash/group_vars/all @@ -0,0 +1,3 @@ +# variables used for hash merging behavior testing +test_hash: + group_vars_all: "this is in group_vars/all" diff --git a/test/integration/targets/hash/host_vars/testhost b/test/integration/targets/hash/host_vars/testhost new file mode 100644 index 00000000..3a75ee66 --- /dev/null +++ b/test/integration/targets/hash/host_vars/testhost @@ -0,0 +1,2 @@ +test_hash: + host_vars_testhost: "this is in host_vars/testhost" diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml new file mode 100644 index 00000000..10cc09f3 --- /dev/null +++ b/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml @@ -0,0 +1,21 @@ +# test code for the hash variable behavior +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either 
version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +--- +test_hash: + default_vars: "this is in role defaults/main.yml" diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml new file mode 100644 index 00000000..59adf997 --- /dev/null +++ b/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml @@ -0,0 +1,17 @@ +# test code for the hash variable behavior +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml new file mode 100644 index 00000000..bc635498 --- /dev/null +++ b/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml @@ -0,0 +1,37 @@ +# test code for the hash variable behaviour +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
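+ +# A minimal illustration of the two hash behaviours exercised below; the example +# values are assumed for this comment only. Combining {'a': 1, 'b': 1} with a +# higher-precedence {'b': 2, 'c': 2} yields: +#   replace: {'b': 2, 'c': 2}           (the winning hash is taken wholesale) +#   merge:   {'a': 1, 'b': 2, 'c': 2}   (keys are combined, later values win)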
+ +- name: debug hash behaviour result + debug: + var: "{{ lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') }}" + verbosity: 2 + +- name: assert hash behaviour is merge or replace + assert: + that: + - lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') in ('merge', 'replace') + +- name: debug test_hash var + debug: + var: test_hash + verbosity: 2 + +- name: assert the dictionary values match + assert: + that: + - "lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'merge' and test_hash == merged_hash or lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'replace' and test_hash == replaced_hash" diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml new file mode 100644 index 00000000..2068e9fb --- /dev/null +++ b/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml @@ -0,0 +1,21 @@ +# test code for the hash variable behavior +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +--- +test_hash: + role_vars: "this is in role vars/main.yml" diff --git a/test/integration/targets/hash/runme.sh b/test/integration/targets/hash/runme.sh new file mode 100755 index 00000000..3689d83b --- /dev/null +++ b/test/integration/targets/hash/runme.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -eux + +JSON_ARG='{"test_hash":{"extra_args":"this is an extra arg"}}' + +ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i ../../inventory -v "$@" -e "${JSON_ARG}" +ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i ../../inventory -v "$@" -e "${JSON_ARG}" + +ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_inventory_hash.yml -i test_inv1.yml -i test_inv2.yml -v "$@" +ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_inventory_hash.yml -i test_inv1.yml -i test_inv2.yml -v "$@" diff --git a/test/integration/targets/hash/test_hash.yml b/test/integration/targets/hash/test_hash.yml new file mode 100644 index 00000000..37b56e65 --- /dev/null +++ b/test/integration/targets/hash/test_hash.yml @@ -0,0 +1,21 @@ +- hosts: testhost + vars_files: + - vars/test_hash_vars.yml + vars: + test_hash: + playbook_vars: "this is a playbook variable" + replaced_hash: + extra_args: "this is an extra arg" + merged_hash: + default_vars: "this is in role defaults/main.yml" + extra_args: "this is an extra arg" + group_vars_all: "this is in group_vars/all" + host_vars_testhost: "this is in host_vars/testhost" + playbook_vars: "this is a playbook variable" + role_argument: "this is a role argument variable" + role_vars: "this is in role vars/main.yml" + vars_file: "this is in a vars_file" + roles: + - role: test_hash_behaviour + test_hash: + role_argument: 'this is a role argument variable' diff --git a/test/integration/targets/hash/test_inv1.yml b/test/integration/targets/hash/test_inv1.yml new file mode 100644 index 00000000..02bd017f --- /dev/null +++ b/test/integration/targets/hash/test_inv1.yml @@ -0,0 +1,10 @@ +all: + hosts: + 
host1: + test_inventory_host_hash: + host_var1: "inventory 1" + host_var2: "inventory 1" + vars: + test_inventory_group_hash: + group_var1: "inventory 1" + group_var2: "inventory 1" diff --git a/test/integration/targets/hash/test_inv2.yml b/test/integration/targets/hash/test_inv2.yml new file mode 100644 index 00000000..6529b933 --- /dev/null +++ b/test/integration/targets/hash/test_inv2.yml @@ -0,0 +1,8 @@ +all: + hosts: + host1: + test_inventory_host_hash: + host_var1: "inventory 2" + vars: + test_inventory_group_hash: + group_var1: "inventory 2" diff --git a/test/integration/targets/hash/test_inventory_hash.yml b/test/integration/targets/hash/test_inventory_hash.yml new file mode 100644 index 00000000..1091b135 --- /dev/null +++ b/test/integration/targets/hash/test_inventory_hash.yml @@ -0,0 +1,41 @@ +--- +- hosts: localhost + gather_facts: no + vars: + host_hash_merged: {'host_var1': 'inventory 2', 'host_var2': 'inventory 1'} + host_hash_replaced: {'host_var1': 'inventory 2'} + group_hash_merged: {'group_var1': 'inventory 2', 'group_var2': 'inventory 1'} + group_hash_replaced: {'group_var1': 'inventory 2'} + tasks: + + - name: debug hash behaviour result + debug: + var: "{{ lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') }}" + verbosity: 2 + + - name: assert hash behaviour is merge or replace + assert: + that: + - lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') in ('merge', 'replace') + + - name: debug test_inventory_host_hash + debug: + var: hostvars['host1']['test_inventory_host_hash'] + verbosity: 2 + + - name: debug test_inventory_group_hash + debug: + var: test_inventory_group_hash + verbosity: 2 + + - assert: + that: + - hostvars['host1']['test_inventory_host_hash'] == host_hash_replaced + - test_inventory_group_hash == group_hash_replaced + when: "lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'replace'" + + - assert: + that: + - hostvars['host1']['test_inventory_host_hash'] == host_hash_merged + - test_inventory_group_hash == group_hash_merged + when: "lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'merge'" diff --git a/test/integration/targets/hash/vars/test_hash_vars.yml b/test/integration/targets/hash/vars/test_hash_vars.yml new file mode 100644 index 00000000..e25f8576 --- /dev/null +++ b/test/integration/targets/hash/vars/test_hash_vars.yml @@ -0,0 +1,3 @@ +--- +test_hash: + vars_file: "this is in a vars_file" diff --git a/test/integration/targets/hosts_field/aliases b/test/integration/targets/hosts_field/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/hosts_field/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/hosts_field/inventory.hosts_field b/test/integration/targets/hosts_field/inventory.hosts_field new file mode 100644 index 00000000..46644046 --- /dev/null +++ b/test/integration/targets/hosts_field/inventory.hosts_field @@ -0,0 +1 @@ +42 ansible_host=127.0.0.42 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/hosts_field/runme.sh b/test/integration/targets/hosts_field/runme.sh new file mode 100755 index 00000000..1291933c --- /dev/null +++ b/test/integration/targets/hosts_field/runme.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -eux + +# Hosts in playbook has a list of strings consisting solely of digits +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t string_digit_host_in_list -v 
"$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 1 + +# Hosts taken from kv extra_var on the CLI +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t hosts_from_kv_string -v "$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 1 + +# hosts is taken from an all digit json extra_vars string on the CLI +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t hosts_from_cli_json_string -v "$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 1 + +# hosts is taken from a json list in extra_vars on the CLI +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t hosts_from_cli_json_list -v "$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +grep 'Running on localhost' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 2 + +# hosts is taken from a json string in an extra_vars file +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t hosts_from_json_file_string -v "$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 1 + +# hosts is taken from a json list in an extra_vars file +ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \ + -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \ + -t hosts_from_json_file_list -v "$@" | tee test_hosts_field.out +grep 'Running on 42' test_hosts_field.out 2>&1 +grep 'Running on localhost' test_hosts_field.out 2>&1 +test "$(grep -c 'ok=1' test_hosts_field.out)" = 2 + +rm test_hosts_field.out diff --git a/test/integration/targets/hosts_field/test_hosts_field.json b/test/integration/targets/hosts_field/test_hosts_field.json new file mode 100644 index 00000000..26875560 --- /dev/null +++ b/test/integration/targets/hosts_field/test_hosts_field.json @@ -0,0 +1 @@ +{ "target_json_file": "42", "target_json_file_list": ["42", "localhost"] } diff --git a/test/integration/targets/hosts_field/test_hosts_field.yml b/test/integration/targets/hosts_field/test_hosts_field.yml new file mode 100644 index 00000000..568d7025 --- /dev/null +++ b/test/integration/targets/hosts_field/test_hosts_field.yml @@ -0,0 +1,62 @@ +--- +#- name: Host in playbook is an integer +# hosts: 42 +# tags: numeric_host +# tasks: +# - command: echo 'Running on {{ inventory_hostname }}' + +#- name: Host in playbook is a string of digits +# hosts: "42" +# tags: string_digit_host +# tasks: +# - command: echo 'Running on {{ inventory_hostname }}' + +#- name: Host in playbook is a list of integer +# hosts: +# - 42 +# tags: numeric_host_in_list +# tasks: +# - command: echo 'Running on {{ inventory_hostname }}' + +- name: Host in playbook is a list of strings of digits + hosts: + - "42" + gather_facts: False + 
tags: string_digit_host_in_list + tasks: + - command: echo 'Running on {{ inventory_hostname }}' + +- name: Hosts taken from kv extra_var on the CLI + hosts: "{{ target_kv }}" + gather_facts: False + tags: hosts_from_kv_string + tasks: + - command: echo 'Running on {{ inventory_hostname }}' + +- name: Hosts taken from a json string on the CLI + hosts: "{{ target_json_cli }}" + gather_facts: False + tags: hosts_from_cli_json_string + tasks: + - command: echo 'Running on {{ inventory_hostname }}' + +- name: Hosts taken from a json list on the CLI + hosts: "{{ target_json_cli_list }}" + gather_facts: False + tags: hosts_from_cli_json_list + tasks: + - command: echo 'Running on {{ inventory_hostname }}' + +- name: Hosts is taken from a json string in an extra_vars file + hosts: "{{ target_json_file }}" + gather_facts: False + tags: hosts_from_json_file_string + tasks: + - command: echo 'Running on {{ inventory_hostname }}' + +- name: Hosts is taken from a json list in an extra_vars file + hosts: "{{ target_json_file_list }}" + gather_facts: False + tags: hosts_from_json_file_list + tasks: + - command: echo 'Running on {{ inventory_hostname }}' diff --git a/test/integration/targets/ignore_errors/aliases b/test/integration/targets/ignore_errors/aliases new file mode 100644 index 00000000..3005e4b2 --- /dev/null +++ b/test/integration/targets/ignore_errors/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/test/integration/targets/ignore_errors/meta/main.yml b/test/integration/targets/ignore_errors/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/ignore_errors/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/ignore_errors/tasks/main.yml b/test/integration/targets/ignore_errors/tasks/main.yml new file mode 100644 index 00000000..a6964e04 --- /dev/null +++ b/test/integration/targets/ignore_errors/tasks/main.yml @@ -0,0 +1,22 @@ +# test code +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
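+ +# Note that ignore_errors only masks task failures: the task below still +# registers a failed result (with the non-zero rc from /bin/false), but the +# play carries on for this host. Unreachable-host errors are not covered by +# ignore_errors; those are exercised by the separate ignore_unreachable target.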
+ +- name: this will not stop the playbook + shell: /bin/false + register: failed + ignore_errors: True diff --git a/test/integration/targets/ignore_unreachable/aliases b/test/integration/targets/ignore_unreachable/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py b/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py new file mode 100644 index 00000000..b5e9ca88 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py @@ -0,0 +1,11 @@ +import ansible.plugins.connection.local as ansible_local +from ansible.errors import AnsibleConnectionFailure + +from ansible.utils.display import Display +display = Display() + + +class Connection(ansible_local.Connection): + def exec_command(self, cmd, in_data=None, sudoable=True): + display.debug('Intercepted call to exec remote command') + raise AnsibleConnectionFailure('BADLOCAL Error: this is supposed to fail') diff --git a/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py b/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py new file mode 100644 index 00000000..98927997 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py @@ -0,0 +1,11 @@ +import ansible.plugins.connection.local as ansible_local +from ansible.errors import AnsibleConnectionFailure + +from ansible.utils.display import Display +display = Display() + + +class Connection(ansible_local.Connection): + def put_file(self, in_path, out_path): + display.debug('Intercepted call to send data') + raise AnsibleConnectionFailure('BADLOCAL Error: this is supposed to fail') diff --git a/test/integration/targets/ignore_unreachable/inventory b/test/integration/targets/ignore_unreachable/inventory new file mode 100644 index 00000000..495a68cf --- /dev/null +++ b/test/integration/targets/ignore_unreachable/inventory @@ -0,0 +1,3 @@ +nonexistent ansible_host=169.254.199.200 +bad_put_file ansible_host=localhost ansible_connection=bad_put_file +bad_exec ansible_host=localhost ansible_connection=bad_exec diff --git a/test/integration/targets/ignore_unreachable/meta/main.yml b/test/integration/targets/ignore_unreachable/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/ignore_unreachable/runme.sh b/test/integration/targets/ignore_unreachable/runme.sh new file mode 100755 index 00000000..5b0ef190 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/runme.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -eux + +export ANSIBLE_CONNECTION_PLUGINS=./fake_connectors +# use fake connectors that raise errors at different stages +ansible-playbook test_with_bad_plugins.yml -i inventory -v "$@" +unset ANSIBLE_CONNECTION_PLUGINS + +ansible-playbook test_cannot_connect.yml -i inventory -v "$@" + +if ansible-playbook test_base_cannot_connect.yml -i inventory -v "$@"; then + echo "Playbook intended to fail succeeded. Connection succeeded to nonexistent host" + exit 99 +else + echo "Connection to nonexistent hosts failed without using ignore_unreachable. Success!"
+fi diff --git a/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml b/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml new file mode 100644 index 00000000..931c82bf --- /dev/null +++ b/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml @@ -0,0 +1,5 @@ +- hosts: [localhost, nonexistent] + gather_facts: false + tasks: + - name: Hi + ping: diff --git a/test/integration/targets/ignore_unreachable/test_cannot_connect.yml b/test/integration/targets/ignore_unreachable/test_cannot_connect.yml new file mode 100644 index 00000000..64e2bfea --- /dev/null +++ b/test/integration/targets/ignore_unreachable/test_cannot_connect.yml @@ -0,0 +1,29 @@ +--- +- hosts: localhost + gather_facts: false + tasks: + - name: Hi + ping: +- hosts: [localhost, nonexistent] + ignore_unreachable: true + gather_facts: false + tasks: + - name: Hi + ping: +- hosts: nonexistent + ignore_unreachable: true + gather_facts: false + tasks: + - name: Hi + ping: + - name: This should print anyway + debug: + msg: This should print even though the host was unreachable + - name: Hi + ping: + register: should_fail + - assert: + that: + - 'should_fail is unreachable' + - 'not (should_fail is skipped)' + - 'not (should_fail is failed)' diff --git a/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml b/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml new file mode 100644 index 00000000..5d62f199 --- /dev/null +++ b/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml @@ -0,0 +1,24 @@ +- hosts: bad_put_file + gather_facts: false + ignore_unreachable: true + tasks: + - name: Hi + ping: +- hosts: bad_put_file + gather_facts: true + ignore_unreachable: true + tasks: + - name: Hi + ping: +- hosts: bad_exec + gather_facts: false + ignore_unreachable: true + tasks: + - name: Hi + ping: +- hosts: bad_exec + gather_facts: true + ignore_unreachable: true + tasks: + - name: Hi + ping: diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/aliases b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases new file mode 100644 index 00000000..9901373a --- /dev/null +++ b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/incidental diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml new file mode 100644 index 00000000..5b33ffb9 --- /dev/null +++ b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml @@ -0,0 +1,640 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create instance of MariaDB Server -- check mode + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12!
+ check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12! + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create again instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12! + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.state == 'Ready' + +- name: Update instance of MariaDB Server, change storage size + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 128000 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12! + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + - output.state == 'Ready' +- debug: + var: output + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_facts: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that storage size is correct + assert: + that: + - output.servers[0]['storage_mb'] == 128000 + +- name: Create second instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12! + tags: + aaa: bbb + +- name: Update second instance of MariaDB Server with an additional tag + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Testpasswordxyz12!
+ tags: + ccc: ddd + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_facts: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[0]['tags']['aaa'] == 'bbb' + - output.servers[0]['tags']['ccc'] == 'ddd' + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_facts: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[1]['id'] != None + - output.servers[1]['name'] != None + - output.servers[1]['location'] != None + - output.servers[1]['sku']['name'] != None + - output.servers[1]['sku']['tier'] != None + - output.servers[1]['sku']['capacity'] != None + - output.servers[1]['version'] != None + - output.servers[1]['user_visible_state'] != None + - output.servers[1]['fully_qualified_domain_name'] != None + +# +# azure_rm_mariadbdatabase tests below +# +- name: Create instance of MariaDB Database -- check mode + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create again instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.name == 'testdatabase' + +- name: Try to update database without force_update + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_czech_ci + charset: latin1 + ignore_errors: yes + register: output +- name: Assert that nothing has changed + assert: + that: + - output.changed == False + +- name: Update instance of database using force_update + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_czech_ci + charset: latin1 + force_update: yes + register: output +- name: Assert the 
state has changed + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create second instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase2 + +- name: Gather facts MariaDB Database + azure_rm_mariadbdatabase_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + +- name: Gather facts MariaDB Database + azure_rm_mariadbdatabase_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + - output.databases[1]['server_name'] != None + - output.databases[1]['name'] != None + - output.databases[1]['charset'] != None + - output.databases[1]['collation'] != None + +- name: Delete instance of MariaDB Database -- check mode + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete nonexistent instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +# +# azure_rm_firewallrule tests below +# +- name: Create instance of Firewall Rule -- check mode + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule -- check mode + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx
}} + name: firewallrule{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule -- second + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - "output.rules | length == 1" + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - output.rules[1].id != None + - output.rules[1].name != None + - output.rules[1].start_ip_address != None + - output.rules[1].end_ip_address != None + - "output.rules | length == 2" + +- name: Delete instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule - second + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + state: absent + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that empty list was returned + assert: + that: + - output.changed == False + - "output.rules | length == 0" + +# +# configuration +# +- name: Create instance of Configuration -- check mode + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + check_mode: yes + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to delete default configuraion + azure_rm_mariadbconfiguration_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Get facts of event_scheduler + debug: + var: output + +- name: Try to delete default configuraion + azure_rm_mariadbconfiguration: + resource_group: 
"{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to change default configuraion + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to change default configuration -- idempotent + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to reset configuration + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to reset configuration -- idempotent + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Gather facts MariaDB Configuration + azure_rm_mariadbconfiguration_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length == 1 + +- name: Gather facts MariaDB Configuration + azure_rm_mariadbconfiguration_facts: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length > 1 + +# +# clean up azure_rm_mariadbserver test +# + +- name: Delete instance of MariaDB Server -- check mode + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete second instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + state: absent + async: 400 + poll: 0 diff --git a/test/integration/targets/incidental_azure_rm_resource/aliases 
b/test/integration/targets/incidental_azure_rm_resource/aliases new file mode 100644 index 00000000..9901373a --- /dev/null +++ b/test/integration/targets/incidental_azure_rm_resource/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/incidental diff --git a/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml new file mode 100644 index 00000000..7c3024a5 --- /dev/null +++ b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml @@ -0,0 +1,158 @@ +- name: Prepare random number + set_fact: + nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + dbname: "mdb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + idempotency: yes + register: output + +- name: Assert that something has changed + assert: + that: output.changed + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + idempotency: yes + register: output + +- name: Assert that nothing has changed + assert: + that: not output.changed + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + tags: + a: "abc" + b: "cde" + idempotency: yes + register: output + +- name: Assert that something has changed + assert: + that: output.changed + +- name: Try to get information about the resource + azure_rm_resource_facts: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + register: output + +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length == 1 + +- name: Try to query a list + azure_rm_resource_facts: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length >= 1 + +- name: Try to query a list - same without API version + azure_rm_resource_facts: + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length >= 1 + +- name: Query all the resources in the resource group + azure_rm_resource_facts: + resource_group: "{{ resource_group }}" + resource_type: resources + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response | length >= 1 + +- name: Create storage account that requires LRO polling + azure_rm_resource: + polling_timeout: 600 + polling_interval: 60 + api_version: '2018-07-01' +
resource_group: "{{ resource_group }}" + provider: Storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" + body: + sku: + name: Standard_GRS + kind: Storage + location: eastus + register: output + +- name: Assert that storage was successfully created + assert: + that: "output['response']['name'] == '{{ storageaccountname }}'" + + +- name: Try to storage keys -- special case when subresource part has no name + azure_rm_resource: + resource_group: "{{ resource_group }}" + provider: storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" + subresource: + - type: listkeys + api_version: '2018-03-01-preview' + method: POST + register: keys + +- name: Assert that key was returned + assert: + that: keys['response']['keys'][0]['value'] | length > 0 + +- name: Delete storage - without API version + azure_rm_resource: + polling_timeout: 600 + polling_interval: 60 + method: DELETE + resource_group: "{{ resource_group }}" + provider: Storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" diff --git a/test/integration/targets/incidental_cloud_init_data_facts/aliases b/test/integration/targets/incidental_cloud_init_data_facts/aliases new file mode 100644 index 00000000..85f7fe0f --- /dev/null +++ b/test/integration/targets/incidental_cloud_init_data_facts/aliases @@ -0,0 +1,6 @@ +destructive +shippable/posix/incidental +skip/aix +skip/osx +skip/macos +skip/freebsd diff --git a/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml new file mode 100644 index 00000000..eca905c6 --- /dev/null +++ b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: test cloud-init + # TODO: check for a workaround + # install 'cloud-init'' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf + # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of + # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib + # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838 + # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions + # (!= 42 and >= 15) ascloud-init will install the Python 3 package, breaking our build on py2. 
+ when: + - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14) + - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3) + block: + - name: setup install cloud-init + package: + name: + - cloud-init + - udev + + - name: setup run cloud-init + service: + name: cloud-init-local + state: restarted + + - name: test gather cloud-init facts in check mode + cloud_init_data_facts: + check_mode: yes + register: result + - name: verify test gather cloud-init facts in check mode + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage + + - name: test gather cloud-init facts + cloud_init_data_facts: + register: result + - name: verify test gather cloud-init facts + assert: + that: + - result.cloud_init_data_facts.status.v1 is defined + - result.cloud_init_data_facts.status.v1.stage is defined + - not result.cloud_init_data_facts.status.v1.stage + - cloud_init_data_facts.status.v1 is defined + - cloud_init_data_facts.status.v1.stage is defined + - not cloud_init_data_facts.status.v1.stage diff --git a/test/integration/targets/incidental_cloudformation/aliases b/test/integration/targets/incidental_cloudformation/aliases new file mode 100644 index 00000000..29f60feb --- /dev/null +++ b/test/integration/targets/incidental_cloudformation/aliases @@ -0,0 +1,2 @@ +cloud/aws +shippable/aws/incidental diff --git a/test/integration/targets/incidental_cloudformation/defaults/main.yml b/test/integration/targets/incidental_cloudformation/defaults/main.yml new file mode 100644 index 00000000..aaf0ca7e --- /dev/null +++ b/test/integration/targets/incidental_cloudformation/defaults/main.yml @@ -0,0 +1,8 @@ +stack_name: "{{ resource_prefix }}" + +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' + +ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2' diff --git a/test/integration/targets/incidental_cloudformation/files/cf_template.json b/test/integration/targets/incidental_cloudformation/files/cf_template.json new file mode 100644 index 00000000..ff4c5693 --- /dev/null +++ b/test/integration/targets/incidental_cloudformation/files/cf_template.json @@ -0,0 +1,37 @@ +{ + "AWSTemplateFormatVersion" : "2010-09-09", + + "Description" : "Create an Amazon EC2 instance.", + + "Parameters" : { + "InstanceType" : { + "Description" : "EC2 instance type", + "Type" : "String", + "Default" : "t3.nano", + "AllowedValues" : [ "t3.micro", "t3.nano"] + }, + "ImageId" : { + "Type" : "String" + }, + "SubnetId" : { + "Type" : "String" + } + }, + + "Resources" : { + "EC2Instance" : { + "Type" : "AWS::EC2::Instance", + "Properties" : { + "InstanceType" : { "Ref" : "InstanceType" }, + "ImageId" : { "Ref" : "ImageId" }, + "SubnetId": { "Ref" : "SubnetId" } + } + } + }, + + "Outputs" : { + "InstanceId" : { + "Value" : { "Ref" : "EC2Instance" } + } + } +} diff --git a/test/integration/targets/incidental_cloudformation/tasks/main.yml b/test/integration/targets/incidental_cloudformation/tasks/main.yml new file mode 100644 index 00000000..10924bcd --- /dev/null +++ b/test/integration/targets/incidental_cloudformation/tasks/main.yml @@ -0,0 
+1,476 @@ +--- +- name: set up aws connection info + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region | default(omit) }}" + no_log: yes + +- module_defaults: + cloudformation: + <<: *aws_connection_info + cloudformation_info: + <<: *aws_connection_info + + block: + + # ==== Env setup ========================================================== + - name: list available AZs + aws_az_info: + <<: *aws_connection_info + register: region_azs + + - name: pick an AZ for testing + set_fact: + availability_zone: "{{ region_azs.availability_zones[0].zone_name }}" + + - name: Create a test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: Cloudformation testing + <<: *aws_connection_info + register: testing_vpc + + - name: Create a test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + <<: *aws_connection_info + register: testing_subnet + + - name: Find AMI to use + ec2_ami_info: + owners: 'amazon' + filters: + name: '{{ ec2_ami_name }}' + <<: *aws_connection_info + register: ec2_amis + + - name: Set fact with latest AMI + vars: + latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}' + set_fact: + ec2_ami_image: '{{ latest_ami.image_id }}' + + # ==== Cloudformation tests =============================================== + + # 1. Basic stack creation (check mode, actual run and idempotency) + # 2. Tags + # 3. cloudformation_info tests (basic + all_facts) + # 4. termination_protection + # 5. create_changeset + changeset_name + + # There is still scope to add tests for - + # 1. capabilities + # 2. stack_policy + # 3. on_create_failure (covered in unit tests) + # 4. Passing in a role + # 5. nested stacks? 
+ + + - name: create a cloudformation stack (check mode) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + check_mode: yes + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" + + - name: create a cloudformation stack + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'events' in cf_stack" + - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output" + - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs" + - "'stack_resources' in cf_stack" + + - name: create a cloudformation stack (check mode) (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + check_mode: yes + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + + - name: create a cloudformation stack (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'output' in cf_stack and 'Stack is already up-to-date.' 
in cf_stack.output" + - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs" + - "'stack_resources' in cf_stack" + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "'cloudformation' in stack_info" + - "stack_info.cloudformation | length == 1" + - "stack_name in stack_info.cloudformation" + - "'stack_description' in stack_info.cloudformation[stack_name]" + - "'stack_outputs' in stack_info.cloudformation[stack_name]" + - "'stack_parameters' in stack_info.cloudformation[stack_name]" + - "'stack_tags' in stack_info.cloudformation[stack_name]" + - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "'cloudformation' in stack_info" + - "stack_info.cloudformation | length == 1" + - "stack_name in stack_info.cloudformation" + - "'stack_description' in stack_info.cloudformation[stack_name]" + - "'stack_outputs' in stack_info.cloudformation[stack_name]" + - "'stack_parameters' in stack_info.cloudformation[stack_name]" + - "'stack_tags' in stack_info.cloudformation[stack_name]" + - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + + - name: get stack details (all_facts) + cloudformation_info: + stack_name: "{{ stack_name }}" + all_facts: yes + register: stack_info + + - name: assert stack info + assert: + that: + - "'stack_events' in stack_info.cloudformation[stack_name]" + - "'stack_policy' in stack_info.cloudformation[stack_name]" + - "'stack_resource_list' in stack_info.cloudformation[stack_name]" + - "'stack_resources' in stack_info.cloudformation[stack_name]" + - "'stack_template' in stack_info.cloudformation[stack_name]" + + - name: get stack details (all_facts) (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + all_facts: yes + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "'stack_events' in stack_info.cloudformation[stack_name]" + - "'stack_policy' in stack_info.cloudformation[stack_name]" + - "'stack_resource_list' in stack_info.cloudformation[stack_name]" + - "'stack_resources' in stack_info.cloudformation[stack_name]" + - "'stack_template' in stack_info.cloudformation[stack_name]" + + # ==== Cloudformation tests (create changeset) ============================ + + # try to create a changeset by changing instance type + - name: create a changeset + cloudformation: + stack_name: "{{ stack_name }}" + create_changeset: yes + changeset_name: "test-changeset" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.micro" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: create_changeset_result + + - name: assert changeset created + assert: + that: + - "create_changeset_result.changed" + - "'change_set_id' in create_changeset_result" + - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output" + + - name: get stack details with changesets + cloudformation_info: + stack_name: "{{ stack_name }}" + stack_change_sets: True + register: stack_info + + - name: assert changesets in info + assert: + that: + - "'stack_change_sets' in stack_info.cloudformation[stack_name]" + + - name: get stack details with changesets 
(checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + stack_change_sets: True + register: stack_info + check_mode: yes + + - name: assert changesets in info + assert: + that: + - "'stack_change_sets' in stack_info.cloudformation[stack_name]" + + # try to create an empty changeset by passing in unchanged template + - name: create a changeset + cloudformation: + stack_name: "{{ stack_name }}" + create_changeset: yes + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: create_changeset_result + + - name: assert changeset created + assert: + that: + - "not create_changeset_result.changed" + - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output" + + # ==== Cloudformation tests (termination_protection) ====================== + + - name: set termination protection to true + cloudformation: + stack_name: "{{ stack_name }}" + termination_protection: yes + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + +# This fails - #65592 +# - name: check task return attributes +# assert: +# that: +# - cf_stack.changed + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: set termination protection to false + cloudformation: + stack_name: "{{ stack_name }}" + termination_protection: no + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_image }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + +# This fails - #65592 +# - name: check task return attributes +# assert: +# that: +# - cf_stack.changed + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + # ==== Cloudformation tests (delete stack tests) ========================== + + - name: delete cloudformation stack (check mode) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + check_mode: yes + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg" + + - name: delete cloudformation stack + cloudformation: 
+ stack_name: "{{ stack_name }}" + state: absent + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output" + + - name: delete cloudformation stack (check mode) (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + check_mode: yes + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'msg' in cf_stack" + - >- + "Stack doesn't exist" in cf_stack.msg + + - name: delete cloudformation stack (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'output' in cf_stack and 'Stack not found.' in cf_stack.output" + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation" + + # ==== Cleanup ============================================================ + + always: + + - name: delete stack + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + ignore_errors: yes + + - name: Delete test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + <<: *aws_connection_info + ignore_errors: yes + + - name: Delete test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + state: absent + <<: *aws_connection_info + ignore_errors: yes diff --git a/test/integration/targets/incidental_cs_common/aliases b/test/integration/targets/incidental_cs_common/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_cs_common/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_cs_common/defaults/main.yml b/test/integration/targets/incidental_cs_common/defaults/main.yml new file mode 100644 index 00000000..942316bd --- /dev/null +++ b/test/integration/targets/incidental_cs_common/defaults/main.yml @@ -0,0 +1,6 @@ +--- +cs_resource_prefix: "cs-{{ (ansible_date_time.iso8601_micro | to_uuid).split('-')[0] }}" +cs_common_template: CentOS 5.6 (64-bit) no GUI (Simulator) +cs_common_service_offering: Small Instance +cs_common_zone_adv: Sandbox-simulator-advanced +cs_common_zone_basic: Sandbox-simulator-basic diff --git a/test/integration/targets/incidental_deploy_helper/aliases b/test/integration/targets/incidental_deploy_helper/aliases new file mode 100644 index 00000000..31c6a8b4 --- /dev/null +++ b/test/integration/targets/incidental_deploy_helper/aliases @@ -0,0 +1 @@ +shippable/posix/incidental diff --git a/test/integration/targets/incidental_deploy_helper/tasks/main.yml b/test/integration/targets/incidental_deploy_helper/tasks/main.yml new file mode 100644 index 00000000..962c894a --- /dev/null +++ b/test/integration/targets/incidental_deploy_helper/tasks/main.yml @@ -0,0 +1,149 @@ +--- +- name: record the output directory + set_fact: deploy_helper_test_root={{output_dir}}/deploy_helper_test_root + +- name: State=query with default parameters + deploy_helper: path={{ deploy_helper_test_root }} state=query +- name: Assert State=query with default parameters + assert: + that: + - 
"'project_path' in deploy_helper" + - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/current'" + - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/releases'" + - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/shared'" + - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'" + - "'previous_release' in deploy_helper" + - "'previous_release_path' in deploy_helper" + - "'new_release' in deploy_helper" + - "'new_release_path' in deploy_helper" + - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release }}'" + +- name: State=query with relative overridden paths + deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query +- name: Assert State=query with relative overridden paths + assert: + that: + - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/CURRENT_PATH'" + - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/RELEASES_PATH'" + - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/SHARED_PATH'" + - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'" + +- name: State=query with absolute overridden paths + deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query +- name: Assert State=query with absolute overridden paths + assert: + that: + - "deploy_helper.current_path == '/CURRENT_PATH'" + - "deploy_helper.releases_path == '/RELEASES_PATH'" + - "deploy_helper.shared_path == '/SHARED_PATH'" + - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'" + +- name: State=query with overridden unfinished_filename + deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query +- name: Assert State=query with overridden unfinished_filename + assert: + that: + - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename" + +# Remove the root folder just in case it exists +- file: path={{ deploy_helper_test_root }} state=absent + +- name: State=present with default parameters + deploy_helper: path={{ deploy_helper_test_root }} state=present +- stat: path={{ deploy_helper.releases_path }} + register: releases_path +- stat: path={{ deploy_helper.shared_path }} + register: shared_path +- name: Assert State=present with default parameters + assert: + that: + - "releases_path.stat.exists" + - "shared_path.stat.exists" + +# Setup older releases for tests +- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory + with_items: ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh'] +# Setup the new release +- file: path={{ deploy_helper.new_release_path }} state=directory +# Add a buildfile, just like in a real deploy +- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }} +# Add a buildfile, to an older deploy +- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }} + +- name: State=finalize with default parameters + deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=finalize +- stat: path={{ deploy_helper.current_path }} + register: current_path +- stat: path={{ deploy_helper.current_path }}/DEPLOY_UNFINISHED + register: current_path_unfinished_filename +- name: Assert State=finalize with default 
parameters + assert: + that: + - "current_path.stat.islnk" + - "deploy_helper.new_release_path in current_path.stat.lnk_source" + - "not current_path_unfinished_filename.stat.exists" +- stat: path={{ deploy_helper.releases_path }}/third + register: third_release_path +- shell: "ls {{ deploy_helper.releases_path }} | wc -l" + register: releases_count +- name: Assert State=finalize with default parameters (clean=true checks) + assert: + that: + - "not third_release_path.stat.exists" + - "releases_count.stdout|trim == '6'" +- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query +- name: Assert State=finalize with default parameters (previous_release checks) + assert: + that: + - "deploy_helper.new_release == deploy_helper.previous_release" + +- name: State=absent with default parameters + deploy_helper: path={{ deploy_helper_test_root }} state=absent +- stat: path={{ deploy_helper_test_root }} + register: project_path +- name: Assert State=absent with default parameters + assert: + that: + - "not project_path.stat.exists" + +- debug: msg="Clearing all release data and facts ---------" + +- name: State=present with shared_path set to False + deploy_helper: path={{ deploy_helper_test_root }} state=present shared_path='' +- stat: path={{ deploy_helper.releases_path }} + register: releases_path +- stat: path={{ deploy_helper.shared_path }} + register: shared_path +- name: Assert State=present with shared_path set to False + assert: + that: + - "releases_path.stat.exists" + - "not shared_path.stat.exists" + +# Setup older releases for tests +- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory + with_items: ['first', 'second', 'third', 'fourth', 'fifth'] +# Setup the new release +- file: path={{ deploy_helper.new_release_path }} state=directory +# Add a buildfile, just like in a real deploy +- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }} +# Add a buildfile, to an older deploy +- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }} + +- shell: "ls {{ deploy_helper_test_root }}/releases | wc -l" + register: before_releases_count +- name: State=clean with keep_releases=3 + deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=clean keep_releases=3 +- stat: path={{ deploy_helper.releases_path }}/third + register: third_release_path +- shell: "ls {{ deploy_helper.releases_path }} | wc -l" + register: releases_count +- name: Assert State=clean with keep_releases=3 (clean checks) + assert: + that: + - "not third_release_path.stat.exists" + - "before_releases_count.stdout|trim == '6'" + - "releases_count.stdout|trim == '3'" + +# Remove the root folder +- file: path={{ deploy_helper_test_root }} state=absent diff --git a/test/integration/targets/incidental_flatpak_remote/aliases b/test/integration/targets/incidental_flatpak_remote/aliases new file mode 100644 index 00000000..32b7f55a --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/aliases @@ -0,0 +1,8 @@ +shippable/posix/incidental +destructive +skip/aix +skip/freebsd +skip/osx +skip/macos +skip/rhel +needs/root diff --git a/test/integration/targets/incidental_flatpak_remote/meta/main.yml b/test/integration/targets/incidental_flatpak_remote/meta/main.yml new file mode 100644 index 00000000..a1c58bf1 --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/meta/main.yml @@ -0,0 +1,2 @@
+dependencies: + - incidental_setup_flatpak_remote diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml new file mode 100644 index 00000000..7ce89a8c --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml @@ -0,0 +1,101 @@ +# - Tests with absent flatpak remote ------------------------------------------- + +# state=present + +- name: Test addition of absent flatpak remote (check mode) + flatpak_remote: + name: flatpak-test + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present + register: addition_result + check_mode: true + +- name: Verify addition of absent flatpak remote test result (check mode) + assert: + that: + - "addition_result.changed == true" + msg: "Adding an absent flatpak remote shall mark module execution as changed" + +- name: Test non-existent idempotency of addition of absent flatpak remote (check mode) + flatpak_remote: + name: flatpak-test + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present + register: double_addition_result + check_mode: true + +- name: > + Verify non-existent idempotency of addition of absent flatpak remote + test result (check mode) + assert: + that: + - "double_addition_result.changed == true" + msg: | + Adding an absent flatpak remote a second time shall still mark module execution + as changed in check mode + +# state=absent + +- name: Test removal of absent flatpak remote not doing anything in check mode + flatpak_remote: + name: flatpak-test + state: absent + register: removal_result + check_mode: true + +- name: Verify removal of absent flatpak remote test result (check mode) + assert: + that: + - "removal_result.changed == false" + msg: "Removing an absent flatpak remote shall mark module execution as not changed" + + +# - Tests with present flatpak remote ------------------------------------------- + +# state=present + +- name: Test addition of present flatpak remote (check mode) + flatpak_remote: + name: check-mode-test-remote + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present + register: addition_result + check_mode: true + +- name: Verify addition of present flatpak remote test result (check mode) + assert: + that: + - "addition_result.changed == false" + msg: "Adding a present flatpak remote shall mark module execution as not changed" + +# state=absent + +- name: Test removal of present flatpak remote not doing anything in check mode + flatpak_remote: + name: check-mode-test-remote + state: absent + register: removal_result + check_mode: true + +- name: Verify removal of present flatpak remote test result (check mode) + assert: + that: + - "removal_result.changed == true" + msg: "Removing a present flatpak remote shall mark module execution as changed" + +- name: Test non-existent idempotency of removal of present flatpak remote (check mode) + flatpak_remote: + name: check-mode-test-remote + state: absent + register: double_removal_result + check_mode: true + +- name: > + Verify non-existent idempotency of removal of present flatpak remote + test result (check mode) + assert: + that: + - "double_removal_result.changed == true" + msg: | + Removing a present flatpak remote a second time shall still mark module execution + as changed in check mode diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/main.yml b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml new file mode 100644 index 
00000000..9c3ec6d7 --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml @@ -0,0 +1,57 @@ +# (c) 2018, Alexander Bethke +# (c) 2018, Ansible Project + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- block: + + - import_tasks: setup.yml + become: true + + # executable override + + - name: Test executable override + flatpak_remote: + name: irrelevant + remote: irrelevant + state: present + executable: nothing-that-exists + ignore_errors: true + register: executable_override_result + + - name: Verify executable override test result + assert: + that: + - "executable_override_result.failed == true" + - "executable_override_result.changed == false" + msg: "Specifying non-existing executable shall fail module execution" + + - import_tasks: check_mode.yml + become: false + + - import_tasks: test.yml + become: false + vars: + method: user + + - import_tasks: test.yml + become: true + vars: + method: system + + when: | + ansible_distribution == 'Fedora' or + ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16 diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml new file mode 100644 index 00000000..b2fd2766 --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml @@ -0,0 +1,27 @@ +- name: Install flatpak on Fedora + dnf: + name: flatpak + state: present + + when: ansible_distribution == 'Fedora' + +- block: + - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic + apt_repository: + repo: "ppa:alexlarsson/flatpak" + state: present + mode: 0644 + when: ansible_lsb.major_release | int < 18 + + - name: Install flatpak package on Ubuntu + apt: + name: flatpak + state: present + + when: ansible_distribution == 'Ubuntu' + +- name: Install flatpak remote for testing check mode + flatpak_remote: + name: check-mode-test-remote + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/test.yml b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml new file mode 100644 index 00000000..97a13f0c --- /dev/null +++ b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml @@ -0,0 +1,72 @@ +# state=present + +- name: Test addition - {{ method }} + flatpak_remote: + name: flatpak-test + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present + method: "{{ method }}" + register: addition_result + +- name: Verify addition test result - {{ method }} + assert: + that: + - "addition_result.changed == true" + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of addition - {{ method }} + flatpak_remote: + name: flatpak-test + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo + state: present + method: "{{ method }}" + register: double_addition_result + +- name: Verify
idempotency of addition test result - {{ method }} + assert: + that: + - "double_addition_result.changed == false" + msg: "state=present shall not do anything when flatpak is already present" + +- name: Test updating remote url does not do anything - {{ method }} + flatpak_remote: + name: flatpak-test + flatpakrepo_url: https://a.different/repo.flatpakrepo + state: present + method: "{{ method }}" + register: url_update_result + +- name: Verify updating remote url does not do anything - {{ method }} + assert: + that: + - "url_update_result.changed == false" + msg: "Trying to update the URL of an existing flatpak remote shall not do anything" + + +# state=absent + +- name: Test removal - {{ method }} + flatpak_remote: + name: flatpak-test + state: absent + method: "{{ method }}" + register: removal_result + +- name: Verify removal test result - {{ method }} + assert: + that: + - "removal_result.changed == true" + msg: "state=absent shall remove flatpak when present" + +- name: Test idempotency of removal - {{ method }} + flatpak_remote: + name: flatpak-test + state: absent + method: "{{ method }}" + register: double_removal_result + +- name: Verify idempotency of removal test result - {{ method }} + assert: + that: + - "double_removal_result.changed == false" + msg: "state=absent shall not do anything when flatpak is not present" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/aliases b/test/integration/targets/incidental_inventory_aws_ec2/aliases new file mode 100644 index 00000000..29f60feb --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/aliases @@ -0,0 +1,2 @@ +cloud/aws +shippable/aws/incidental diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml new file mode 100644 index 00000000..8680c38d --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml @@ -0,0 +1,11 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + vars: + template_name: "../templates/{{ template | default('inventory.yml') }}" + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_ec2.yml + content: "{{ lookup('template', template_name) }}" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml new file mode 100644 index 00000000..f67fff1a --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_ec2.yml + content: "" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml new file mode 100644 index 00000000..07b0eec4 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml @@ -0,0 +1,64 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in 
groups" + - "not groups.aws_ec2" + + # Create new host, add it to inventory and then terminate it without updating the cache + + - name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + region: '{{ aws_region }}' + no_log: yes + + - name: create a new host + ec2: + image: '{{ image_id }}' + exact_count: 1 + count_tag: + Name: '{{ resource_prefix }}' + instance_tags: + Name: '{{ resource_prefix }}' + instance_type: t2.micro + wait: yes + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + <<: *aws_connection_info + register: setup_instance + + - meta: refresh_inventory + + always: + + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix }}' + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + <<: *aws_connection_info + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml new file mode 100644 index 00000000..8a9b8893 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml @@ -0,0 +1,62 @@ +- name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + region: '{{ aws_region }}' + no_log: yes + +- name: get image ID to create an instance + ec2_ami_info: + filters: + architecture: x86_64 + owner-id: '125523088429' + virtualization-type: hvm + root-device-type: ebs + name: 'Fedora-Atomic-27*' + <<: *aws_connection_info + register: fedora_images + +- set_fact: + image_id: '{{ fedora_images.images.0.image_id }}' + +- name: create a VPC to work in + ec2_vpc_net: + cidr_block: 10.10.0.0/24 + state: present + name: '{{ resource_prefix }}_setup' + resource_tags: + Name: '{{ resource_prefix }}_setup' + <<: *aws_connection_info + register: setup_vpc + +- set_fact: + vpc_id: '{{ setup_vpc.vpc.id }}' + +- name: create a subnet to use for creating an ec2 instance + ec2_vpc_subnet: + az: '{{ aws_region }}a' + tags: '{{ resource_prefix }}_setup' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: 10.10.0.0/24 + state: present + resource_tags: + Name: '{{ resource_prefix }}_setup' + <<: *aws_connection_info + register: setup_subnet + +- set_fact: + subnet_id: '{{ setup_subnet.subnet.id }}' + +- name: create a security group to use for creating an ec2 instance + ec2_group: + name: '{{ resource_prefix }}_setup' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + <<: *aws_connection_info + register: setup_sg + +- set_fact: + sg_id: '{{ setup_sg.group_id }}' diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml new file mode 100644 index 00000000..4c8240e4 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml @@ -0,0 +1,39 @@ +- name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: 
'{{ aws_secret_key }}' + security_token: '{{ security_token }}' + region: '{{ aws_region }}' + no_log: yes + +- name: remove setup security group + ec2_group: + name: '{{ resource_prefix }}_setup' + description: 'created by Ansible integration tests' + state: absent + vpc_id: '{{ vpc_id }}' + <<: *aws_connection_info + ignore_errors: yes + +- name: remove setup subnet + ec2_vpc_subnet: + az: '{{ aws_region }}a' + tags: '{{ resource_prefix }}_setup' + vpc_id: '{{ vpc_id }}' + cidr: 10.10.0.0/24 + state: absent + resource_tags: + Name: '{{ resource_prefix }}_setup' + <<: *aws_connection_info + ignore_errors: yes + +- name: remove setup VPC + ec2_vpc_net: + cidr_block: 10.10.0.0/24 + state: absent + name: '{{ resource_prefix }}_setup' + resource_tags: + Name: '{{ resource_prefix }}_setup' + <<: *aws_connection_info + ignore_errors: yes diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml new file mode 100644 index 00000000..cc1b9a5a --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert inventory was not populated by aws_ec2 inventory plugin + assert: + that: + - "'aws_ec2' not in groups" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml new file mode 100644 index 00000000..d83cb0bf --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml @@ -0,0 +1,18 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert cache was used to populate inventory + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + + - meta: refresh_inventory + + - name: assert refresh_inventory updated the cache + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml new file mode 100644 index 00000000..73a67db0 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml @@ -0,0 +1,91 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + # Create new host, refresh inventory, remove host, refresh inventory + + - name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + region: '{{ aws_region }}' + no_log: yes + + - name: create a new host + ec2: + image: '{{ image_id }}' + exact_count: 1 + count_tag: + Name: '{{ resource_prefix }}' + instance_tags: + Name: '{{ resource_prefix }}' + instance_type: t2.micro + wait: yes + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ 
subnet_id }}' + <<: *aws_connection_info + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + - "groups.aws_ec2.0 == '{{ resource_prefix }}'" + + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix }}' + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + <<: *aws_connection_info + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + always: + + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix }}' + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + <<: *aws_connection_info + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml new file mode 100644 index 00000000..fdeeeeff --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml @@ -0,0 +1,79 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + # Create new host, refresh inventory + + - name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + region: '{{ aws_region }}' + no_log: yes + + - name: create a new host + ec2: + image: '{{ image_id }}' + exact_count: 1 + count_tag: + Name: '{{ resource_prefix }}' + instance_tags: + Name: '{{ resource_prefix }}' + tag1: value1 + tag2: value2 + instance_type: t2.micro + wait: yes + group_id: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + <<: *aws_connection_info + register: setup_instance + + - meta: refresh_inventory + + - name: register the keyed sg group name + set_fact: + sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}" + + - name: register one of the keyed tag group names + set_fact: + tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}" + + - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars + assert: + that: + # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed groups (one per tag), arch keyed group, constructed group + - "groups | length == 9" + - "groups[tag_group_name] | length == 1" + - "groups[sg_group_name] | length == 1" + - "groups.arch_x86_64 | length == 1" + - "groups.tag_with_name_key | length == 1" + - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2' + + always: + + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml new file mode 100644 index 00000000..6b46599b --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml @@ -0,0 +1,74 @@ +- name: test updating inventory + block: + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + - name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: yes + + - name: create a new host + ec2: + image: "{{ images[aws_region] }}" + exact_count: 1 + count_tag: + Name: '{{ resource_prefix }}' + instance_tags: + Name: '{{ resource_prefix }}' + instance_type: t2.micro + wait: yes + group_id: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + <<: *aws_connection_info + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + - "groups.aws_ec2.0 == '{{ resource_prefix }}'" + + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix }}' + group_id: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + <<: *aws_connection_info + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + always: + - name: remove setup ec2 instance + ec2: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + instance_tags: + Name: '{{ resource_prefix }}' + group_id: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + <<: *aws_connection_info + ignore_errors: yes diff --git a/test/integration/targets/incidental_inventory_aws_ec2/runme.sh b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh new file mode 100755 index 00000000..916f7e8f --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -eux + +# ensure test config is empty +ansible-playbook playbooks/empty_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY_ENABLED=aws_ec2 + +# test with default inventory file +ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY=test.aws_ec2.yml + +# test empty inventory config +ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@" + +# generate inventory config and test using it +ansible-playbook playbooks/create_inventory_config.yml "$@" +ansible-playbook playbooks/test_populating_inventory.yml "$@" + +# generate inventory config with caching and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@" +ansible-playbook playbooks/populate_cache.yml "$@" +ansible-playbook
playbooks/test_inventory_cache.yml "$@" + +# remove inventory cache +rm -r aws_ec2_cache_dir/ + +# generate inventory config with constructed features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@" + +# cleanup inventory config +ansible-playbook playbooks/empty_inventory_config.yml "$@" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml new file mode 100644 index 00000000..942edb30 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml @@ -0,0 +1,12 @@ +plugin: aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +aws_security_token: '{{ security_token }}' +regions: + - '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +hostnames: + - tag:Name + - dns-name diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml new file mode 100644 index 00000000..e35bf901 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml @@ -0,0 +1,12 @@ +plugin: aws_ec2 +cache: True +cache_plugin: jsonfile +cache_connection: aws_ec2_cache_dir +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +aws_security_token: '{{ security_token }}' +regions: + - '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml new file mode 100644 index 00000000..6befb4e3 --- /dev/null +++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml @@ -0,0 +1,20 @@ +plugin: aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +aws_security_token: '{{ security_token }}' +regions: + - '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +keyed_groups: + - key: 'security_groups|json_query("[].group_id")' + prefix: 'security_groups' + - key: 'tags' + prefix: 'tag' + - prefix: 'arch' + key: "architecture" +compose: + test_compose_var_sum: tags.tag1 + tags.tag2 +groups: + tag_with_name_key: "'Name' in (tags | list)" diff --git a/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_inventory_docker_swarm/aliases b/test/integration/targets/incidental_inventory_docker_swarm/aliases new file mode 100644 index 00000000..c3a38c06 --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/aliases @@ -0,0 +1,13 @@ +shippable/posix/incidental +skip/aix +skip/power/centos +skip/osx +skip/macos +skip/freebsd +destructive +skip/docker # The tests sometimes make docker daemon unstable; hence, + # we skip all docker-based CI runs to avoid disrupting + # the whole CI system. On VMs, we restart docker daemon + # after finishing the tests to minimize potential effects + # on other tests. +needs/root
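Two equivalent ways of wiring up the same jsonfile inventory cache appear in this patch; a side-by-side sketch for reference (values taken from the templates and scripts in this patch, plugin defaults assumed for everything not shown, e.g. cache_timeout):

    # In an inventory source, as in inventory_with_cache.yml above:
    cache: True
    cache_plugin: jsonfile
    cache_connection: aws_ec2_cache_dir        # directory that holds the cached JSON
    # Or via the environment, as in the foreman runme.sh below:
    #   export ANSIBLE_INVENTORY_CACHE=True
    #   export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile
    #   export ANSIBLE_INVENTORY_CACHE_CONNECTION=./foreman_cache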
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml new file mode 100644 index 00000000..e8e6d55e --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml @@ -0,0 +1,3 @@ +--- +plugin: docker_swarm +docker_host: unix://var/run/docker.sock diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml new file mode 100644 index 00000000..e36bd00f --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml @@ -0,0 +1,5 @@ +--- +plugin: docker_swarm +docker_host: unix://var/run/docker.sock +verbose_output: no +include_host_uri: yes diff --git a/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml new file mode 100644 index 00000000..569a453c --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - incidental_setup_docker diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml new file mode 100644 index 00000000..fc4455ec --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml @@ -0,0 +1,19 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: yes + tasks: + - name: Make sure swarm is removed + docker_swarm: + state: absent + force: yes + + - name: remove docker packages + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: + - docker + - docker-ce + - docker-ce-cli + - containerd.io + state: absent diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml new file mode 100644 index 00000000..d9f77732 --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml @@ -0,0 +1,15 @@ +--- +- hosts: 127.0.0.1 + connection: local + vars: + docker_skip_cleanup: yes + + tasks: + - name: Setup docker + import_role: + name: incidental_setup_docker + + - name: Create a Swarm cluster + docker_swarm: + state: present + advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}" diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml new file mode 100644 index 00000000..600a89b1 --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml @@ -0,0 +1,58 @@ +--- +- hosts: 127.0.0.1 + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + gather_facts: no + tasks: + - name: Show all groups + debug: + var: groups + - name: Make sure docker_swarm groups are there + assert: + that: + - groups.all | length > 0 + - groups.leader | length == 1 + - groups.manager | length > 0 + - groups.worker | length >= 0 + - groups.nonleaders | length >= 0 + +- hosts: all + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + vars: + # for some
reason, Ansible can't find the Python interpreter when connecting to the nodes, + # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. + ansible_python_interpreter: "{{ ansible_playbook_python }}" + tasks: + - name: Check for groups + assert: + that: + - "groups.manager | length > 0" + - "groups.worker | length >= 0" + - "groups.leader | length == 1" + run_once: yes + + - name: List manager group + debug: + var: groups.manager + run_once: yes + + - name: List worker group + debug: + var: groups.worker + run_once: yes + + - name: List leader group + debug: + var: groups.leader + run_once: yes + + - name: Print ansible_host per host + debug: + var: ansible_host + + - name: Make sure docker_swarm_node_attributes is available + assert: + that: + - docker_swarm_node_attributes is not undefined + - name: Print docker_swarm_node_attributes per host + debug: + var: docker_swarm_node_attributes diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml new file mode 100644 index 00000000..b2a794d3 --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml @@ -0,0 +1,35 @@ +--- +- hosts: 127.0.0.1 + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + gather_facts: no + tasks: + - name: Show all groups + debug: + var: groups + - name: Make sure docker_swarm groups are there + assert: + that: + - groups.all | length > 0 + - groups.leader | length == 1 + - groups.manager | length > 0 + - groups.worker | length >= 0 + - groups.nonleaders | length >= 0 + +- hosts: all + connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 + vars: + # for some reason, Ansible can't find the Python interpreter when connecting to the nodes, + # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. + ansible_python_interpreter: "{{ ansible_playbook_python }}" + tasks: + - name: Make sure docker_swarm_node_attributes is not available + assert: + that: + - docker_swarm_node_attributes is undefined + - name: Make sure ansible_host_uri is available + assert: + that: + - ansible_host_uri is defined + - name: Print ansible_host_uri + debug: + var: ansible_host_uri diff --git a/test/integration/targets/incidental_inventory_docker_swarm/runme.sh b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh new file mode 100755 index 00000000..b93d386a --- /dev/null +++ b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +cleanup() { + echo "Cleanup" + ansible-playbook playbooks/swarm_cleanup.yml + echo "Done" +} + +trap cleanup INT TERM EXIT + +echo "Setup" +ANSIBLE_ROLES_PATH=.. 
ansible-playbook playbooks/swarm_setup.yml + +echo "Test docker_swarm inventory 1" +ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml + +echo "Test docker_swarm inventory 2" +ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml diff --git a/test/integration/targets/incidental_inventory_foreman/aliases b/test/integration/targets/incidental_inventory_foreman/aliases new file mode 100644 index 00000000..c28a056e --- /dev/null +++ b/test/integration/targets/incidental_inventory_foreman/aliases @@ -0,0 +1,3 @@ +shippable/cloud/incidental +cloud/foreman +destructive diff --git a/test/integration/targets/incidental_inventory_foreman/ansible.cfg b/test/integration/targets/incidental_inventory_foreman/ansible.cfg new file mode 100644 index 00000000..63e24c4b --- /dev/null +++ b/test/integration/targets/incidental_inventory_foreman/ansible.cfg @@ -0,0 +1,5 @@ +[defaults] +inventory = test-config.foreman.yaml + +[inventory] +enable_plugins = foreman diff --git a/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml new file mode 100644 index 00000000..c91f4c38 --- /dev/null +++ b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml @@ -0,0 +1,31 @@ +--- +- hosts: localhost + vars: + foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}" + foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}" + foreman_stub_api_path: /api/v2 + cached_hosts_key: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}{{ foreman_stub_api_path }}/hosts" + tasks: + - name: verify a cache file was created + find: + path: + - ./foreman_cache + register: matching_files + + - assert: + that: + - matching_files.matched == 1 + - name: read the cached inventory + set_fact: + contents: "{{ lookup('file', matching_files.files.0.path) }}" + + - name: extract all the host names + set_fact: + cached_hosts: "{{ contents[cached_hosts_key] | json_query('[*].name') }}" + + - assert: + that: + "'{{ item }}' in cached_hosts" + loop: + - "v6.example-780.com" + - "c4.j1.y5.example-487.com" diff --git a/test/integration/targets/incidental_inventory_foreman/runme.sh b/test/integration/targets/incidental_inventory_foreman/runme.sh new file mode 100755 index 00000000..ba94a936 --- /dev/null +++ b/test/integration/targets/incidental_inventory_foreman/runme.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x + +set -euo pipefail + +export ANSIBLE_INVENTORY +export ANSIBLE_PYTHON_INTERPRETER + +unset ANSIBLE_INVENTORY +unset ANSIBLE_PYTHON_INTERPRETER + +export ANSIBLE_CONFIG=ansible.cfg +export FOREMAN_HOST="${FOREMAN_HOST:-localhost}" +export FOREMAN_PORT="${FOREMAN_PORT:-8080}" +FOREMAN_CONFIG=test-config.foreman.yaml + +# Set inventory caching environment variables to populate a jsonfile cache +export ANSIBLE_INVENTORY_CACHE=True +export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile +export ANSIBLE_INVENTORY_CACHE_CONNECTION=./foreman_cache + +# flag for checking whether cleanup has already fired +_is_clean= + +function _cleanup() { + [[ -n "$_is_clean" ]] && return # don't double-clean + echo Cleanup: removing $FOREMAN_CONFIG... 
+ rm -vf "$FOREMAN_CONFIG" + unset ANSIBLE_CONFIG + unset FOREMAN_HOST + unset FOREMAN_PORT + unset FOREMAN_CONFIG + _is_clean=1 +} +trap _cleanup INT TERM EXIT + +cat > "$FOREMAN_CONFIG" <- + Foreman host: {{ foreman_stub_host }} | + Foreman port: {{ foreman_stub_port }} | + API path: {{ foreman_stub_api_path }} | + Foreman API URL: {{ foreman_stub_api_uri }} + + - name: Wait for Foreman API stub to come up online + wait_for: + host: "{{ foreman_stub_host }}" + port: "{{ foreman_stub_port }}" + state: started + + # smoke test that flask app is serving + - name: Smoke test HTTP response from Foreman stub + uri: + url: "{{ foreman_stub_heartbeat_uri }}" + return_content: yes + register: heartbeat_resp + failed_when: > + heartbeat_resp.json.status != 'ok' or heartbeat_resp.json.response != 'pong' + + #### Testing start + - name: > + Check that there are 'foreman_pgagne_sats' and 'foreman_base' + groups present in inventory + assert: + that: > + '{{ item }}' in groups + with_items: + - foreman_pgagne_sats + - foreman_base + + - name: Check that host are in appropriate groups + assert: + that: > + '{{ item.key }}' in groups['{{ item.value }}'] + with_dict: + v6.example-780.com: foreman_base + c4.j1.y5.example-487.com: ungrouped + + - name: Check host UUIDs + assert: + that: > + hostvars['{{ item.key }}']['foreman_subscription_facet_attributes']['uuid'] == '{{ item.value }}' + with_dict: + v6.example-780.com: 2c72fa49-995a-4bbf-bda0-684c7048ad9f + c4.j1.y5.example-487.com: 0a494b6e-7e90-4ed2-8edc-43a41436a242 + #### Testing end diff --git a/test/integration/targets/incidental_ios_file/aliases b/test/integration/targets/incidental_ios_file/aliases new file mode 100644 index 00000000..cbcfec65 --- /dev/null +++ b/test/integration/targets/incidental_ios_file/aliases @@ -0,0 +1,2 @@ +shippable/ios/incidental +network/ios diff --git a/test/integration/targets/incidental_ios_file/defaults/main.yaml b/test/integration/targets/incidental_ios_file/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/test/integration/targets/incidental_ios_file/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/test/integration/targets/incidental_ios_file/ios1.cfg b/test/integration/targets/incidental_ios_file/ios1.cfg new file mode 100644 index 00000000..120dd4ca --- /dev/null +++ b/test/integration/targets/incidental_ios_file/ios1.cfg @@ -0,0 +1,3 @@ +vlan 3 + name ank_vlan3 +! 
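The ios1.cfg fixture above is what the net_put/net_get cases below push to and fetch from the device, and nonascii.bin exercises the binary (non-template) transfer path. Reduced to its essentials, the round trip under test looks like the following sketch (the dest name here is illustrative only; the tests template it per host):

    - name: push the fixture to the device over scp
      ansible.netcommon.net_put:
        src: ios1.cfg
    - name: fetch it back to the controller
      ansible.netcommon.net_get:
        src: ios1.cfg
        dest: ios_fetched.cfg   # hypothetical destination path

Both modules report changed only when the file differs on the receiving side, which is what the idempotency assertions in the test files check.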
diff --git a/test/integration/targets/incidental_ios_file/nonascii.bin b/test/integration/targets/incidental_ios_file/nonascii.bin new file mode 100644 index 00000000..14c6ddb1 Binary files /dev/null and b/test/integration/targets/incidental_ios_file/nonascii.bin differ diff --git a/test/integration/targets/incidental_ios_file/tasks/cli.yaml b/test/integration/targets/incidental_ios_file/tasks/cli.yaml new file mode 100644 index 00000000..d4f663b3 --- /dev/null +++ b/test/integration/targets/incidental_ios_file/tasks/cli.yaml @@ -0,0 +1,17 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + delegate_to: localhost + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test cases (connection=ansible.netcommon.network_cli) + include: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: connection_network_cli diff --git a/test/integration/targets/incidental_ios_file/tasks/main.yaml b/test/integration/targets/incidental_ios_file/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ b/test/integration/targets/incidental_ios_file/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } diff --git a/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml b/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml new file mode 100644 index 00000000..5a7ebf07 --- /dev/null +++ b/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml @@ -0,0 +1,52 @@ +--- +- debug: msg="START ios cli/net_get.yaml on connection={{ ansible_connection }}" + +# Add minimal testcase to check args are passed correctly to +# implementation module and module run is successful. + +- name: setup + cisco.ios.ios_config: + lines: + - ip ssh version 2 + - ip scp server enable + - username {{ ansible_ssh_user }} privilege 15 + match: none + +- name: setup (copy file to be fetched from device) + ansible.netcommon.net_put: + src: ios1.cfg + register: result + +- name: setup (remove file from localhost if present) + file: + path: ios_{{ inventory_hostname }}.cfg + state: absent + delegate_to: localhost + +- name: get the file from device with relative destination + ansible.netcommon.net_get: + src: ios1.cfg + dest: 'ios_{{ inventory_hostname }}.cfg' + register: result + +- assert: + that: + - result.changed == true + +- name: Idempotency check + ansible.netcommon.net_get: + src: ios1.cfg + dest: 'ios_{{ inventory_hostname }}.cfg' + register: result + +- assert: + that: + - result.changed == false + +- name: setup (remove file from localhost if present) + file: + path: ios_{{ inventory_hostname }}.cfg + state: absent + delegate_to: localhost + +- debug: msg="END ios cli/net_get.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml b/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml new file mode 100644 index 00000000..215b524d --- /dev/null +++ b/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml @@ -0,0 +1,73 @@ +--- +- debug: + msg: "START ios cli/net_put.yaml on connection={{ ansible_connection }}" + +# Add minimal testcase to check args are passed correctly to +# implementation module and module run is successful. 
+ +- name: setup + cisco.ios.ios_config: + lines: + - ip ssh version 2 + - ip scp server enable + - username {{ ansible_ssh_user }} privilege 15 + match: none + +- name: Delete existing files if present on remote host + cisco.ios.ios_command: + commands: "{{ item }}" + loop: + - delete /force ios1.cfg + - delete /force ios.cfg + - delete /force nonascii.bin + ignore_errors: true + +- name: copy file from controller to ios + scp (Default) + ansible.netcommon.net_put: + src: ios1.cfg + register: result + +- assert: + that: + - result.changed == true + +- name: Idempotency Check + ansible.netcommon.net_put: + src: ios1.cfg + register: result + +- assert: + that: + - result.changed == false + +- name: copy file from controller to ios + dest specified + ansible.netcommon.net_put: + src: ios1.cfg + dest: ios.cfg + register: result + +- assert: + that: + - result.changed == true + +- name: copy file with non-ascii characters to ios in template mode(Fail case) + ansible.netcommon.net_put: + src: nonascii.bin + mode: 'text' + register: result + ignore_errors: true + +- assert: + that: + - result.failed == true + +- name: copy file with non-ascii characters to ios in default mode(binary) + ansible.netcommon.net_put: + src: nonascii.bin + register: result + +- assert: + that: + - result.changed == true + +- debug: msg="END ios cli/net_put.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_lookup_rabbitmq/aliases b/test/integration/targets/incidental_lookup_rabbitmq/aliases new file mode 100644 index 00000000..f89752b8 --- /dev/null +++ b/test/integration/targets/incidental_lookup_rabbitmq/aliases @@ -0,0 +1,6 @@ +destructive +shippable/posix/incidental +skip/aix +skip/osx +skip/freebsd +skip/rhel diff --git a/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml new file mode 100644 index 00000000..33fa97dc --- /dev/null +++ b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - incidental_setup_rabbitmq diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml new file mode 100644 index 00000000..7c9553c5 --- /dev/null +++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml @@ -0,0 +1,5 @@ +# Rabbitmq lookup +- include: ubuntu.yml + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_release not in ('trusty', 'focal') diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml new file mode 100644 index 00000000..3b007ede --- /dev/null +++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml @@ -0,0 +1,138 @@ +- name: Test failure without pika installed + set_fact: + rabbit_missing_pika: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}" + ignore_errors: yes + register: rabbitmq_missing_pika_error + +- assert: + that: + - "'pika python package is required' in rabbitmq_missing_pika_error.msg" + +- name: Install pika and requests + pip: + name: pika<1.0.0,requests + state: latest + +- name: Test that giving an incorrect amqp protocol in URL will error + set_fact: + rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='zzzamqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}" + ignore_errors: yes + register: rabbitmq_protocol_error + +- 
assert: + that: + - "rabbitmq_protocol_error is failed" + - "'URL malformed' in rabbitmq_protocol_error.msg" + +- name: Test that giving an incorrect IP address in URL will error + set_fact: + rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@xxxxx192.112312368.250.1:5672/%2F', queue='hello', count=3) }}" + ignore_errors: yes + register: rabbitmq_ip_error + +- assert: + that: + - "rabbitmq_ip_error is failed" + - "'Connection issue' in rabbitmq_ip_error.msg" + +- name: Test missing parameters will error + set_fact: + rabbitmq_test_protocol: "{{ lookup('rabbitmq') }}" + ignore_errors: yes + register: rabbitmq_params_error + +- assert: + that: + - "rabbitmq_params_error is failed" + - "'URL is required for rabbitmq lookup.' in rabbitmq_params_error.msg" + +- name: Test missing queue will error + set_fact: + rabbitmq_queue_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F') }}" + ignore_errors: yes + register: rabbitmq_queue_error + +- assert: + that: + - "rabbitmq_queue_error is failed" + - "'Queue is required for rabbitmq lookup' in rabbitmq_queue_error.msg" + +- name: Enable the rabbitmq_management plugin + rabbitmq_plugin: + names: rabbitmq_management + state: enabled + +- name: Setup test queue + rabbitmq_queue: + name: hello + +- name: Post test message to the exchange (string) + uri: + url: http://localhost:15672/api/exchanges/%2f/amq.default/publish + method: POST + body: '{"properties":{},"routing_key":"hello","payload":"ansible-test","payload_encoding":"string"}' + user: guest + password: guest + force_basic_auth: yes + return_content: yes + headers: + Content-Type: "application/json" + register: post_data + + +- name: Post test message to the exchange (json) + uri: + url: http://localhost:15672/api/exchanges/%2f/amq.default/publish + method: POST + body: '{"properties":{"content_type": "application/json"},"routing_key":"hello","payload":"{\"key\": \"value\" }","payload_encoding":"string"}' + user: guest + password: guest + force_basic_auth: yes + return_content: yes + headers: + Content-Type: "application/json" + register: post_data_json + +- name: Test retrieve messages + set_fact: + rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/%2f/hello', queue='hello') }}" + ignore_errors: yes + register: rabbitmq_msg_error + +- name: Ensure two messages received + assert: + that: + - "rabbitmq_msg_error is not failed" + - rabbitmq_msg | length == 2 + +- name: Ensure first message is a string + assert: + that: + - rabbitmq_msg[0].msg == "ansible-test" + +- name: Ensure second message is json + assert: + that: + - rabbitmq_msg[1].json.key == "value" + +- name: Test missing vhost + set_fact: + rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/missing/', queue='hello') }}" + ignore_errors: yes + register: rabbitmq_vhost_error + +- assert: + that: + - "rabbitmq_vhost_error is failed" + - "'NOT_ALLOWED' in rabbitmq_vhost_error.msg" + +# Tidy up +- name: Uninstall pika and requests + pip: + name: pika,requests + state: absent + +- name: Disable the rabbitmq_management plugin + rabbitmq_plugin: + names: rabbitmq_management + state: disabled
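The ubuntu.yml tasks above walk the rabbitmq lookup through its failure modes before the happy path; stripped of the test scaffolding, a successful call reduces to the sketch below (URL and queue as used in the tests; count is the optional cap on fetched messages and the value here is illustrative):

    - name: drain up to five messages from the hello queue
      set_fact:
        rabbit_messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/%2f/hello', queue='hello', count=5) }}"

Each returned item carries the decoded payload, e.g. rabbit_messages[0].msg for plain strings and rabbit_messages[1].json for JSON bodies, which is what the assertions above verify.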
diff --git a/test/integration/targets/incidental_lvg/aliases b/test/integration/targets/incidental_lvg/aliases new file mode 100644 index 00000000..d5baa06d --- /dev/null +++ b/test/integration/targets/incidental_lvg/aliases @@ -0,0 +1,6 @@ +destructive +needs/privileged +shippable/posix/incidental +skip/aix +skip/freebsd +skip/osx diff --git a/test/integration/targets/incidental_lvg/meta/main.yml b/test/integration/targets/incidental_lvg/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/incidental_lvg/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_lvg/tasks/main.yml b/test/integration/targets/incidental_lvg/tasks/main.yml new file mode 100644 index 00000000..a57f591b --- /dev/null +++ b/test/integration/targets/incidental_lvg/tasks/main.yml @@ -0,0 +1,15 @@ +- name: Install required packages (Linux) + package: + name: lvm2 + state: present + when: ansible_system == 'Linux' + +- name: Test lvg module + block: + - import_tasks: setup.yml + + - import_tasks: test_indempotency.yml + + - import_tasks: test_grow_reduce.yml + always: + - import_tasks: teardown.yml diff --git a/test/integration/targets/incidental_lvg/tasks/setup.yml b/test/integration/targets/incidental_lvg/tasks/setup.yml new file mode 100644 index 00000000..e63c2d64 --- /dev/null +++ b/test/integration/targets/incidental_lvg/tasks/setup.yml @@ -0,0 +1,13 @@ +- name: "Create files to use as disk devices" + command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10" + with_sequence: 'count=2' + +- name: "Create loop device for file" + command: "losetup --show -f {{ remote_tmp_dir }}/img{{ item }}" + with_sequence: 'count=2' + register: loop_devices + +- name: "Assign names to the disks to work on" + set_fact: + loop_device1: "{{ loop_devices.results[0] }}" + loop_device2: "{{ loop_devices.results[1] }}" diff --git a/test/integration/targets/incidental_lvg/tasks/teardown.yml b/test/integration/targets/incidental_lvg/tasks/teardown.yml new file mode 100644 index 00000000..ed662f1e --- /dev/null +++ b/test/integration/targets/incidental_lvg/tasks/teardown.yml @@ -0,0 +1,17 @@ +- name: Remove test volume group + lvg: + vg: testvg + state: absent + +- name: Detach loop device + command: "losetup -d {{ item.stdout }}" + loop: "{{ loop_devices.results|default([]) }}" + when: + - item.stdout is defined + - item.stdout is match("/dev/.*") + +- name: Remove device files + file: + path: "{{ remote_tmp_dir }}/img{{ item }}" + state: absent + with_sequence: 'count={{ loop_devices.results|length }}' diff --git a/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml new file mode 100644 index 00000000..1e988045 --- /dev/null +++ b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml @@ -0,0 +1,33 @@ +- name: "Create volume group on first disk" + lvg: + vg: testvg + pvs: "{{ loop_device1.stdout }}" + +- name: "get lvm facts" + setup: + +- debug: var=ansible_lvm + +- name: "Assert the testvg spans only the first disk" + assert: + that: + - ansible_lvm.pvs[loop_device1.stdout].vg == "testvg" + - 'loop_device2.stdout not in ansible_lvm.pvs or + ansible_lvm.pvs[loop_device2.stdout].vg == ""' + +- name: "Extend to second disk AND reduce from the first disk" + lvg: + vg: testvg + pvs: "{{ loop_device2.stdout }}" + +- name: "get lvm facts" + setup: + +- debug: var=ansible_lvm + +- name: "Assert the testvg spans only the second disk" + assert: + that: + - 'loop_device1.stdout not in ansible_lvm.pvs or + ansible_lvm.pvs[loop_device1.stdout].vg == ""' + - ansible_lvm.pvs[loop_device2.stdout].vg == "testvg"
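A detail that makes test_grow_reduce.yml above work: the lvg module treats pvs as the complete desired set of physical volumes, not an increment, so a single task both extends the volume group onto the new device and reduces it off the old one. A minimal sketch of that declarative behaviour:

    # pvs lists the full desired PV set; anything absent is reduced away
    - lvg:
        vg: testvg
        pvs: "{{ loop_device2.stdout }}"   # adds the second device and drops the first in one step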
diff --git a/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml b/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml new file mode 100644 index 00000000..5007e56a --- /dev/null +++ b/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml @@ -0,0 +1,15 @@ +- name: Create volume group on disk device + lvg: + vg: testvg + pvs: "{{ loop_device1.stdout }}" + +- name: Create the volume group again to verify idempotence + lvg: + vg: testvg + pvs: "{{ loop_device1.stdout }}" + register: repeat_vg_create + +- name: Do all assertions to verify expected results + assert: + that: + - repeat_vg_create is not changed diff --git a/test/integration/targets/incidental_mongodb_parameter/aliases b/test/integration/targets/incidental_mongodb_parameter/aliases new file mode 100644 index 00000000..dc285483 --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/aliases @@ -0,0 +1,8 @@ +destructive +shippable/posix/incidental +skip/aix +skip/osx +skip/macos +skip/freebsd +skip/rhel +needs/root diff --git a/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml new file mode 100644 index 00000000..aac55526 --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml @@ -0,0 +1,21 @@ +--- +# defaults file for test_mongodb_user +mongodb_admin_user: test_root +mongodb_admin_password: saE_Rr9!gE6gh#e~R#nZ +mongod_auth: false +kill_signal: SIGTERM +# Should be one of +# --storageEngine wiredTiger --wiredTigerEngineConfigString="cache_size=200M" +# --storageEngine mmapv1 --nojournal +mongod_storage_engine_opts: "--storageEngine wiredTiger --wiredTigerEngineConfigString='cache_size=200M'" +mongodb_user: mongodb +mongodb_user_list: + - { "name": "user1", "password": "password1", "roles": "read", "database": "test" } + - { "name": "user2", "password": "password2", "roles": "readWrite", "database": "test" } + - { "name": "user3", "password": "password3", "roles": "dbAdmin", "database": "test" } + - { "name": "user4", "password": "password4", "roles": "userAdmin", "database": "test" } + - { "name": "user5", "password": "password5", "roles": "clusterAdmin", "database": "admin" } + - { "name": "user6", "password": "password6", "roles": "readAnyDatabase", "database": "admin" } + - { "name": "user7", "password": "password7", "roles": "readWriteAnyDatabase", "database": "admin" } + - { "name": "user8", "password": "password8", "roles": "userAdminAnyDatabase", "database": "admin" } + - { "name": "user9", "password": "password9", "roles": "dbAdminAnyDatabase", "database": "admin" } diff --git a/test/integration/targets/incidental_mongodb_parameter/meta/main.yml b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml new file mode 100644 index 00000000..10fc3936 --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - incidental_setup_mongodb + - setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml new file mode 100644 index 00000000..a0fda1dc --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml @@ -0,0 +1,143 @@ +# test code for the mongodb_parameter module +# (c) 2019, Rhys Campbell + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+ +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# ============================================================ + +- name: Ensure tests home exists + file: + path: "{{ remote_tmp_dir }}/tests" + state: directory + +- include_tasks: mongod_teardown.yml + +- include_tasks: mongod_singlenode.yml + +- name: Set syncdelay to 99 + mongodb_parameter: + login_port: 3001 + param: syncdelay + value: 99 + param_type: int + register: sd_change + +- assert: + that: + - sd_change.before | int == 60 + - sd_change.after | int == 99 + - sd_change.changed == True + +- name: Set syncdelay to 99 (again) + mongodb_parameter: + login_port: 3001 + param: syncdelay + value: 99 + param_type: int + register: sd_change + +- assert: + that: + - sd_change.before | int == 99 + - sd_change.after | int == 99 + - sd_change.changed == False + +- name: Create admin user with module + mongodb_user: + login_port: 3001 + database: admin + name: "{{ mongodb_admin_user }}" + password: "{{ mongodb_admin_password }}" + roles: root + state: present + register: mongodb_admin_user_created + +- assert: + that: + - mongodb_admin_user_created.changed == True + +- name: Kill all mongod processes + command: pkill -{{ kill_signal }} mongod + ignore_errors: true + +- name: Getting pids for mongod + pids: + name: mongod + register: pids_of_mongod + +- name: Wait for all mongod processes to exit + wait_for: + path: "/proc/{{ item }}/status" + state: absent + delay: 3 + with_items: "{{ pids_of_mongod }}" + +- set_fact: + mongod_auth: true + +- include_tasks: mongod_singlenode.yml +# Tests with auth enabled + +- name: Set syncdelay to 59 with auth + mongodb_parameter: + login_port: 3001 + login_user: "{{ mongodb_admin_user }}" + login_password: "{{ mongodb_admin_password }}" + param: syncdelay + value: 59 + param_type: int + register: sd_change + +- assert: + that: + - sd_change.before | int == 60 + - sd_change.after | int == 59 + - sd_change.changed == True + +- name: Set syncdelay to 59 (again) with auth + mongodb_parameter: + login_port: 3001 + login_user: "{{ mongodb_admin_user }}" + login_password: "{{ mongodb_admin_password }}" + param: syncdelay + value: 59 + param_type: int + register: sd_change + +- assert: + that: + - sd_change.before | int == 59 + - sd_change.after | int == 59 + - sd_change.changed == False + +- name: Set authenticationMechanisms to MONGODB-X509 with auth (will fail) + mongodb_parameter: + login_port: 3001 + login_user: "{{ mongodb_admin_user }}" + login_password: "{{ mongodb_admin_password }}" + param: authenticationMechanisms + value: "MONGODB-X509" + param_type: str + register: diag_change + ignore_errors: yes + +- assert: + that: + - '"unable to change parameter" in diag_change.msg' + - diag_change.failed == True + +# Clean up +- include_tasks: mongod_teardown.yml diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml new file mode 100644 index 00000000..291cb1c9 --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml @@ -0,0 +1,55 @@ +- name: Set mongodb_user user for redhat + set_fact: + mongodb_user: "mongod" + when: ansible_os_family ==
"RedHat" + +- set_fact: + mongodb_nodes: + - 3001 + +- name: Create directories for mongod processes + file: + path: "{{ remote_tmp_dir }}/mongod{{ item }}" + state: directory + owner: "{{ mongodb_user }}" + group: "{{ mongodb_user }}" + mode: 0755 + recurse: yes + with_items: "{{ mongodb_nodes }}" + +- name: Ensure {{ remote_tmp_dir }}/config dir exists + file: + path: "{{ remote_tmp_dir }}/config" + state: directory + owner: "{{ mongodb_user }}" + group: "{{ mongodb_user }}" + mode: 0755 + +- name: Create keyfile + copy: + dest: "{{ remote_tmp_dir }}/my.key" + content: | + fd2CUrbXBJpB4rt74A6F + owner: "{{ mongodb_user }}" + group: "{{ mongodb_user }}" + mode: 0600 + when: mongod_auth == True + +- name: Spawn mongod process without auth + command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork + args: + chdir: "{{ remote_tmp_dir }}" + with_items: "{{ mongodb_nodes | sort }}" + when: mongod_auth == False + +- name: Spawn mongod process with auth + command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork --auth --keyFile my.key + args: + chdir: "{{ remote_tmp_dir }}" + with_items: "{{ mongodb_nodes | sort }}" + when: mongod_auth == True + +- name: Wait for mongod to start responding + wait_for: + port: "{{ item }}" + with_items: "{{ mongodb_nodes }}" diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml new file mode 100644 index 00000000..a904a718 --- /dev/null +++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml @@ -0,0 +1,25 @@ +- name: Kill all mongod processes + command: pkill -{{ kill_signal }} mongod + ignore_errors: true + +- name: Getting pids for mongod + pids: + name: mongod + register: pids_of_mongod + +- name: Wait for all mongod processes to exit + wait_for: + path: "/proc/{{ item }}/status" + state: absent + delay: 1 + with_items: "{{ pids_of_mongod }}" + +- name: Remove all mongod folders + file: + path: "{{ remote_tmp_dir }}/{{ item }}" + state: absent + with_items: + - mongod3001 + +- name: Remove all mongod sock files + shell: rm -Rf /tmp/mongodb*.sock diff --git a/test/integration/targets/incidental_postgresql_user/aliases b/test/integration/targets/incidental_postgresql_user/aliases new file mode 100644 index 00000000..78b47900 --- /dev/null +++ b/test/integration/targets/incidental_postgresql_user/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/incidental +skip/aix +skip/osx diff --git a/test/integration/targets/incidental_postgresql_user/defaults/main.yml b/test/integration/targets/incidental_postgresql_user/defaults/main.yml new file mode 100644 index 00000000..bc9ef19b --- /dev/null +++ b/test/integration/targets/incidental_postgresql_user/defaults/main.yml @@ -0,0 +1,3 @@ +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' diff --git a/test/integration/targets/incidental_postgresql_user/meta/main.yml b/test/integration/targets/incidental_postgresql_user/meta/main.yml new file mode 100644 index 00000000..c2a0d561 --- /dev/null +++ b/test/integration/targets/incidental_postgresql_user/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - incidental_setup_postgresql_db diff --git a/test/integration/targets/incidental_postgresql_user/tasks/main.yml 
b/test/integration/targets/incidental_postgresql_user/tasks/main.yml new file mode 100644 index 00000000..d59ae635 --- /dev/null +++ b/test/integration/targets/incidental_postgresql_user/tasks/main.yml @@ -0,0 +1,7 @@ +# Initial CI tests of postgresql_user module +- import_tasks: postgresql_user_initial.yml + when: postgres_version_resp.stdout is version('9.4', '>=') + +# General tests: +- import_tasks: postgresql_user_general.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml new file mode 100644 index 00000000..963f58ac --- /dev/null +++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml @@ -0,0 +1,741 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Integration tests for postgresql_user module. + +- vars: + test_user: hello.user.with.dots + test_user2: hello + test_group1: group1 + test_group2: group2 + test_table: test + test_comment1: 'comment1' + test_comment2: 'comment2' + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + # + # Common tests + # + - name: Create role in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Create role in actual mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Add a comment on the user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment1 }}' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"] + + - name: check the comment + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == '{{ test_comment1 }}' + + - name: Try to add the same comment on the user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment1 }}' + + - assert: + that: + - result is not changed + + - name: Try to add another comment on the user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment2 }}' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"] + + - name: check the comment + <<: 
*task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == '{{ test_comment2 }}' + + - name: Try to create role again in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Try to create role again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop role in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user actually exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop role in actual mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Try to drop role in check mode again + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Try to drop role in actual mode again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + # + # password, no_password_changes, encrypted, expires parameters + # + + - name: Create role with password, passed as hashed md5 + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: md59543f1d82624df2b31672ec0f7050460 + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check that the user exists with a proper password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'" + + - assert: + that: + - result.rowcount == 1 + + - name: Test no_password_changes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: u123 + no_password_changes: yes + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + + -
name: Check that nothing changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'" + + - assert: + that: + - result.rowcount == 1 + + # Storing unencrypted passwords is not available from PostgreSQL 10 onwards + - name: Change password, passed as unencrypted + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: myunencryptedpass + encrypted: no + when: postgres_version_resp.stdout is version('10', '<') + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + when: postgres_version_resp.stdout is version('10', '<') + + - name: Check that the user exists with the unencrypted password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'" + when: postgres_version_resp.stdout is version('10', '<') + + - assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '<') + + - name: Change password, explicit encrypted=yes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: myunencryptedpass + encrypted: yes + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check that the user exists with an encrypted password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'" + + - assert: + that: + - result.rowcount == 1 + + - name: Change rolvaliduntil attribute + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + expires: 'Jan 31 2020' + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolvaliduntil::text like '2020-01-31%' + + - assert: + that: + - result.rowcount == 1 + + - name: Try to set the same rolvaliduntil value again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + expires: 'Jan 31 2020' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check that nothing changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolvaliduntil::text like '2020-01-31%' + + - assert: + that: + - result.rowcount == 1 + + # + # role_attr_flags + # + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: CREATEROLE,CREATEDB + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 't' and rolcreatedb = 't' + + - assert: + that: + - result.rowcount == 1 + + - name: Set the same role attributes again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: CREATEROLE,CREATEDB + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check the prev
step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 't' and rolcreatedb = 't' + + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: NOCREATEROLE,NOCREATEDB + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 'f' and rolcreatedb = 'f' + + - assert: + that: + - result.rowcount == 1 + + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: NOCREATEROLE,NOCREATEDB + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 'f' and rolcreatedb = 'f' + + # + # priv + # + - name: Create test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + columns: + - id int + + - name: Insert data to test table + <<: *task_parameters + postgresql_query: + query: "INSERT INTO {{ test_table }} (id) VALUES ('1')" + <<: *pg_parameters + + - name: Check that test_user is not allowed to read the data + <<: *task_parameters + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + session_role: '{{ test_user }}' + query: 'SELECT * FROM {{ test_table }}' + ignore_errors: yes + + - assert: + that: + - result is failed + - "'permission denied' in result.msg" + + - name: Grant privileges + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + priv: '{{ test_table }}:SELECT' + + - assert: + that: + - result is changed + + - name: Check that test_user is allowed to read the data + <<: *task_parameters + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + session_role: '{{ test_user }}' + query: 'SELECT * FROM {{ test_table }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Grant the same privileges again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + priv: '{{ test_table }}:SELECT' + + - assert: + that: + - result is not changed + + - name: Remove test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + state: absent + + # + # fail_on_user + # + - name: Create role for test + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user2 }}' + + - name: Create test table, set owner as test_user + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + owner: '{{ test_user2 }}' + + - name: Test fail_on_user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user2 }}' + state: absent + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg == 'Unable to remove user' + + - name: Test fail_on_user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + fail_on_user: no + + - assert: + that: + - result is not changed + + # + # Test groups parameter + # + - name: Create test group + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group2 }}' 
+ role_attr_flags: NOLOGIN + + - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + role_attr_flags: NOLOGIN + check_mode: yes + + - assert: + that: + - result is changed + - result.user == '{{ test_group1 }}' + - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Create role test_group1 and grant test_group2 to test_group1 + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + role_attr_flags: NOLOGIN + + - assert: + that: + - result is changed + - result.user == '{{ test_group1 }}' + - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Grant test_group2 to test_group1 again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_group1 }}' + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Grant groups to existent role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + groups: + - '{{ test_group1 }}' + - '{{ test_group2 }}' + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ test_user }}"'] + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 2 + + always: + # + # Clean up + # + - name: Drop test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + state: absent + + - name: Drop test user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ item }}' + state: absent + loop: + - '{{ test_user }}' + - '{{ test_user2 }}' + - '{{ test_group1 }}' + - '{{ test_group2 }}' diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml 
b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
new file mode 100644
index 00000000..ccd42847
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
@@ -0,0 +1,153 @@
+#
+# Create and destroy user, test 'password' and 'encrypted' parameters
+#
+# Unencrypted password values are not supported on PostgreSQL 10+,
+# so only run the encrypted: 'no' tests on older servers.
+- set_fact:
+    encryption_values:
+    - 'yes'
+
+- set_fact:
+    encryption_values: '{{ encryption_values + ["no"] }}'
+  when: postgres_version_resp.stdout is version('10', '<')
+
+- include_tasks: test_password.yml
+  vars:
+    encrypted: '{{ loop_item }}'
+    db_password1: 'secretù' # use UTF-8
+  loop: '{{ encryption_values }}'
+  loop_control:
+    loop_var: loop_item
+
+# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so
+# we want to test attribute management differently depending
+# on the version.
+- set_fact:
+    bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}"
+
+# test 'no_password_change' and 'role_attr_flags' parameters
+- include_tasks: test_no_password_change.yml
+  vars:
+    no_password_changes: '{{ loop_item }}'
+  loop:
+  - 'yes'
+  - 'no'
+  loop_control:
+    loop_var: loop_item
+
+### TODO: fail_on_user
+
+#
+# Test login_user functionality
+#
+- name: Create a user to test login module parameters
+  become: yes
+  become_user: "{{ pg_user }}"
+  postgresql_user:
+    name: "{{ db_user1 }}"
+    state: "present"
+    encrypted: 'yes'
+    password: "password"
+    role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+    login_user: "{{ pg_user }}"
+    db: postgres
+
+- name: Create db
+  postgresql_db:
+    name: "{{ db_name }}"
+    state: "present"
+    login_user: "{{ db_user1 }}"
+    login_password: "password"
+    login_host: "localhost"
+
+- name: Check that the database was created
+  become: yes
+  become_user: "{{ pg_user }}"
+  shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+  register: result
+
+- assert:
+    that:
+    - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Create a user
+  postgresql_user:
+    name: "{{ db_user2 }}"
+    state: "present"
+    encrypted: 'yes'
+    # 'md5' followed by md5(password + username) is the hash format PostgreSQL stores for md5 auth
+    password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+    db: "{{ db_name }}"
+    login_user: "{{ db_user1 }}"
+    login_password: "password"
+    login_host: "localhost"
+
+- name: Check that it was created
+  become: yes
+  become_user: "{{ pg_user }}"
+  shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+  register: result
+
+- assert:
+    that:
+    - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Grant database privileges
+  postgresql_privs:
+    type: "database"
+    state: "present"
+    roles: "{{ db_user2 }}"
+    privs: "CREATE,connect"
+    objs: "{{ db_name }}"
+    db: "{{ db_name }}"
+    login: "{{ db_user1 }}"
+    password: "password"
+    host: "localhost"
+
+- name: Check that the user has the requested permissions (database)
+  become: yes
+  become_user: "{{ pg_user }}"
+  shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+  register: result_database
+
+- assert:
+    that:
+    - "result_database.stdout_lines[-1] == '(1 row)'"
+    - "db_user2 ~ '=Cc' in result_database.stdout"
+
+- name: Remove user
+  postgresql_user:
+    name: "{{ db_user2 }}"
+    state: 'absent'
+    priv: "ALL"
+    db: "{{ db_name }}"
+    login_user: "{{ db_user1 }}"
+    login_password: "password"
+    login_host: "localhost"
+
+- name: Check that they were removed
+  become: yes
+  become_user: "{{ pg_user }}"
+  shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+  register: result
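+
+# psql appends a "(N rows)" footer to every result set, which is why these
+# checks assert on the last line of stdout.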
+- assert:
+    that:
+    - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+  postgresql_db:
+    state: absent
+    name: "{{ db_name }}"
+    login_user: "{{ db_user1 }}"
+    login_password: "password"
+    login_host: "localhost"
+
+- name: Check that database was destroyed
+  become: yes
+  become_user: "{{ pg_user }}"
+  shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+  register: result
+
+- assert:
+    that:
+    - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
new file mode 100644
index 00000000..c296c0ea
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
@@ -0,0 +1,167 @@
+# YAML merge keys (<<) pull the anchored defaults into every task below,
+# keeping the become/register boilerplate in one place.
+- vars:
+    task_parameters: &task_parameters
+      become_user: "{{ pg_user }}"
+      become: yes
+      register: result
+    postgresql_parameters: &parameters
+      db: postgres
+      name: "{{ db_user1 }}"
+      login_user: "{{ pg_user }}"
+
+  block:
+
+  - name: Create a user with all role attributes
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: "present"
+      role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}"
+      no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist
+
+  - name: Check that the user has the requested role attributes
+    <<: *task_parameters
+    shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+  - assert:
+      that:
+      - "result.stdout_lines[-1] == '(1 row)'"
+      - "'super:t' in result.stdout_lines[-2]"
+      - "'createrole:t' in result.stdout_lines[-2]"
+      - "'create:t' in result.stdout_lines[-2]"
+      - "'inherit:t' in result.stdout_lines[-2]"
+      - "'login:t' in result.stdout_lines[-2]"
+
+  - block:
+    - name: Check that the user has the requested role attribute BYPASSRLS
+      <<: *task_parameters
+      shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+    - assert:
+        that:
+        - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]"
+    when: bypassrls_supported
+
+  - name: Modify a user to have no role attributes
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: "present"
+      role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}"
+      no_password_changes: '{{ no_password_changes }}'
+
+  - name: Check that ansible reports it modified the role
+    assert:
+      that:
+      - result is changed
+
+  - name: "Check that the user doesn't have any attribute"
+    <<: *task_parameters
+    shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+  - assert:
+      that:
+      - "result.stdout_lines[-1] == '(1 row)'"
+      - "'super:f' in result.stdout_lines[-2]"
+      - "'createrole:f' in result.stdout_lines[-2]"
+      - "'create:f' in result.stdout_lines[-2]"
+      - "'inherit:f' in result.stdout_lines[-2]"
+      - "'login:f' in result.stdout_lines[-2]"
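+
+  # The BYPASSRLS checks sit in their own block so they can be skipped
+  # wholesale on servers older than 9.5 (see bypassrls_supported above).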
+  - block:
+    - name: Check that the user has the requested role attribute BYPASSRLS
+      <<: *task_parameters
+      shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+    - assert:
+        that:
+        - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]"
+    when: bypassrls_supported
+
+  - name: Try to add an invalid attribute
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: "present"
+      role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID"
+      no_password_changes: '{{ no_password_changes }}'
+    ignore_errors: yes
+
+  - name: Check that ansible reports failure
+    assert:
+      that:
+      - result is not changed
+      - result is failed
+      - "result.msg == 'Invalid role_attr_flags specified: INVALID'"
+
+  - name: Modify a single role attribute on a user
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: "present"
+      role_attr_flags: "LOGIN"
+      no_password_changes: '{{ no_password_changes }}'
+
+  - name: Check that ansible reports it modified the role
+    assert:
+      that:
+      - result is changed
+
+  - name: Check the role attributes
+    <<: *task_parameters
+    shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+  - assert:
+      that:
+      - "result.stdout_lines[-1] == '(1 row)'"
+      - "'super:f' in result.stdout_lines[-2]"
+      - "'createrole:f' in result.stdout_lines[-2]"
+      - "'create:f' in result.stdout_lines[-2]"
+      - "'inherit:f' in result.stdout_lines[-2]"
+      - "'login:t' in result.stdout_lines[-2]"
+
+  - block:
+    - name: Check the role attribute BYPASSRLS
+      <<: *task_parameters
+      shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+    - assert:
+        that:
+        - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]"
+    when: bypassrls_supported
+
+  - name: Check that using the same attribute a second time does nothing
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: "present"
+      role_attr_flags: "LOGIN"
+      no_password_changes: '{{ no_password_changes }}'
+    environment:
+      PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+  - name: Check there isn't any update reported
+    assert:
+      that:
+      - result is not changed
+
+  - name: Cleanup the user
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: 'absent'
+      no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored
+
+  - name: Check that user was removed
+    <<: *task_parameters
+    shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+  - assert:
+      that:
+      - "result.stdout_lines[-1] == '(0 rows)'"
+
+  always:
+  - name: Cleanup the user
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      state: 'absent'
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
new file mode 100644
index 00000000..be033a55
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
@@ -0,0 +1,336 @@
+- vars:
+    task_parameters: &task_parameters
+      become_user: "{{ pg_user }}"
+      become: yes
+      register: result
+    postgresql_parameters: &parameters
+      db: postgres
+      name: "{{ db_user1 }}"
+      login_user: "{{ pg_user }}"
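+
+  # Every task below merges in *task_parameters, so each one registers its
+  # outcome as "result"; the *changed / *not_changed anchor blocks assert on it.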
+  block:
+  - name: 'Check that PGOPTIONS environment variable is effective (1/2)'
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      password: '{{ db_password1 }}'
+    ignore_errors: true
+    environment:
+      PGCLIENTENCODING: 'UTF8'
+      PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+  - name: 'Check that PGOPTIONS environment variable is effective (2/2)'
+    assert:
+      that:
+      - result is failed
+
+  - name: 'Create a user (password encrypted: {{ encrypted }})'
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      password: '{{ db_password1 }}'
+      encrypted: '{{ encrypted }}'
+    environment:
+      PGCLIENTENCODING: 'UTF8'
+
+  - block: &changed # block is only used here to define a YAML anchor
+    - name: Check that ansible reports it was created
+      assert:
+        that:
+        - result is changed
+
+  - name: Check that it was created
+    <<: *task_parameters
+    shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+  - assert:
+      that:
+      - "result.stdout_lines[-1] == '(1 row)'"
+
+  - name: Check that creating user a second time does nothing
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      password: '{{ db_password1 }}'
+      encrypted: '{{ encrypted }}'
+    environment:
+      PGCLIENTENCODING: 'UTF8'
+      PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+  - block: &not_changed # block is only used here to define a YAML anchor
+    - name: Check that ansible reports no change
+      assert:
+        that:
+        - result is not changed
+
+  - name: 'Define an expiration time'
+    <<: *task_parameters
+    postgresql_user:
+      <<: *parameters
+      expires: '2025-01-01'
+    environment:
+      PGCLIENTENCODING: 'UTF8'
+
+  - <<: *changed
+
+  - name: 'Redefine the same expiration time'
+    <<: *task_parameters
+    postgresql_user:
+      expires: '2025-01-01'
+      <<: *parameters
+    environment:
+      PGCLIENTENCODING: 'UTF8'
+      PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+  - <<: *not_changed
+
+  - block:
+
+    - name: 'Using MD5-hashed password: check that password not changed when using cleartext password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: '{{ db_password1 }}'
+        encrypted: 'yes'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'"
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+        encrypted: 'yes'
+      environment:
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'"
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+        encrypted: 'no'
+      environment:
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: 'Redefine the same expiration time and password (encrypted)'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        encrypted: 'yes'
+        password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+        expires: '2025-01-01'
+      environment:
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
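+
+    # With default_transaction_read_only=on, any ALTER USER the module issued
+    # by mistake would fail outright, so the "not changed" results below are
+    # proven rather than assumed.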
+
+    - <<: *not_changed
+
+    - name: 'Using MD5-hashed password: check that password changed when using another cleartext password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: 'prefix{{ db_password1 }}'
+        encrypted: 'yes'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+
+    - <<: *changed
+
+    - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'"
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}"
+        encrypted: 'yes'
+
+    - <<: *changed
+
+    - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'"
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}"
+        encrypted: 'no'
+      register: change_pass_unencrypted
+      failed_when:
+      - change_pass_unencrypted is failed
+      # newer versions of psycopg2 no longer support unencrypted passwords, so we ignore that error
+      - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg'
+
+    - <<: *changed
+
+    - name: 'Using MD5-hashed password: check that password changed when clearing the password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: ''
+        encrypted: 'yes'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+
+    - <<: *changed
+
+    - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: ''
+        encrypted: 'yes'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: 'Using cleartext password: check that password not changed when clearing the password again'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: ''
+        encrypted: 'no'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: 'Using MD5-hashed password: check that password changed when using a cleartext password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: '{{ db_password1 }}'
+        encrypted: 'yes'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+
+    - <<: *changed
+
+    when: encrypted == 'yes'
+
+  - block:
+
+    - name: 'Using cleartext password: check that password not changed when using cleartext password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "{{ db_password1 }}"
+        encrypted: 'no'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: 'Redefine the same expiration time and password (not encrypted)'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "{{ db_password1 }}"
+        encrypted: 'no'
+        expires: '2025-01-01'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+        PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+    - <<: *not_changed
+
+    - name: 'Using cleartext password: check that password changed when using another cleartext password'
+      <<: *task_parameters
+      postgresql_user:
+        <<: *parameters
+        password: "changed{{ db_password1 }}"
+        encrypted: 'no'
+      environment:
+        PGCLIENTENCODING: 'UTF8'
+
+    - <<: *changed
+
+    - name: 'Using cleartext password: check that password
changed when clearing the password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + - name: 'Using cleartext password: check that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using MD5-hashed password: check that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using cleartext password: check that password changed when using cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + when: encrypted == 'no' + + - name: Remove user + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters + + - <<: *changed + + - name: Check that they were removed + <<: *task_parameters + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + + - name: Check that removing user a second time does nothing + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + always: + - name: Remove user + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters diff --git a/test/integration/targets/incidental_setup_docker/aliases b/test/integration/targets/incidental_setup_docker/aliases new file mode 100644 index 00000000..d466c39c --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/aliases @@ -0,0 +1,2 @@ +needs/target/setup_epel +hidden diff --git a/test/integration/targets/incidental_setup_docker/defaults/main.yml b/test/integration/targets/incidental_setup_docker/defaults/main.yml new file mode 100644 index 00000000..48959cc3 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/defaults/main.yml @@ -0,0 +1,18 @@ +docker_cli_version: '0.0' +docker_api_version: '0.0' +docker_py_version: '0.0' +docker_skip_cleanup: no +docker_prereq_packages: [] +docker_packages: + - docker-ce + +docker_pip_extra_packages: [] +docker_pip_never_remove: [] +docker_pip_packages: + - docker + +docker_cleanup_packages: + - docker + - docker-ce + - docker-ce-cli + - containerd.io diff --git a/test/integration/targets/incidental_setup_docker/handlers/main.yml b/test/integration/targets/incidental_setup_docker/handlers/main.yml new file mode 100644 index 00000000..9e3f928f --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/handlers/main.yml @@ -0,0 +1,14 @@ +- name: remove pip packages + pip: + state: absent + name: "{{ docker_pip_packages | union(docker_pip_extra_packages) | difference(docker_pip_never_remove) }}" + listen: cleanup docker + when: not docker_skip_cleanup | bool + +- name: remove docker packages + 
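+  # Templating the module name via "action" dispatches this one handler to
+  # whichever package manager the platform uses (apt, yum, dnf, zypper, ...).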
action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ docker_cleanup_packages }}" + state: absent + listen: cleanup docker + when: not docker_skip_cleanup | bool diff --git a/test/integration/targets/incidental_setup_docker/meta/main.yml b/test/integration/targets/incidental_setup_docker/meta/main.yml new file mode 100644 index 00000000..91a63627 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_constraints diff --git a/test/integration/targets/incidental_setup_docker/tasks/Debian.yml b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml new file mode 100644 index 00000000..0ea2cb4b --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml @@ -0,0 +1,43 @@ +- name: Get OS version + shell: uname -r + register: os_version + +- name: Install pre-reqs + apt: + name: "{{ docker_prereq_packages }}" + state: present + update_cache: yes + notify: cleanup docker + +- name: Add gpg key + shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key + +- name: Add Docker repo + shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + +- block: + - name: Prevent service restart + copy: + content: exit 101 + dest: /usr/sbin/policy-rc.d + backup: yes + mode: 0755 + register: policy_rc_d + + - name: Install Docker CE + apt: + name: "{{ docker_packages }}" + state: present + update_cache: yes + always: + - name: Restore /usr/sbin/policy-rc.d (if needed) + command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d + when: + - "'backup_file' in policy_rc_d" + + - name: Remove /usr/sbin/policy-rc.d (if needed) + file: + path: /usr/sbin/policy-rc.d + state: absent + when: + - "'backup_file' not in policy_rc_d" diff --git a/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml new file mode 100644 index 00000000..9f52e8f1 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml @@ -0,0 +1,21 @@ +- name: Add repository + yum_repository: + file: docker-ce + name: docker-ce-stable + description: Docker CE Stable - $basearch + baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable + enabled: yes + gpgcheck: yes + gpgkey: https://download.docker.com/linux/fedora/gpg + +- name: Update cache + command: dnf makecache + args: + warn: no + +- name: Install docker + dnf: + name: "{{ docker_packages }}" + state: present + enablerepo: docker-ce-test + notify: cleanup docker diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml new file mode 100644 index 00000000..bd708315 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml @@ -0,0 +1,44 @@ +# The RHEL extras repository must be enabled to provide the container-selinux package. 
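+# (container-selinux carries the SELinux policy that the docker-ce packages require.)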
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository + +- name: Install Docker pre-reqs + yum: + name: "{{ docker_prereq_packages }}" + state: present + notify: cleanup docker + +- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18) + include_role: + name: setup_epel + +- name: Enable extras repository for RHEL on AWS + # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ use rhel-7-server-rhui-extras-rpms + command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms + args: + warn: no + +# They broke their .repo file, so we set it up ourselves +- name: Set-up repository + yum_repository: + name: docker-ce + description: docker-ce + baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable + gpgcheck: true + gpgkey: https://download.docker.com/linux/centos/gpg + +- name: Update cache + command: yum -y makecache fast + args: + warn: no + +- name: Install docker + yum: + name: "{{ docker_packages }}" + state: present + notify: cleanup docker + +- name: Make sure the docker daemon is running (failure expected inside docker container) + service: + name: docker + state: started + ignore_errors: "{{ ansible_virtualization_type == 'docker' }}" diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml new file mode 100644 index 00000000..6d2fb3e7 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml @@ -0,0 +1,33 @@ +# The RHEL extras repository must be enabled to provide the container-selinux package. +# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository + +- name: Install Docker pre-reqs + dnf: + name: "{{ docker_prereq_packages }}" + state: present + notify: cleanup docker + register: result + until: result is success + retries: 10 + delay: 2 + +# They broke their .repo file, so we set it up ourselves +- name: Set-up repository + yum_repository: + name: docker-ce + description: docker-ce + baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable + gpgcheck: true + gpgkey: https://download.docker.com/linux/centos/gpg + +- name: Install docker + dnf: + name: "{{ docker_packages }}" + state: present + notify: cleanup docker + +- name: Make sure the docker daemon is running (failure expected inside docker container) + service: + name: docker + state: started + ignore_errors: "{{ ansible_virtualization_type == 'docker' }}" diff --git a/test/integration/targets/incidental_setup_docker/tasks/Suse.yml b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml new file mode 100644 index 00000000..93f4d34e --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml @@ -0,0 +1,7 @@ +- name: Install docker + zypper: + name: "{{ docker_packages }}" + force: yes + disable_gpg_check: yes + update_cache: yes + notify: cleanup docker diff --git a/test/integration/targets/incidental_setup_docker/tasks/main.yml b/test/integration/targets/incidental_setup_docker/tasks/main.yml new file mode 100644 index 00000000..359a6d44 --- /dev/null +++ b/test/integration/targets/incidental_setup_docker/tasks/main.yml @@ -0,0 +1,113 @@ +- name: Setup Docker + when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] + block: + - name: 
Include distribution specific variables + include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + - default.yml + paths: + - "{{ role_path }}/vars" + + - name: Include distribution specific tasks + include_tasks: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + paths: + - "{{ role_path }}/tasks" + + - name: Install Python requirements + pip: + state: present + name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}" + extra_args: "-c {{ remote_constraints }}" + notify: cleanup docker + + # Detect docker CLI, API and docker-py versions + - name: Check Docker CLI version + command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}" + register: docker_cli_version_stdout + ignore_errors: yes + + - name: Check Docker API version + command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'" + register: docker_api_version_stdout + ignore_errors: yes + + - name: Check docker-py API version + command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'" + register: docker_py_version_stdout + ignore_errors: yes + + - set_fact: + docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}" + docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}" + docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}" + + - debug: + msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}" + + - block: + # Cleanup docker daemon + - name: "Remove all ansible-test-* docker containers" + shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f' + register: docker_containers + retries: 3 + delay: 3 + until: docker_containers is success + + - name: "Remove all ansible-test-* docker volumes" + shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f' + register: docker_volumes + + - name: "Remove all ansible-test-* docker networks" + shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm' + register: docker_networks + + - name: Cleaned docker resources + debug: + var: docker_resources + vars: + docker_resources: + containers: "{{ docker_containers.stdout_lines }}" + volumes: "{{ docker_volumes.stdout_lines }}" + networks: "{{ docker_networks.stdout_lines }}" + + # List all existing docker resources + - name: List all docker containers + command: docker ps --no-trunc -a + register: docker_containers + + - name: List all docker volumes + command: docker volume ls + register: docker_volumes + + - name: List all docker networks + command: docker network ls --no-trunc + register: docker_networks + + - name: List all docker images + command: docker images --no-trunc -a + register: docker_images + + - name: Still existing 
docker resources
+        debug:
+          var: docker_resources
+          vars:
+            docker_resources:
+              containers: "{{ docker_containers.stdout_lines }}"
+              volumes: "{{ docker_volumes.stdout_lines }}"
+              networks: "{{ docker_networks.stdout_lines }}"
+              images: "{{ docker_images.stdout_lines }}"
+
+      when: docker_cli_version is version('0.0', '>')
diff --git a/test/integration/targets/incidental_setup_docker/vars/Debian.yml b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
new file mode 100644
index 00000000..477bd124
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
@@ -0,0 +1,9 @@
+docker_packages:
+  - docker-ce=5:19.03.0*
+  - docker-ce-cli=5:19.03.0*
+
+docker_prereq_packages:
+  - apt-transport-https
+  - ca-certificates
+  - curl
+  - software-properties-common
diff --git a/test/integration/targets/incidental_setup_docker/vars/Fedora.yml b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
new file mode 100644
index 00000000..9dd84344
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
@@ -0,0 +1,5 @@
+docker_prereq_packages: []
+
+docker_packages:
+  - docker-ce-19.03.1
+  - docker-ce-cli-19.03.1
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
new file mode 100644
index 00000000..84ba0920
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,18 @@
+docker_prereq_packages:
+  - yum-utils
+  - device-mapper-persistent-data
+  - lvm2
+  - libseccomp
+
+docker_packages:
+  - docker-ce-19.03.1
+  - docker-ce-cli-19.03.1
+
+docker_pip_extra_packages:
+  - requests==2.6.0
+
+# We need to pin the above so pip finds the right system-installed package,
+# but we never want to try to remove it, so we subtract this from the set of
+# packages we remove on cleanup
+docker_pip_never_remove:
+  - requests==2.6.0
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
new file mode 100644
index 00000000..ff6dcf7b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,9 @@
+docker_prereq_packages:
+  - yum-utils
+  - device-mapper-persistent-data
+  - lvm2
+  - libseccomp
+
+docker_packages:
+  - docker-ce-19.03.13
+  - docker-ce-cli-19.03.13
diff --git a/test/integration/targets/incidental_setup_docker/vars/Suse.yml b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
new file mode 100644
index 00000000..b740861f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
@@ -0,0 +1,2 @@
+docker_packages:
+  - docker=19.03.1_ce
diff --git a/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..36ab54b9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,5 @@
+docker_pip_extra_packages:
+  # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
+  # to ensure our get_url tests work fine. This is only an issue if pyOpenSSL is also installed.
+ # Not sure why RHEL7 needs this specific version + - requests==2.6.0 diff --git a/test/integration/targets/incidental_setup_docker/vars/default.yml b/test/integration/targets/incidental_setup_docker/vars/default.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_setup_ec2/aliases b/test/integration/targets/incidental_setup_ec2/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_setup_ec2/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_setup_ec2/defaults/main.yml b/test/integration/targets/incidental_setup_ec2/defaults/main.yml new file mode 100644 index 00000000..fb1f88b1 --- /dev/null +++ b/test/integration/targets/incidental_setup_ec2/defaults/main.yml @@ -0,0 +1,2 @@ +--- +resource_prefix: 'ansible-testing-' diff --git a/test/integration/targets/incidental_setup_ec2/tasks/common.yml b/test/integration/targets/incidental_setup_ec2/tasks/common.yml new file mode 100644 index 00000000..bf23f539 --- /dev/null +++ b/test/integration/targets/incidental_setup_ec2/tasks/common.yml @@ -0,0 +1,119 @@ +--- + +# ============================================================ +- name: test with no parameters + action: "{{module_name}}" + register: result + ignore_errors: true + +- name: assert failure when called with no parameters + assert: + that: + - 'result.failed' + - 'result.msg == "missing required arguments: name"' + +# ============================================================ +- name: test with only name + action: "{{module_name}} name={{ec2_key_name}}" + register: result + ignore_errors: true + +- name: assert failure when called with only 'name' + assert: + that: + - 'result.failed' + - 'result.msg == "Either region or ec2_url must be specified"' + +# ============================================================ +- name: test invalid region parameter + action: "{{module_name}} name='{{ec2_key_name}}' region='asdf querty 1234'" + register: result + ignore_errors: true + +- name: assert invalid region parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("value of region must be one of:")' + +# ============================================================ +- name: test valid region parameter + action: "{{module_name}} name='{{ec2_key_name}}' region='{{ec2_region}}'" + register: result + ignore_errors: true + +- name: assert valid region parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + +# ============================================================ +- name: test environment variable EC2_REGION + action: "{{module_name}} name='{{ec2_key_name}}'" + environment: + EC2_REGION: '{{ec2_region}}' + register: result + ignore_errors: true + +- name: assert environment variable EC2_REGION + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + +# ============================================================ +- name: test invalid ec2_url parameter + action: "{{module_name}} name='{{ec2_key_name}}'" + environment: + EC2_URL: bogus.example.com + register: result + ignore_errors: true + +- name: assert invalid ec2_url parameter + assert: + that: + - 'result.failed' + - 'result.msg.startswith("No handler was ready to authenticate.")' + +# ============================================================ +- name: test valid ec2_url parameter + action: "{{module_name}} name='{{ec2_key_name}}'" + environment: + EC2_URL: '{{ec2_url}}' + 
register: result
+  ignore_errors: true
+
+- name: assert valid ec2_url parameter
+  assert:
+    that:
+      - 'result.failed'
+      - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test credentials from environment
+  action: "{{module_name}} name='{{ec2_key_name}}'"
+  environment:
+    EC2_REGION: '{{ec2_region}}'
+    EC2_ACCESS_KEY: bogus_access_key
+    EC2_SECRET_KEY: bogus_secret_key
+  register: result
+  ignore_errors: true
+
+- name: assert credentials from environment
+  assert:
+    that:
+      - 'result.failed'
+      - '"EC2ResponseError: 401 Unauthorized" in result.msg'
+
+# ============================================================
+- name: test credential parameters
+  action: "{{module_name}} name='{{ec2_key_name}}' ec2_region='{{ec2_region}}' ec2_access_key=bogus_access_key ec2_secret_key=bogus_secret_key"
+  register: result
+  ignore_errors: true
+
+- name: assert credential parameters
+  assert:
+    that:
+      - 'result.failed'
+      - '"EC2ResponseError: 401 Unauthorized" in result.msg'
diff --git a/test/integration/targets/incidental_setup_ec2/vars/main.yml b/test/integration/targets/incidental_setup_ec2/vars/main.yml
new file mode 100644
index 00000000..3d7209ef
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/vars/main.yml
@@ -0,0 +1,3 @@
+---
+ec2_url: ec2.amazonaws.com
+ec2_region: us-east-1
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/README.md b/test/integration/targets/incidental_setup_flatpak_remote/README.md
new file mode 100644
index 00000000..d7916c14
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/README.md
@@ -0,0 +1,138 @@
+# Create a dummy flatpak repository remote
+
+This document describes how to create a local flatpak dummy repo, just like the one contained in the `files/repo.tar.xz` archive.
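+
+If you only want to inspect the bundled archive rather than rebuild it, plain tar is enough. The commands below assume you run them from this role's directory and that the archive keeps a `repo/` directory at its root, which is what the `file:///tmp/flatpak/repo` URLs used later expect:
+
+```
+tar -tJf files/repo.tar.xz                                # list the contents
+mkdir -p /tmp/flatpak && tar -xJf files/repo.tar.xz -C /tmp/flatpak
+```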
+
+
+## Create a hello world app
+
+Prerequisites:
+
+ - flathub
+
+Prepare the environment:
+
+```
+flatpak install --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+```
+
+Create a hello world executable:
+
+```
+echo $'#!/bin/sh\necho hello world' > hello.sh
+```
+
+To create dummy flatpaks, run this (defining a unique NUM for every flatpak to add):
+
+```
+export NUM=1
+flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+flatpak build appdir$NUM mkdir /app/bin;
+flatpak build appdir$NUM install --mode=750 hello.sh /app/bin;
+flatpak build-finish --command=hello.sh appdir$NUM
+```
+
+## Create a repo and/or add the app to it
+
+Create a repo and add the file to it in one command:
+
+```
+flatpak build-export repo appdir$NUM stable
+```
+
+## Create flatpak*-files
+
+Put a flatpakref file under the repo folder (`repo/com.dummy.App1.flatpakref`):
+
+```
+[Flatpak Ref]
+Title=Dummy App$NUM
+Name=com.dummy.App$NUM
+Branch=stable
+Url=file:///tmp/flatpak/repo
+GPGKey={{ base64-encoded public KEY }}
+IsRuntime=false
+RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+```
+
+Add a `.flatpakrepo` file to the `repo` folder (`repo/dummy-repo.flatpakrepo`):
+
+```
+[Flatpak Repo]
+Title=Dummy Repo
+Url=file:///tmp/flatpak/repo
+Comment=Dummy repo for ansible module integration testing
+Description=Dummy repo for ansible module integration testing
+GPGKey={{ base64-encoded public KEY }}
+```
+
+## Sign the repo
+
+Create a new key in a new gpg home folder (on RedHat systems, the executable needs to be addressed as gpg2):
+
+```
+mkdir gpg
+gpg --homedir gpg --quick-gen-key test@dummy.com
+```
+
+Sign the repo and summary file; you need to redo this when you update the repository:
+
+```
+flatpak build-sign repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+```
+
+Export the public key as a file:
+
+```
+gpg --homedir=gpg --export KEY_ID > dummy-repo.gpg
+```
+
+Create a base64-encoded string from the gpg file for the `GPGKey=` property in the flatpak*-files:
+
+```
+base64 dummy-repo.gpg | tr -d '\n'
+```
+
+## How to use the repo
+
+Now you can add the `repo` folder as a local repo:
+
+```
+flatpak --system remote-add --gpg-import=/tmp/flatpak/repo/dummy-repo.gpg dummy-repo /tmp/flatpak/repo
+```
+
+Or, via `.flatpakrepo` file:
+
+```
+flatpak --system remote-add dummy-repo /tmp/flatpak/repo/dummy-repo.flatpakrepo
+```
+
+And install the hello world flatpaks like this:
+
+```
+flatpak --system install dummy-repo com.dummy.App$NUM
+```
+
+Or from flatpakref:
+
+```
+flatpak --system install --from /tmp/flatpak/repo/com.dummy.App$NUM.flatpakref
+```
+
+Run the app:
+
+```
+flatpak run com.dummy.App$NUM
+```
+
+To install an app without any runtime dependencies (the app will be broken, but it is enough to test flatpak installation):
+
+```
+flatpak --system install --no-deps dummy-repo com.dummy.App$NUM
+```
+
+## Sources:
+
+* https://blogs.gnome.org/alexl/2017/02/10/maintaining-a-flatpak-repository/
+
+* http://docs.flatpak.org/en/latest/first-build.html
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/aliases b/test/integration/targets/incidental_setup_flatpak_remote/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz new file mode 100644 index 00000000..544bf706 Binary files /dev/null and b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz differ diff --git a/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml new file mode 100644 index 00000000..9380dee9 --- /dev/null +++ b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml @@ -0,0 +1,4 @@ +- name: remove temporary flatpak link + file: + state: absent + path: /tmp/flatpak diff --git a/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml new file mode 100644 index 00000000..75ee4583 --- /dev/null +++ b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml new file mode 100644 index 00000000..c199d216 --- /dev/null +++ b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml @@ -0,0 +1,22 @@ +- name: Set up dummy flatpak repository remote + block: + + - name: Copy repo into place + unarchive: + src: repo.tar.xz + dest: "{{ remote_tmp_dir }}" + owner: root + group: root + mode: 0644 + + - name: Create deterministic link to temp directory + file: + state: link + src: "{{ remote_tmp_dir }}/" + path: "/tmp/flatpak" + owner: root + group: root + mode: 0644 + notify: remove temporary flatpak link + + become: true diff --git a/test/integration/targets/incidental_setup_mongodb/aliases b/test/integration/targets/incidental_setup_mongodb/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_setup_mongodb/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_setup_mongodb/defaults/main.yml b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml new file mode 100644 index 00000000..b205013c --- /dev/null +++ b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml @@ -0,0 +1,46 @@ +mongodb_version: "4.0" + +apt: + keyserver: "keyserver.ubuntu.com" + keyserver_id: "9DA31620334BD75D9DCB49F368818C72E52529D4" + repo: "deb [ arch=amd64 ] http://repo.mongodb.org/apt/ubuntu {{ansible_distribution_release}}/mongodb-org/{{mongodb_version}} multiverse" + +mongodb_packages: + mongod: mongodb-org-server + mongos: mongodb-org-mongos + mongo: mongodb-org-shell + +yum: + name: mongodb-org + description: "Official MongoDB {{mongodb_version}} yum repo" + baseurl: https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/{{mongodb_version}}/x86_64/ + gpgcheck: 1 + gpgkey: https://www.mongodb.org/static/pgp/server-{{mongodb_version}}.asc + redhat8url: https://repo.mongodb.org/yum/redhat/7/mongodb-org/{{mongodb_version}}/x86_64/ + fedoraurl: https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/{{mongodb_version}}/x86_64/ + +debian_packages_py2: + - python-dev + - python-setuptools + - python-pip + +debian_packages_py36: + - python3.6-dev + - python3-setuptools + - python3-pip + +redhat_packages_py2: + - python-devel + - python-setuptools + - python-pip + +redhat_packages_py3: + - python3-devel + - python3-setuptools + - python3-pip + +# Do not install requests[security] via pip. It will cause test failures. 
+# See https://github.com/ansible/ansible/pull/66319
+pip_packages:
+  - psutil
+  - pymongo
diff --git a/test/integration/targets/incidental_setup_mongodb/handlers/main.yml b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
new file mode 100644
index 00000000..1b73525e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Remove debian_packages_py2
+  apt:
+    name: "{{ debian_packages_py2 }}"
+    state: absent
+
+- name: Remove debian_packages_py36
+  apt:
+    name: "{{ debian_packages_py36 }}"
+    state: absent
+
+- name: Remove redhat_packages_py2
+  yum:
+    name: "{{ redhat_packages_py2 }}"
+    state: absent
+
+- name: Remove redhat_packages_py3
+  yum:
+    name: "{{ redhat_packages_py3 }}"
+    state: absent
+
+- name: remove mongodb pip packages
+  pip:
+    name: "{{ pip_packages }}"
+    state: absent
diff --git a/test/integration/targets/incidental_setup_mongodb/tasks/main.yml b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
new file mode 100644
index 00000000..3bd090ca
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
@@ -0,0 +1,168 @@
+# (c) 2019, Rhys Campbell
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# ============================================================
+
+# https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/
+# Support for Ubuntu 14.04 has been removed from MongoDB 4.0.10+, 3.6.13+, and 3.4.21+.
+# CentOS6 has python version issues
+# Ubuntu 20.04 does not yet have the required packages
+- meta: end_play
+  when: (ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04')
+        or (ansible_distribution == 'Ubuntu' and ansible_distribution_version == '20.04')
+        or (ansible_os_family == "RedHat" and ansible_distribution_major_version == '6')
+        or ansible_os_family == "Suse"
+        or ansible_distribution == 'Fedora'
+        or (ansible_facts['distribution'] == "CentOS")
+
+# Ubuntu
+- name: Import MongoDB public GPG Key
+  apt_key:
+    keyserver: "{{ apt.keyserver }}"
+    id: "{{ apt.keyserver_id }}"
+  when:
+    - ansible_distribution_version in ["16.04", "18.04"]
+    - ansible_distribution == 'Ubuntu'
+
+- name: Add MongoDB repository into sources list
+  apt_repository:
+    repo: "{{ apt.repo }}"
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution_version in ["16.04", "18.04"]
+    - ansible_distribution == 'Ubuntu'
+
+# Need to handle various platforms here. Package name will not always be the same.
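+# (The per-platform names come from the mongodb_packages map in defaults/main.yml.)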
+- name: Ensure mongod package is installed
+  apt:
+    name: "{{ mongodb_packages.mongod }}"
+    state: present
+    force: yes
+  when:
+    - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongos package is installed
+  apt:
+    name: "{{ mongodb_packages.mongos }}"
+    state: present
+    force: yes
+  when:
+    - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongo client is installed
+  apt:
+    name: "{{ mongodb_packages.mongo }}"
+    state: present
+    force: yes
+  when:
+    - ansible_distribution == 'Ubuntu'
+# EOF Ubuntu
+
+# Redhat
+- name: Add MongoDB repo
+  yum_repository:
+    name: "{{ yum.name }}"
+    description: "{{ yum.description }}"
+    baseurl: "{{ yum.baseurl }}"
+    gpgcheck: "{{ yum.gpgcheck }}"
+    gpgkey: "{{ yum.gpgkey }}"
+  when:
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_version.split('.')[0]|int <= 7
+    - not ansible_distribution == "Fedora"
+
+
+- name: RedHat 8 repo not yet available so use 7 url
+  yum_repository:
+    name: "{{ yum.name }}"
+    description: "{{ yum.description }}"
+    baseurl: "{{ yum.redhat8url }}"
+    gpgcheck: "{{ yum.gpgcheck }}"
+    gpgkey: "{{ yum.gpgkey }}"
+  when:
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_version.split('.')[0]|int == 8
+    - not ansible_distribution == "Fedora"
+
+- name: Another url for Fedora based systems
+  yum_repository:
+    name: "{{ yum.name }}"
+    description: "{{ yum.description }}"
+    baseurl: "{{ yum.fedoraurl }}"
+    gpgcheck: "{{ yum.gpgcheck }}"
+    gpgkey: "{{ yum.gpgkey }}"
+  when:
+    - ansible_distribution == "Fedora"
+
+- name: Ensure mongod package is installed
+  yum:
+    name: "{{ mongodb_packages.mongod }}"
+    state: present
+  when: ansible_os_family == "RedHat"
+
+- name: Ensure mongos package is installed
+  yum:
+    name: "{{ mongodb_packages.mongos }}"
+    state: present
+  when: ansible_os_family == "RedHat"
+
+- name: Ensure mongo client is installed
+  yum:
+    name: "{{ mongodb_packages.mongo }}"
+    state: present
+  when: ansible_os_family == "RedHat"
+# EOF Redhat
+
+- name: Install debian_packages_py2
+  apt:
+    name: "{{ debian_packages_py2 }}"
+  when:
+    - ansible_os_family == "Debian"
+    - ansible_distribution_version == "16.04"
+  notify: Remove debian_packages_py2
+
+- name: Install debian_packages_py36
+  apt:
+    name: "{{ debian_packages_py36 }}"
+  when:
+    - ansible_os_family == "Debian"
+    - ansible_distribution_version == "18.04"
+  notify: Remove debian_packages_py36
+
+- name: Install redhat_packages_py2
+  yum:
+    name: "{{ redhat_packages_py2 }}"
+  when:
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_version|float < 8
+  notify: Remove redhat_packages_py2
+
+- name: Install redhat_packages_py3
+  yum:
+    name: "{{ redhat_packages_py3 }}"
+  when:
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_version|float >= 8
+  notify: Remove redhat_packages_py3
+
+- name: Install pip packages
+  pip:
+    name: "{{ pip_packages }}"
+    state: present
+  notify: remove mongodb pip packages
diff --git a/test/integration/targets/incidental_setup_postgresql_db/aliases b/test/integration/targets/incidental_setup_postgresql_db/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
new file mode 100644
index 00000000..aea02442
--- /dev/null
+++
b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml @@ -0,0 +1,17 @@ +postgresql_service: postgresql + +postgresql_packages: + - postgresql-server + - python-psycopg2 + +pg_user: postgres +pg_group: root + +locale_latin_suffix: +locale_utf8_suffix: + +# defaults for test SSL +ssl_db: 'ssl_db' +ssl_user: 'ssl_user' +ssl_pass: 'ssl_pass' +ssl_rootcert: '~{{ pg_user }}/root.crt' diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql new file mode 100644 index 00000000..53c79666 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() +RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text'; diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql new file mode 100644 index 00000000..227ba1b4 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() +RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text'; diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql new file mode 100644 index 00000000..7d6a60e5 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() +RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text'; diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control new file mode 100644 index 00000000..4f8553c2 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control @@ -0,0 +1,3 @@ +comment = 'dummy extension used to test postgresql_ext Ansible module' +default_version = '3.0' +relocatable = true diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf new file mode 100644 index 00000000..58de3607 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf @@ -0,0 +1,10 @@ +# !!! This file managed by Ansible. Any local changes may be overwritten. !!! 
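+# (Field order in the entries below is: TYPE, DATABASE, USER, [CIDR-]ADDRESS, METHOD. The 'trust' method lets the administrative {{ pg_user }} account connect over local sockets without a password, which the test tasks rely on.)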
+ +# Database administrative login by UNIX sockets +# note: you may wish to restrict this further later +local all {{ pg_user }} trust + +# TYPE DATABASE USER CIDR-ADDRESS METHOD +local all all md5 +host all all 127.0.0.1/32 md5 +host all all ::1/128 md5 diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml new file mode 100644 index 00000000..2e969c31 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml @@ -0,0 +1,222 @@ +- name: python 2 + set_fact: + python_suffix: "" + when: ansible_python_version is version('3', '<') + +- name: python 3 + set_fact: + python_suffix: "-py3" + when: ansible_python_version is version('3', '>=') + +- name: Include distribution and Python version specific variables + include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' + - '{{ ansible_os_family }}{{ python_suffix }}.yml' + - 'default{{ python_suffix }}.yml' + paths: + - "{{ role_path }}/vars" + +- name: make sure the dbus service is started under systemd + systemd: + name: dbus + state: started + when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora' + +# Make sure we start fresh +- name: stop postgresql service + service: name={{ postgresql_service }} state=stopped + ignore_errors: True + +- name: remove old db (RedHat or Suse) + file: + path: "{{ pg_dir }}" + state: absent + ignore_errors: True + when: ansible_os_family == "RedHat" or ansible_os_family == "Suse" + +- name: remove old db (FreeBSD) + file: + path: "{{ pg_dir }}" + state: absent + ignore_errors: True + when: ansible_os_family == "FreeBSD" + +# Theoretically, pg_dropcluster should work but it doesn't so remove files +- name: remove old db config and files (debian) + file: + path: '{{ loop_item }}' + state: absent + ignore_errors: True + when: ansible_os_family == "Debian" + loop: + - /etc/postgresql + - /var/lib/postgresql + loop_control: + loop_var: loop_item + +- name: install dependencies for postgresql test + package: + name: "{{ postgresql_package_item }}" + state: present + with_items: "{{ postgresql_packages }}" + loop_control: + loop_var: postgresql_package_item + +- name: initialize postgres (FreeBSD) + command: /usr/local/etc/rc.d/postgresql oneinitdb + when: ansible_os_family == "FreeBSD" + +- name: Initialize postgres (RedHat systemd) + command: postgresql-setup initdb + when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd" + +- name: Initialize postgres (RedHat sysv) + command: /sbin/service postgresql initdb + when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd" + +- name: Initialize postgres (Debian) + shell: '. 
/usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main' + args: + creates: "/etc/postgresql/{{ pg_ver }}/" + when: ansible_os_family == 'Debian' + +- name: Initialize postgres (Suse) + service: name=postgresql state=restarted + when: ansible_os_family == 'Suse' + +- name: Copy pg_hba into place + template: + src: files/pg_hba.conf + dest: "{{ pg_hba_location }}" + owner: "{{ pg_user }}" + group: "{{ pg_group }}" + mode: "0644" + +- name: Generate locales (Debian) + locale_gen: + name: '{{ item }}' + state: present + with_items: + - pt_BR + - es_ES + when: ansible_os_family == 'Debian' + +# Suse: locales are installed by default (glibc-locale package). +# Fedora 23: locales are installed by default (glibc-common package) +# CentOS: all locales are installed by default (glibc-common package) but some +# RPM macros could prevent their installation (for example when using anaconda +# instLangs parameter). + +- block: + - name: Install langpacks (RHEL8) + yum: + name: + - glibc-langpack-es + - glibc-langpack-pt + - glibc-all-langpacks + state: present + when: ansible_distribution_major_version is version('8', '>=') + + - name: Check if locales need to be generated (RedHat) + shell: "localedef --list-archive | grep -a -q '^{{ locale }}$'" + register: locale_present + ignore_errors: True + with_items: + - es_ES + - pt_BR + loop_control: + loop_var: locale + + - name: Reinstall internationalization files + shell: 'yum -y reinstall glibc-common || yum -y install glibc-common' + args: + warn: no + when: locale_present is failed + + - name: Generate locale (RedHat) + command: 'localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}' + when: item is failed + with_items: '{{ locale_present.results }}' + when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora' + +- name: Install glibc langpacks (Fedora >= 24) + package: + name: '{{ item }}' + state: 'latest' + with_items: + - glibc-langpack-es + - glibc-langpack-pt + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=') + +- name: enable postgresql service (FreeBSD) + lineinfile: + path: /etc/rc.conf + line: 'postgresql_enable="YES"' + when: ansible_os_family == "FreeBSD" + +- name: stop postgresql service + service: + name: "{{ postgresql_service }}" + state: stopped + +- name: pause between stop and start of postgresql service + pause: + seconds: 3 + +- name: start postgresql service + service: + name: "{{ postgresql_service }}" + state: started + +######################## +# Setup dummy extension: +- name: copy control file for dummy ext + copy: + src: dummy.control + dest: "/usr/share/postgresql/{{ pg_ver }}/extension/dummy.control" + mode: 0444 + when: ansible_os_family == 'Debian' + +- name: copy version files for dummy ext + copy: + src: "{{ item }}" + dest: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}" + mode: 0444 + with_items: + - dummy--1.0.sql + - dummy--2.0.sql + - dummy--3.0.sql + when: ansible_os_family == 'Debian' + +- name: add update paths + file: + path: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}" + mode: 0444 + state: touch + with_items: + - dummy--1.0--2.0.sql + - dummy--2.0--3.0.sql + when: ansible_os_family == 'Debian' + +- name: Get PostgreSQL version + become_user: "{{ pg_user }}" + become: yes + shell: "echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres" + register: postgres_version_resp + +- name: Print PostgreSQL server version + 
debug: + msg: "{{ postgres_version_resp.stdout }}" + +# SSL configuration. +# Restricted to the Debian family because there are errors on other distributions +# that are not related to PostgreSQL or psycopg2 SSL support. +# The key point of these tests is to be sure that SSL options work in general +- import_tasks: ssl.yml + when: + - ansible_os_family == 'Debian' + - postgres_version_resp.stdout is version('9.4', '>=') diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml new file mode 100644 index 00000000..bc45ec6f --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml @@ -0,0 +1,81 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# The aim of this test is to be sure that SSL options work in general +# and to prepare the environment for testing these options in +# the following PostgreSQL modules (ssl_db, ssl_user, certs). +# Configured following https://www.postgresql.org/docs/current/ssl-tcp.html + +#################### +# Prepare for tests: + +- name: postgresql SSL - create database + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ ssl_db }}" + +- name: postgresql SSL - create role + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ ssl_user }}" + role_attr_flags: SUPERUSER + password: "{{ ssl_pass }}" + +- name: postgresql SSL - install openssl + become: yes + package: name=openssl state=present + +- name: postgresql SSL - create certs 1 + become_user: root + become: yes + shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/root.csr \ + -keyout ~{{ pg_user }}/root.key -subj "/CN=localhost.local"' + +- name: postgresql SSL - create certs 2 + become_user: root + become: yes + shell: 'openssl x509 -req -in ~{{ pg_user }}/root.csr -text -days 3650 \ + -extensions v3_ca -signkey ~{{ pg_user }}/root.key -out ~{{ pg_user }}/root.crt' + +- name: postgresql SSL - create certs 3 + become_user: root + become: yes + shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/server.csr \ + -keyout ~{{ pg_user }}/server.key -subj "/CN=localhost.local"' + +- name: postgresql SSL - create certs 4 + become_user: root + become: yes + shell: 'openssl x509 -req -in ~{{ pg_user }}/server.csr -text -days 365 \ + -CA ~{{ pg_user }}/root.crt -CAkey ~{{ pg_user }}/root.key -CAcreateserial -out server.crt' + +- name: postgresql SSL - set correct permissions on files + become_user: root + become: yes + file: + path: '{{ item }}' + mode: 0600 + owner: '{{ pg_user }}' + group: '{{ pg_user }}' + with_items: + - '~{{ pg_user }}/root.key' + - '~{{ pg_user }}/server.key' + - '~{{ pg_user }}/root.crt' + - '~{{ pg_user }}/server.csr' + +- name: postgresql SSL - enable SSL + become_user: "{{ pg_user }}" + become: yes + postgresql_set: + login_user: "{{ pg_user }}" + db: postgres + name: ssl + value: on + +- name: postgresql SSL - reload PostgreSQL to enable ssl + become: yes + service: + name: "{{ postgresql_service }}" + state: reloaded diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml new file mode 100644 index 00000000..c5c6795e --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" +
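+# Debian 8 (jessie) ships PostgreSQL 9.4 by default, hence the hard-coded 9.4 paths below.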
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.4/main" +pg_ver: 9.4 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml new file mode 100644 index 00000000..2f6b0d98 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py36-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml new file mode 100644 index 00000000..efb0603b --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py27-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml new file mode 100644 index 00000000..2f6b0d98 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py36-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml new file mode 100644 index 00000000..1fe66782 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql96-server + - py27-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.6 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml new file mode 100644 index 00000000..cd7c83a4 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql11-server + - py36-psycopg2 + +pg_dir: /var/db/postgres/data11 +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 11 +pg_user: postgres +pg_group: postgres + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml new file mode 100644 index 00000000..0b1ab5b2 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql11-server + - py27-psycopg2 + +pg_dir: /var/db/postgres/data11 +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 11 +pg_user: 
postgres +pg_group: postgres + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml new file mode 100644 index 00000000..ee083722 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql-server" + - "python3-psycopg2" + - "bzip2" + - "xz" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml new file mode 100644 index 00000000..20c4b1f5 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml @@ -0,0 +1,7 @@ +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" + - "bzip2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml new file mode 100644 index 00000000..4b6e744b --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.1/main" +pg_ver: 9.1 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml new file mode 100644 index 00000000..ffcc8dd4 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.3/main" +pg_ver: 9.3 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml new file mode 100644 index 00000000..b088c310 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.5/main" +pg_ver: 9.5 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml new file mode 100644 index 00000000..897efd2c --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.5/main" +pg_ver: 9.5 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml new file mode 100644 index 00000000..10453bdf --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - 
"postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/10/main" +pg_ver: 10 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-20-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-20-py3.yml new file mode 100644 index 00000000..7322bcb2 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-20-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/12/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/12/main" +pg_ver: 12 diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml new file mode 100644 index 00000000..19152a64 --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml @@ -0,0 +1,6 @@ +postgresql_packages: + - "postgresql-server" + - "python3-psycopg2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml new file mode 100644 index 00000000..ab36dd9f --- /dev/null +++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml @@ -0,0 +1,6 @@ +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/targets/incidental_setup_rabbitmq/aliases b/test/integration/targets/incidental_setup_rabbitmq/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_setup_rabbitmq/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf new file mode 100644 index 00000000..1e602175 --- /dev/null +++ b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf @@ -0,0 +1,8 @@ +listeners.ssl.default = 5671 + +ssl_options.cacertfile = /tls/ca_certificate.pem +ssl_options.certfile = /tls/server_certificate.pem +ssl_options.keyfile = /tls/server_key.pem +ssl_options.password = bunnies +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = false diff --git a/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml new file mode 100644 index 00000000..7a6c3e01 --- /dev/null +++ b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - incidental_setup_tls diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml new file mode 100644 index 00000000..ad401fb3 --- /dev/null +++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- include: ubuntu.yml + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_release != 'focal' diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml new file mode 100644 index 00000000..6d7a3ef0 --- /dev/null +++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml @@ -0,0 +1,63 
@@ +--- +# https://www.rabbitmq.com/install-debian.html#apt-pinning +- name: Pin erlang version that rabbitmq supports + copy: + dest: /etc/apt/preferences.d/erlang + content: | + Package: erlang* + Pin: version 1:20.3.8.18-1 + Pin-Priority: 1000 + + Package: esl-erlang + Pin: version 1:20.3.6 + Pin-Priority: 1000 + +- name: Install https transport for apt + apt: + name: apt-transport-https + state: latest + force: yes + +- name: Add RabbitMQ release signing key + apt_key: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rabbitmq/rabbitmq-release-signing-key.asc + state: present + +- name: Add RabbitMQ Erlang repository + apt_repository: + repo: "deb https://dl.bintray.com/rabbitmq-erlang/debian {{ ansible_distribution_release }} erlang-20.x" + filename: 'rabbitmq-erlang' + state: present + update_cache: yes + +# Required by the rabbitmq modules that uses the management API +- name: Install requests + pip: + name: requests + +- name: Install RabbitMQ Server + apt: + deb: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_rabbitmq/rabbitmq-server_3.7.14-1_all.deb + +- name: Install RabbitMQ TLS dependencies + apt: + name: "{{ item }}" + state: latest + loop: + - erlang-asn1 + - erlang-crypto + - erlang-public-key + - erlang-ssl + +- name: Ensure TLS config + copy: + src: rabbitmq.conf + dest: /etc/rabbitmq/rabbitmq.conf + +- name: Start RabbitMQ service + service: + name: rabbitmq-server + state: started + +- name: Enable management + command: rabbitmq-plugins enable --online rabbitmq_management diff --git a/test/integration/targets/incidental_setup_tls/aliases b/test/integration/targets/incidental_setup_tls/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem new file mode 100644 index 00000000..a438d926 --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDAjCCAeqgAwIBAgIJANguFROhaWocMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV +BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5 +MDExMTA4MzMxNVoXDTI5MDEwODA4MzMxNVowMTEgMB4GA1UEAwwXVExTR2VuU2Vs +ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQDqVt84czSxWnWW4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp +7PrBbYF05FOgSdJLvL6grlRSQK2VPsXdLfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4l +JVpSDsBV2orR4pOIf1s1+iSwvcRQkX46SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy +0K2MbRs7oG2rdKks8zisfT0ymKnrFTdVeUjIrg0sStaMnf9VVkcEeYkfNY0vWqdn +CV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET+q+gOvjsEqzn7DvlPkmk86hIIWXKi3aM +A9swknL3rnagJL6GioWRpYUwKdRKmZxdyr4I2JTTAgMBAAGjHTAbMAwGA1UdEwQF +MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQACTpPBf5WSwZ7r +hrbPUN3qVh70HI0ZNK2jlK6b5fpSdw3JI/GQl0Kw3eGICLzwTByWvhD62U7IigL5 +0UWxWuEod310Y/qo/7OxRVPp5PH/0oNGoKHhEzas2ii0heQYGsHQUKGzYNNyVfjy +nqBFz5AcKf067LcXivYqod6JDQHqFq/5/hWlIsHHrZIeijqqtthPq39GlGAYO+AB +U66nzlH7YQgmfYfy6l7O4LsjXf/bz9rWvueO3NqCsmXV+FacDkOkwWA5Kf6rcgNL +3G+2HAVTRIXDnO4ShnK6aYMW+UklpYRlVYBBUOdwoNIp5gI+BlSc1IuF6PdLVt3q +VdjN1MjY +-----END CERTIFICATE----- diff --git a/test/integration/targets/incidental_setup_tls/files/ca_key.pem b/test/integration/targets/incidental_setup_tls/files/ca_key.pem new file mode 100644 index 00000000..0a950eda --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/files/ca_key.pem @@ -0,0 +1,28 
@@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqVt84czSxWnWW +4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp7PrBbYF05FOgSdJLvL6grlRSQK2VPsXd +LfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4lJVpSDsBV2orR4pOIf1s1+iSwvcRQkX46 +SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy0K2MbRs7oG2rdKks8zisfT0ymKnrFTdV +eUjIrg0sStaMnf9VVkcEeYkfNY0vWqdnCV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET ++q+gOvjsEqzn7DvlPkmk86hIIWXKi3aMA9swknL3rnagJL6GioWRpYUwKdRKmZxd +yr4I2JTTAgMBAAECggEBALpg9ZDUMCiOpc+mbNO/ZkP90M7u38Q0M+7HY8XHOPkt +l+XUkWueSMRLhSeLDzMlnwf1HyN8RZLaJkzP6XAL1VXEwuXAiIskaZ4Cg07Arp/W +8cHhf4CcMuUVuCtOZcC+ajD4Do5zn9vkm9yH0ap0o0LdoWa/a8WfU+luy0EHBsSW +6qqI+nqNFmISluVbfWt7t3zp273+8sir6YeHQu9G91/jzggv8rHmu4EHhi3cnU0K +vY6OPCGBL7nrg9Rv1LSFpH95TvlIM6/Cm0AjgW7m6XwWUTaI9p+GvKzrYUSLd9L/ +QxlmAwiu/sBTXLrsWyr8XEtj+lVGxQ6eFbf6E+lUm8ECgYEA+8Wgmhf3VsC3gvJz +w2jApEoOioD5iGOWGClGVURkfaBhFELr4XCTVMdBuCtxT7LYTMHTAlBqIbdWDjB4 +m/E417hLGogSDy7j0R0Mx75OOGEitxYUhe0VGDNoytgCNd2UnTMt42lp+9vAHZag +INhVDOnxRNdtNTf1yYkWUMEbh1sCgYEA7kZNJXPVYJtR78+km/Gcv64Umci7KUV+ +hYc7chR5xv3cXvXg5eojKa4G7CyMQTX7VnRa6CiQKdN73AbIAhS4Oy5UlCOKtmb8 +xnBiOAYwSpOfIeZhjq0RvEeZX0t6u7XsErBZ03rEPKXF2nNDo1x8byrlKPtlUzwJ +gb5yjmK/mekCgYEA1TWQAs5m4+2Bun+tbv7nnHkmhT4hktGays0xRYYMf6Jwc6MU +dC5MZg/zZI5Nf8uZhq7hDWWh6vmCA7QifxSxKWVlHIu8l2UDAhRSvVg4j2Aa8Obe +7GdQZNUsWhLBFHKXpuQvaRTc7q8yqxvicM4igDQg4EZ6sgW4vDm+TxapRF8CgYAz +n6mhPqpxRtWGxo8cdkmGwfmWpAXg2DykQ3teqQ8FTQUM0erLBWJe6mR3kONGUaLF +xWnYuMkbNsW0EwgMY17S+6O5gMXR5RhJChpNlxGpZrhoiNiEJ/0atMyG9/x8ZNrj +5a9ggU248hWe0bBK2YPgNgP2UBlQ4kYRBSkerkhi2QKBgF+tlpyqcU+0iY82qRS2 +wMf7oI2pWR8nX9LPAY/nnvwWvqwcAFJPMlSMTu8Ext6h7l9yu+7JGL6JWwsO57Lb +Gm/RxbuZ/kG/13+lSNmZiyHrhj6hZhkAMeFM34fpT4+DBXqSxZuvdrmwBc5B2jYg +F9Bv8gcmZlGhqONL23evr9Gu +-----END PRIVATE KEY----- diff --git a/test/integration/targets/incidental_setup_tls/files/client_certificate.pem b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem new file mode 100644 index 00000000..501d8389 --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDRjCCAi6gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz +MThaFw0yOTAxMDgwODMzMThaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz +MQ8wDQYDVQQKDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCoM+OQ3HCnCUAAz9KGGTwWB9hQbUfAZXm/stlb2/uOAp3rNwxAlCs/giymBHE6 +Iu6mrK006Vn+Z9ibqIrD2LuCOxcu25y8goqG62TgdP5sa9wR+597s0XssnwnaY8y +bJ3p2zWAJvMgqQ0iNW/ZynpWbO85K5SryUykF7FAeNU9ogGGlIwCPjHhPvnwjkqd +yDqaA1VaJKDUWIF9joI7sV4VLgGhQvzXRrHULsTeIF2m0+ebL0PTNEWHQ0dtgLYX +kW7YO4Y6+n3cjHNH4qTof8V30EK8pk8kTdJ/x6ubwf+klFCAyroOxNOaxUy299Oo +yD6qIPJPnGkPhrKtWnWIhNzJAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD +AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz +LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN +AQELBQADggEBAK214+VVXnGnsUlvd9Q6A2Ea6UGrr6b7xkmlnIaNd+6xoUsDsHob +srHYm7UC0uLi1KwSunI7AU5ZELVEUfAmJzh3O4d6C5sQyqKYPqd5harWOQ3BOD0I +plHpp7qMtsPDuJBtmE/bmvF85eto0H7pPz+cTTXRlOaVVeiHjMggFcXdy1MzGo9C +X/4wLQmsFeypTfe+ZGqvDh99VV+ffNMIsMh+opWEloaKiHmDKB6S9aC/MsVVM4RR +nHm/UKTOukaGE9QIPkSSaygv3sBkVnQ2SHMvvtnjPHVHlizNoq6+YTnuOvKpo4o5 +V7Bij+W7rkBQLsEfwv2IC+gzmRz2yxr2tXk= +-----END CERTIFICATE----- diff --git a/test/integration/targets/incidental_setup_tls/files/client_key.pem b/test/integration/targets/incidental_setup_tls/files/client_key.pem new file mode 100644 index 00000000..850260a8 --- /dev/null +++ 
b/test/integration/targets/incidental_setup_tls/files/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAqDPjkNxwpwlAAM/Shhk8FgfYUG1HwGV5v7LZW9v7jgKd6zcM +QJQrP4IspgRxOiLupqytNOlZ/mfYm6iKw9i7gjsXLtucvIKKhutk4HT+bGvcEfuf +e7NF7LJ8J2mPMmyd6ds1gCbzIKkNIjVv2cp6VmzvOSuUq8lMpBexQHjVPaIBhpSM +Aj4x4T758I5Kncg6mgNVWiSg1FiBfY6CO7FeFS4BoUL810ax1C7E3iBdptPnmy9D +0zRFh0NHbYC2F5Fu2DuGOvp93IxzR+Kk6H/Fd9BCvKZPJE3Sf8erm8H/pJRQgMq6 +DsTTmsVMtvfTqMg+qiDyT5xpD4ayrVp1iITcyQIDAQABAoIBAHPszzpXs4xr46Cr +mvyxB6hnX76OkpUXWwGz0fptcsI9K3mhRuB7PhNXNE53YVIgITreZ8G/0jZ0e+VM +E9dG2HS5JRE2ap/BmJfERJIuD+vJqrL6KMCondi0arz/E6I9GdjDK+xW69nmqRaa +nawM0KQgD//m+WAsLJYrfg5hORZwI2SHaahawnCp0QaMmz3bdDWKRacM3q0UFX46 +Ze6CaZkUn+e1rHsTMcZBvxQWIVzysFNXh150idIB/PxL5YfCQqTSAj1c/nxaxz6a +BvHFlpaYR3tvXXlexxfjglCwsGyckbvTyP1cBZqpv5oES+VKt2PrOve9Zyax+CYT +0uQf6cECgYEA09+46QHXLfWh6jiJYu9skC9UrLU5czfCNB6PrUtFcjPFMYjZDcw9 +inJmcuTPXmfplxc47YDfpwotU+szTJDF+R8kknnfw9zVr/sIwZ5wsFfUQl/56Svn +AIOVvHHvcvMX95XKGiuTsoCIJZNjJN3l3ztu/bRciuiVLyizglwIVrMCgYEAyzvK +PFlWilbp3GPJlnW7x1bUxe1ziLE/Um+ujZx96+fy34hJLFdNdNzpNUjoOf3IDTGq +6xl+vXcf12gimWMFcD3qNIGKHBDM9cIB2RDbb6YcqI8lOqopsmOyGmVLPkRpCoUK +72kacQwvw6M9xjmpiG3dN8lE881jDmZi+hyCnJMCgYEAoIQnQAhP8Jbeo2dP1q+T +bS0elnX532uH6xqYOW8EXwAPznZiEw0ANspzCWqGHHzXQMusKmtvhcq1CpXvWHt6 +MUHB4GMK/wVosxmZya5yq3bu7ZZu7JOBQCdwosMi6NB5AO7vnaIUFLFB9E3UWBLw +243YicdCMU8B7yeD0ChPfPcCgYA1dYHKBBn+g8Q6Y8lIGaoOUmnfsok8gJtOfPAm +ce6xmi7J29iboE9QmTeC+62Sa44u4ky6UNeE0QwAJnVLcb+hebfcneKNZWH0l1bT +GVsPcFuDfzvkxZP4R782sERtmaMj0EFDHpuE9xatWIhMVyigKX4SSZAorXML+6S3 +c75rnwKBgBR+WU934wS+DbwTLlUB2mJWqJMEbOH/CUwPC7+VN4h1h3/i455iAeiU +BizLS0SlD+MoSbC7URcZuquqGkmMlnJXoxF+NdxoWZK78tYNftryWoR87TloiVc/ +LhkxZxje4tgW/mTLqH3zKDoyyzDzG6Q6tAUN2ZTjJFEws7qF30Qe +-----END RSA PRIVATE KEY----- diff --git a/test/integration/targets/incidental_setup_tls/files/server_certificate.pem b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem new file mode 100644 index 00000000..4a0ebc6e --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDRjCCAi6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz +MTZaFw0yOTAxMDgwODMzMTZaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz +MQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDIwErHwAesRBfd9HiZkmB3VYh28c1QkE9I8nYyHJKX2ZBUhAzK+h80BkcTJJ94 +265qWyACH/wl54Xe/ofFUFrGa4vz0qz4UkL/KI0OGw28Y4qnKdorb9DumbiIPB+9 +I9TJT9vhtXTxBNlBTpv3ONHL8EzdV6ZmuvELU11H27oQ4xoUYhfXPXLMLK0sOnXZ +lt0BOMMd5fVpJVa8fvXiw3626a0aXCr4e/MWUsBFRnzrXfgoW+AjYoTjKKS2hLYo +8//MM05h7ROIXrNe990sf9C1G+fOThmOMszK9sjMhu2xHranRcz5aA0UTfyOjTs8 +9WexUYhC5VorYyRWtVZu2mDjAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD +AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz +LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN +AQELBQADggEBAFoPBeB6tQhFS1198sia5NDHDDrghDOIlE0QbaoA+MSKzsaIy8Mu +mNcM2ewYpT600XXTBxcqF6/vuKL9OEbvivtRYQu1YfkifN1jzREoWTieUkR5ytzt +8ATfFkgTWJmiRiOIb/fNgewvhd+aKxep0OGwDiSKKl1ab6F17Cp4iK8sDBWmnUb6 +0Wf7pfver1Gl0Gp8vRXGUuc8a7udA9a8mV70HJlLkMdMvR9U8Bqih0+iRaqNWXRZ +7Lc6v5LbzrW/ntilmgU6F0lwxPydg49MY4UrSXcjYLZs9T4iYHwTfLxFjFMIgGwn +peYMKRj18akP9i2mjj5O2mRu4K+ecuUSOGI= +-----END CERTIFICATE----- diff --git a/test/integration/targets/incidental_setup_tls/files/server_key.pem b/test/integration/targets/incidental_setup_tls/files/server_key.pem new file mode 100644 index 00000000..c79ab648 --- /dev/null +++ 
b/test/integration/targets/incidental_setup_tls/files/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAyMBKx8AHrEQX3fR4mZJgd1WIdvHNUJBPSPJ2MhySl9mQVIQM +yvofNAZHEySfeNuualsgAh/8JeeF3v6HxVBaxmuL89Ks+FJC/yiNDhsNvGOKpyna +K2/Q7pm4iDwfvSPUyU/b4bV08QTZQU6b9zjRy/BM3VemZrrxC1NdR9u6EOMaFGIX +1z1yzCytLDp12ZbdATjDHeX1aSVWvH714sN+tumtGlwq+HvzFlLARUZ86134KFvg +I2KE4yiktoS2KPP/zDNOYe0TiF6zXvfdLH/QtRvnzk4ZjjLMyvbIzIbtsR62p0XM ++WgNFE38jo07PPVnsVGIQuVaK2MkVrVWbtpg4wIDAQABAoIBAHw3wA3pnNXTLJGC +fD1KfbZZjp9K76gyI10X6lsHow2i6dPiAah3LGecms4VkzfNdxcIW7303Kj3obZh ++ND277RnR6oPakgdXqdUCDP6OX2gemMFWqIWBkodhDmIOntmeHw4le4LwdiBD42B +frBy0B5JCsbLPYPDmPNRGh8krvVS+Eir4hb4tK95TPMSL0vEjvHYFbCxv7//Ri1p +3CROGp2CGX0WZ+Zs0crRNoIhRRM6kLAhROcqejtnEy6o7l5CWpCAL2vxlE9y8/kL +iRawSZRFZnz/zGnqpx0vswgvijkuPfcNGMSzdwaiDgQz8D0GkJ7s9VgzZJazNy+1 +ET/4YIECgYEA612rwP9Ar9qdYbmmMPaJzITnaIrNGfO2JvaQqZt+DG8sVgdxL7V5 +D6emcw406drKRZvFAxnW6ZW2bVpmit02osl0re2A/nOTXLNuo338Qkap/hG8YZrF +bw7w75pFa/rwlDtedjBnGHO2KbRXeU5Hn5wLoKjYgJoF6Ht+PPdL0IsCgYEA2lnC +pQEhM51iRMDqNdmVJyvsTNU1ikoO8HaXHq+LwOQETaKMnDwp4Bn14E815CTulAc/ +tsDTKSDk6umZ+IufG1a2v7CqgKVwkB4HkgxKFQs2gQdTFfoMi5eeHR+njuNtklp1 +9fWfKHsP/ddrg+iTVTRZBLWexgKK89IMHYalpAkCgYEAy0Q3a9NF81mTJ+3kOE8C +zO1OyLtuzGXsvxOb9c6C+owctyNwPeq05a89EgqH6hr5K0qOx9HOCCcyyJgVDQJl +CAuByB/gkmAQOTQBbhMFA9vxPanljknTDsnRjKwoHkw2712ig+Hjd3ufK79C+FGB +i7eBVzva1p2uUowshsxv3mcCgYAOFiRciMofjlO8o8V4W+Undcn02vxtQ4HbOYte +S2z0sMEmUQpJOghpkMMwCWwsn8VUf3M40w/MY3bhQNjSFA/br6hyjW8yhXnRkl5i +qbBN0z9c66AMlukgSFPHBTfGHB4Bhxx9Fa+C6Q2LDs6839BBevMTPrRTie509GQb +s4gUIQKBgAvE8wLcmozno0GLDnBdKRZP/C7tmVnAINuraITPUBTASwI+Qo8ILigQ +LRLaDqF84BEpjb8vdzkYFQqRQSZ8BI8NydfuKEFSBfL27sBvSGMYQJVm6bryUmPq +T3ayaeZ4Wb3FFDijgtM9dRKyf7p4hQPOqM44QrntAtb43b2Q5L7M +-----END RSA PRIVATE KEY----- diff --git a/test/integration/targets/incidental_setup_tls/tasks/main.yml b/test/integration/targets/incidental_setup_tls/tasks/main.yml new file mode 100644 index 00000000..c5b7a23a --- /dev/null +++ b/test/integration/targets/incidental_setup_tls/tasks/main.yml @@ -0,0 +1,21 @@ +--- +# Generated certificate with: https://github.com/michaelklishin/tls-gen +# ~/tls-gen/basic# make PASSWORD=bunnies CN=ansible.tls.tests +# verify with: make info + +- name: ensure target directory is present + file: + path: /tls + state: directory + +- name: ensure TLS files are present + copy: + src: "{{ item }}" + dest: "/tls/{{ item }}" + loop: + - ca_certificate.pem + - ca_key.pem + - client_certificate.pem + - client_key.pem + - server_certificate.pem + - server_key.pem diff --git a/test/integration/targets/incidental_synchronize/aliases b/test/integration/targets/incidental_synchronize/aliases new file mode 100644 index 00000000..31c6a8b4 --- /dev/null +++ b/test/integration/targets/incidental_synchronize/aliases @@ -0,0 +1 @@ +shippable/posix/incidental diff --git a/test/integration/targets/incidental_synchronize/files/bar.txt b/test/integration/targets/incidental_synchronize/files/bar.txt new file mode 100644 index 00000000..3e96db9b --- /dev/null +++ b/test/integration/targets/incidental_synchronize/files/bar.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/test/integration/targets/incidental_synchronize/files/foo.txt b/test/integration/targets/incidental_synchronize/files/foo.txt new file mode 100644 index 00000000..3e96db9b --- /dev/null +++ b/test/integration/targets/incidental_synchronize/files/foo.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/test/integration/targets/incidental_synchronize/tasks/main.yml 
b/test/integration/targets/incidental_synchronize/tasks/main.yml new file mode 100644 index 00000000..80e052a6 --- /dev/null +++ b/test/integration/targets/incidental_synchronize/tasks/main.yml @@ -0,0 +1,273 @@ +# test code for the synchronize module +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: install rsync + package: + name: rsync + when: ansible_distribution != "MacOSX" + +- name: cleanup old files + shell: rm -rf {{output_dir}}/* + +- name: create test new files + copy: dest={{output_dir}}/{{item}} mode=0644 content="hello world" + with_items: + - foo.txt + - bar.txt + +- name: synchronize file to new filename + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result + register: sync_result + +- assert: + that: + - "'changed' in sync_result" + - "sync_result.changed == true" + - "'cmd' in sync_result" + - "'rsync' in sync_result.cmd" + - "'msg' in sync_result" + - "sync_result.msg.startswith('>f+')" + - "sync_result.msg.endswith('+ foo.txt\n')" + +- name: test that the file was really copied over + stat: + path: "{{ output_dir }}/foo.result" + register: stat_result + +- assert: + that: + - "stat_result.stat.exists == True" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" + +- name: test that the file is not copied a second time + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result + register: sync_result + +- assert: + that: + - "sync_result.changed == False" + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}" + with_items: + - foo.result + - bar.result + +- name: Synchronize using the mode=push param + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/foo.result" + mode: push + register: sync_result + +- assert: + that: + - "'changed' in sync_result" + - "sync_result.changed == true" + - "'cmd' in sync_result" + - "'rsync' in sync_result.cmd" + - "'msg' in sync_result" + - "sync_result.msg.startswith('>f+')" + - "sync_result.msg.endswith('+ foo.txt\n')" + +- name: test that the file was really copied over + stat: + path: "{{ output_dir }}/foo.result" + register: stat_result + +- assert: + that: + - "stat_result.stat.exists == True" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" + +- name: test that the file is not copied a second time + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/foo.result" + mode: push + register: sync_result + +- assert: + that: + - "sync_result.changed == False" + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}" + with_items: + - foo.result + - bar.result + +- name: Synchronize using the mode=pull param + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/foo.result" + mode: pull + register: sync_result + +- assert: + that: + - "'changed' in sync_result" + - "sync_result.changed == true" + - "'cmd' in sync_result" + - "'rsync' in 
sync_result.cmd" + - "'msg' in sync_result" + - "sync_result.msg.startswith('>f+')" + - "sync_result.msg.endswith('+ foo.txt\n')" + +- name: test that the file was really copied over + stat: + path: "{{ output_dir }}/foo.result" + register: stat_result + +- assert: + that: + - "stat_result.stat.exists == True" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" + +- name: test that the file is not copied a second time + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/foo.result" + mode: pull + register: sync_result + +- assert: + that: + - "sync_result.changed == False" + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}" + with_items: + - foo.result + - bar.result + +- name: synchronize files using with_items (issue#5965) + synchronize: src={{output_dir}}/{{item}} dest={{output_dir}}/{{item}}.result + with_items: + - foo.txt + - bar.txt + register: sync_result + +- assert: + that: + - "sync_result.changed" + - "sync_result.msg == 'All items completed'" + - "'results' in sync_result" + - "sync_result.results|length == 2" + - "sync_result.results[0].msg.endswith('+ foo.txt\n')" + - "sync_result.results[1].msg.endswith('+ bar.txt\n')" + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}.result" + with_items: + - foo.txt + - bar.txt + +- name: synchronize files using rsync_path (issue#7182) + synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.rsync_path rsync_path="sudo rsync" + register: sync_result + +- assert: + that: + - "'changed' in sync_result" + - "sync_result.changed == true" + - "'cmd' in sync_result" + - "'rsync' in sync_result.cmd" + - "'rsync_path' in sync_result.cmd" + - "'msg' in sync_result" + - "sync_result.msg.startswith('>f+')" + - "sync_result.msg.endswith('+ foo.txt\n')" + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}" + with_items: + - foo.rsync_path + +- name: add subdirectories for link-dest test + file: + path: "{{output_dir}}/{{item}}/" + state: directory + mode: 0755 + with_items: + - directory_a + - directory_b + +- name: copy foo.txt into the first directory + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/{{item}}/foo.txt" + with_items: + - directory_a + +- name: synchronize files using link_dest + synchronize: + src: "{{output_dir}}/directory_a/foo.txt" + dest: "{{output_dir}}/directory_b/foo.txt" + link_dest: + - "{{output_dir}}/directory_a" + register: sync_result + +- name: get stat information for directory_a + stat: + path: "{{ output_dir }}/directory_a/foo.txt" + register: stat_result_a + +- name: get stat information for directory_b + stat: + path: "{{ output_dir }}/directory_b/foo.txt" + register: stat_result_b + +- assert: + that: + - "'changed' in sync_result" + - "sync_result.changed == true" + - "stat_result_a.stat.inode == stat_result_b.stat.inode" + +- name: synchronize files using link_dest that would be recursive + synchronize: + src: "{{output_dir}}/foo.txt" + dest: "{{output_dir}}/foo.result" + link_dest: + - "{{output_dir}}" + register: sync_result + ignore_errors: yes + +- assert: + that: + - sync_result is not changed + - sync_result is failed + +- name: Cleanup + file: + state: absent + path: "{{output_dir}}/{{item}}" + with_items: + - "directory_b/foo.txt" + - "directory_a/foo.txt" + - "directory_a" + - "directory_b" diff --git a/test/integration/targets/incidental_timezone/aliases b/test/integration/targets/incidental_timezone/aliases new file mode 100644 index 00000000..834cafc9 --- 
/dev/null +++ b/test/integration/targets/incidental_timezone/aliases @@ -0,0 +1,5 @@ +destructive +shippable/posix/incidental +skip/aix +skip/osx +skip/macos diff --git a/test/integration/targets/incidental_timezone/tasks/main.yml b/test/integration/targets/incidental_timezone/tasks/main.yml new file mode 100644 index 00000000..247ad6cf --- /dev/null +++ b/test/integration/targets/incidental_timezone/tasks/main.yml @@ -0,0 +1,57 @@ +# Because hwclock usually isn't available inside Docker containers in Shippable, +# these tasks will detect if hwclock works and only run hwclock tests if it is +# supported. That is why it is recommended to run these tests locally with +# `--docker-privileged` on centos6, centos7 and ubuntu1404 images. Example +# command to run on centos6: +# +# ansible-test integration --docker centos6 --docker-privileged -v timezone + +## +## set path to timezone config files +## + +- name: set config file path on Debian + set_fact: + timezone_config_file: '/etc/timezone' + when: ansible_os_family == 'Debian' + +- name: set config file path on RedHat + set_fact: + timezone_config_file: '/etc/sysconfig/clock' + when: ansible_os_family == 'RedHat' + +## +## set path to hwclock config files +## + +- name: set config file path on Debian + set_fact: + hwclock_config_file: '/etc/default/rcS' + when: ansible_os_family == 'Debian' + +- name: set config file path on RedHat + set_fact: + hwclock_config_file: '/etc/sysconfig/clock' + when: ansible_os_family == 'RedHat' + +- name: Run tests + # Skip tests on Fedora because dbus fails to start unless the container is run in privileged mode. + # Even then, it starts unreliably. This may be due to the move to cgroup v2 in Fedora 31. + # https://www.redhat.com/sysadmin/fedora-31-control-group-v2 + # Just skip Fedora rather than version-limiting because F30 goes EOL within a month of this writing + # and that is the oldest version we currently test in CI. F31+ are affected by the issue + # and making the tests work on them is something to deal with in community.general, not here.
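+ # (--docker-privileged matters for the hwclock tests largely because hwclock needs access to the host RTC device, e.g. /dev/rtc, which unprivileged containers do not normally expose.)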
+ when: ansible_distribution != 'Fedora' + block: + - name: set timezone to Etc/UTC + timezone: + name: Etc/UTC + register: original_timezone + + - block: + - include_tasks: test.yml + always: + - name: Restore original system timezone - {{ original_timezone.diff.before.name }} + timezone: + name: "{{ original_timezone.diff.before.name }}" + when: original_timezone is changed diff --git a/test/integration/targets/incidental_timezone/tasks/test.yml b/test/integration/targets/incidental_timezone/tasks/test.yml new file mode 100644 index 00000000..ec0d854d --- /dev/null +++ b/test/integration/targets/incidental_timezone/tasks/test.yml @@ -0,0 +1,607 @@ +## +## test setting timezone, idempotency and checkmode +## + +- name: set timezone to Australia/Brisbane (checkmode) + timezone: + name: Australia/Brisbane + check_mode: yes + register: timezone_set_checkmode + +- name: ensure timezone reported as changed in checkmode + assert: + that: + - timezone_set_checkmode.changed + - timezone_set_checkmode.diff.after.name == 'Australia/Brisbane' + - timezone_set_checkmode.diff.before.name == 'Etc/UTC' + +- name: ensure checkmode didn't change the timezone + command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane + register: result + failed_when: result is not failed + changed_when: no + +- name: ensure that checkmode didn't update the timezone in the config file + command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }} + when: + - ansible_service_mgr != 'systemd' + - ansible_os_family == 'RedHat' + +- name: ensure that checkmode didn't update the timezone in the config file + command: egrep '^Etc/UTC' {{ timezone_config_file }} + when: + - ansible_service_mgr != 'systemd' + - ansible_os_family == 'Debian' + +- name: set timezone to Australia/Brisbane + timezone: + name: Australia/Brisbane + register: timezone_set + +- name: ensure timezone changed + assert: + that: + - timezone_set.changed + - timezone_set.diff.after.name == 'Australia/Brisbane' + - timezone_set.diff.before.name == 'Etc/UTC' + +- name: ensure that the timezone is actually set + command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane + changed_when: no + +- name: ensure that the timezone is updated in the config file + command: egrep '^(TIME)?ZONE="Australia/Brisbane"' {{ timezone_config_file }} + when: + - ansible_service_mgr != 'systemd' + - ansible_os_family == 'RedHat' + +- name: ensure that the timezone is updated in the config file + command: egrep '^Australia/Brisbane' {{ timezone_config_file }} + when: + - ansible_service_mgr != 'systemd' + - ansible_os_family == 'Debian' + +- name: set timezone to Australia/Brisbane again + timezone: + name: Australia/Brisbane + register: timezone_again + +- name: ensure timezone idempotency + assert: + that: + - not timezone_again.changed + +- name: set timezone to Australia/Brisbane again in checkmode + timezone: + name: Australia/Brisbane + check_mode: yes + register: timezone_again_checkmode + +- name: ensure timezone idempotency (checkmode) + assert: + that: + - not timezone_again_checkmode.changed + +## +## tests for same timezones with different names +## + +- name: check dpkg-reconfigure + shell: type dpkg-reconfigure + register: check_dpkg_reconfigure + ignore_errors: yes + changed_when: no + +- name: check timedatectl + shell: type timedatectl && timedatectl + register: check_timedatectl + ignore_errors: yes + changed_when: no + +- block: + - name: set timezone to Etc/UTC + timezone: + name: Etc/UTC + + - name: change timezone from Etc/UTC to UTC + timezone: + name: UTC +
register: timezone_etcutc_to_utc + + - name: check timezone changed from Etc/UTC to UTC + assert: + that: + - timezone_etcutc_to_utc.changed + - timezone_etcutc_to_utc.diff.before.name == 'Etc/UTC' + - timezone_etcutc_to_utc.diff.after.name == 'UTC' + + - name: change timezone from UTC to Etc/UTC + timezone: + name: Etc/UTC + register: timezone_utc_to_etcutc + + - name: check timezone changed from UTC to Etc/UTC + assert: + that: + - timezone_utc_to_etcutc.changed + - timezone_utc_to_etcutc.diff.before.name == 'UTC' + - timezone_utc_to_etcutc.diff.after.name == 'Etc/UTC' + + when: + # FIXME: Due to a bug in dpkg-reconfigure, these tests fail on non-systemd Debian + - check_dpkg_reconfigure.rc != 0 or check_timedatectl.rc == 0 + +## +## no systemd tests for timezone +## + +- block: + ## + ## test with empty config file + ## + + - name: empty config file + command: cp /dev/null {{ timezone_config_file }} + + - name: set timezone to Europe/Belgrade (empty config file) + timezone: + name: Europe/Belgrade + register: timezone_empty_conf + + - name: check if timezone set (empty config file) + assert: + that: + - timezone_empty_conf.changed + - timezone_empty_conf.diff.after.name == 'Europe/Belgrade' + - timezone_empty_conf.diff.before.name == 'n/a' + + - name: check if the timezone is actually set (empty config file) + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + + + ## + ## test with deleted config file + ## + + - name: remove config file + file: + path: '{{ timezone_config_file }}' + state: absent + + - name: set timezone to Europe/Belgrade (no config file) + timezone: + name: Europe/Belgrade + register: timezone_missing_conf + + - name: check if timezone set (no config file) + assert: + that: + - timezone_missing_conf.changed + - timezone_missing_conf.diff.after.name == 'Europe/Belgrade' + - timezone_missing_conf.diff.before.name == 'n/a' + + - name: check if the timezone is actually set (no config file) + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + + + ## + ## test with /etc/localtime as symbolic link to a zoneinfo file + ## + + - name: create symlink /etc/localtime -> /usr/share/zoneinfo/Etc/UTC + file: + src: /usr/share/zoneinfo/Etc/UTC + dest: /etc/localtime + state: link + force: yes + + - name: set timezone to Europe/Belgrade (over symlink) + timezone: + name: Europe/Belgrade + register: timezone_symlink + + - name: check if timezone set (over symlink) + assert: + that: + - timezone_symlink.changed + - timezone_symlink.diff.after.name == 'Europe/Belgrade' + - timezone_symlink.diff.before.name == 'Etc/UTC' + + - name: check if the timezone is actually set (over symlink) + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + + + ## + ## test with /etc/localtime as broken symbolic link + ## + + - name: set timezone to a broken symlink + file: + src: /tmp/foo + dest: /etc/localtime + state: link + force: yes + + - name: set timezone to Europe/Belgrade (over broken symlink) + timezone: + name: Europe/Belgrade + register: timezone_symlink_broken + + - name: check if timezone set (over broken symlink) + assert: + that: + - timezone_symlink_broken.changed + - timezone_symlink_broken.diff.after.name == 'Europe/Belgrade' + - timezone_symlink_broken.diff.before.name == 'n/a' + + - name: check if the timezone is actually set (over broken symlink) + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + + + ## + ## test with /etc/localtime set
manually using copy + ## + + - name: set timezone manually by copying zone info file to /etc/localtime + copy: + src: /usr/share/zoneinfo/Etc/UTC + dest: /etc/localtime + remote_src: yes + + - name: set timezone to Europe/Belgrade (over copied file) + timezone: + name: Europe/Belgrade + register: timezone_copied + + - name: check if timezone set (over copied file) + assert: + that: + - timezone_copied.changed + - timezone_copied.diff.after.name == 'Europe/Belgrade' + - timezone_copied.diff.before.name == 'n/a' + + - name: check if the timezone is actually set (over copied file) + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + when: + - ansible_service_mgr != 'systemd' + - timezone_config_file is defined + + +#### +#### hwclock tests +#### + +- name: check if hwclock is supported in the environment + command: hwclock --test + register: hwclock_test + ignore_errors: yes + +- name: check if timedatectl works in the environment + command: timedatectl + register: timedatectl_test + ignore_errors: yes + +- name: set hwclock_supported fact + set_fact: + hwclock_supported: '{{ hwclock_test is successful or timedatectl_test is successful }}' +## +## test set hwclock, idempotency and checkmode +## + +- block: + - name: set hwclock to local + timezone: + hwclock: local + + - name: set hwclock to UTC (checkmode) + timezone: + hwclock: UTC + check_mode: yes + register: hwclock_set_checkmode + + - name: ensure hwclock reported as changed (checkmode) + assert: + that: + - hwclock_set_checkmode.changed + - hwclock_set_checkmode.diff.after.hwclock == 'UTC' + - hwclock_set_checkmode.diff.before.hwclock == 'local' + + - block: + - name: ensure that checkmode didn't update hwclock in /etc/adjtime + command: grep ^UTC /etc/adjtime + register: result + failed_when: result is not failed + + - name: ensure that checkmode didn't update hwclock in the config file + command: grep ^UTC=no {{ hwclock_config_file }} + when: ansible_service_mgr != 'systemd' + + - name: set hwclock to UTC + timezone: + hwclock: UTC + register: hwclock_set + + - name: ensure hwclock changed + assert: + that: + - hwclock_set.changed + - hwclock_set.diff.after.hwclock == 'UTC' + - hwclock_set.diff.before.hwclock == 'local' + + - block: + - name: ensure that hwclock is updated in /etc/adjtime + command: grep ^UTC /etc/adjtime + + - name: ensure that hwclock is updated in the config file + command: grep ^UTC=yes {{ hwclock_config_file }} + when: ansible_service_mgr != 'systemd' + + - name: set hwclock to UTC again + timezone: + hwclock: UTC + register: hwclock_again + + - name: ensure hwclock idempotency + assert: + that: + - not hwclock_again.changed + + - name: set hwclock to UTC again (checkmode) + timezone: + hwclock: UTC + check_mode: yes + register: hwclock_again_checkmode + + - name: ensure hwclock idempotency (checkmode) + assert: + that: + - not hwclock_again_checkmode.changed + + + ## + ## no systemd tests for hwclock + ## + + - block: + ## + ## test set hwclock with both /etc/adjtime and conf file deleted + ## + + - name: remove /etc/adjtime and conf file + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/adjtime + - '{{ hwclock_config_file }}' + + - name: set hwclock to UTC with deleted /etc/adjtime and conf file + timezone: + hwclock: UTC + register: hwclock_set_utc_deleted_adjtime_and_conf + + - name: ensure hwclock changed with deleted /etc/adjtime and conf + assert: + that: + - hwclock_set_utc_deleted_adjtime_and_conf.changed + - hwclock_set_utc_deleted_adjtime_and_conf.diff.after.hwclock == 'UTC' + -
+ + + ## + ## test set hwclock with /etc/adjtime deleted + ## + + - name: remove /etc/adjtime + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/adjtime + + - name: set hwclock to UTC with deleted /etc/adjtime + timezone: + hwclock: UTC + register: hwclock_set_utc_deleted_adjtime_utc + + - name: ensure hwclock reported no change with deleted /etc/adjtime + assert: + that: + - not hwclock_set_utc_deleted_adjtime_utc.changed + - hwclock_set_utc_deleted_adjtime_utc.diff.after.hwclock == 'UTC' + - hwclock_set_utc_deleted_adjtime_utc.diff.before.hwclock == 'UTC' + + - name: set hwclock to LOCAL with deleted /etc/adjtime + timezone: + hwclock: local + register: hwclock_set_local_deleted_adjtime_local + + - name: ensure hwclock changed to LOCAL with deleted /etc/adjtime + assert: + that: + - hwclock_set_local_deleted_adjtime_local.changed + - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local' + - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC' + + + ## + ## test set hwclock with conf file deleted + ## + + - name: remove conf file + file: + path: '{{ item }}' + state: absent + with_items: + - '{{ hwclock_config_file }}' + + - name: set hwclock to UTC with deleted conf + timezone: + hwclock: UTC + register: hwclock_set_utc_deleted_conf + + - name: ensure hwclock changed with deleted conf file + assert: + that: + - hwclock_set_utc_deleted_conf.changed + - hwclock_set_utc_deleted_conf.diff.after.hwclock == 'UTC' + - hwclock_set_utc_deleted_conf.diff.before.hwclock == 'n/a' + + + ## + ## test set hwclock with /etc/adjtime missing UTC/LOCAL strings + ## + + - name: create /etc/adjtime without UTC/LOCAL + copy: + content: "0.0 0 0\n0" + dest: /etc/adjtime + + - name: set hwclock to UTC with broken /etc/adjtime + timezone: + hwclock: UTC + register: hwclock_set_utc_broken_adjtime + + - name: ensure hwclock doesn't report changed with broken /etc/adjtime + assert: + that: + - not hwclock_set_utc_broken_adjtime.changed + - hwclock_set_utc_broken_adjtime.diff.after.hwclock == 'UTC' + - hwclock_set_utc_broken_adjtime.diff.before.hwclock == 'UTC' + + - name: set hwclock to LOCAL with broken /etc/adjtime + timezone: + hwclock: local + register: hwclock_set_local_broken_adjtime + + - name: ensure hwclock changed to LOCAL with broken /etc/adjtime + assert: + that: + - hwclock_set_local_broken_adjtime.changed + - hwclock_set_local_broken_adjtime.diff.after.hwclock == 'local' + - hwclock_set_local_broken_adjtime.diff.before.hwclock == 'UTC' + when: + - ansible_service_mgr != 'systemd' + - hwclock_config_file is defined + + #### + #### timezone + hwclock tests + #### + + ## + ## test set timezone and hwclock, idempotency and checkmode + ## + + - name: set timezone to Etc/UTC and hwclock to local + timezone: + name: Etc/UTC + hwclock: local + + - name: set timezone to Europe/Belgrade and hwclock to UTC (checkmode) + timezone: + name: Europe/Belgrade + hwclock: UTC + check_mode: yes + register: tzclock_set_checkmode + + - name: ensure timezone and hwclock reported as changed in checkmode + assert: + that: + - tzclock_set_checkmode.changed + - tzclock_set_checkmode.diff.after.name == 'Europe/Belgrade' + - tzclock_set_checkmode.diff.before.name == 'Etc/UTC' + - tzclock_set_checkmode.diff.after.hwclock == 'UTC' + - tzclock_set_checkmode.diff.before.hwclock == 'local' + + - name: ensure checkmode didn't change the timezone + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + register: result + failed_when: result is not failed + changed_when: no
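+ # Note (editorial): the 'register' plus 'failed_when: result is not failed' pattern + # above inverts a command's outcome, so the task passes only when cmp/grep finds + # nothing, i.e. when the value is absent; the config-file checks below use the + # same trick.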
+ + - block: + - name: ensure that checkmode didn't update the timezone in the config file + command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }} + when: + - ansible_os_family == 'RedHat' + + - name: ensure that checkmode didn't update the timezone in the config file + command: egrep '^Etc/UTC' {{ timezone_config_file }} + when: + - ansible_os_family == 'Debian' + + - name: ensure that checkmode didn't update hwclock in /etc/adjtime + command: grep ^UTC /etc/adjtime + register: result + failed_when: result is not failed + + - name: ensure that checkmode didn't update hwclock in the config file + command: grep ^UTC=no {{ hwclock_config_file }} + when: ansible_service_mgr != 'systemd' + + - name: set timezone to Europe/Belgrade and hwclock to UTC + timezone: + name: Europe/Belgrade + hwclock: UTC + register: tzclock_set + + - name: ensure timezone and hwclock changed + assert: + that: + - tzclock_set.changed + - tzclock_set.diff.after.name == 'Europe/Belgrade' + - tzclock_set.diff.before.name == 'Etc/UTC' + - tzclock_set.diff.after.hwclock == 'UTC' + - tzclock_set.diff.before.hwclock == 'local' + + - name: ensure that the timezone is actually set + command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade + changed_when: no + + - block: + - name: ensure that the timezone is updated in the config file + command: egrep '^(TIME)?ZONE="Europe/Belgrade"' {{ timezone_config_file }} + when: + - ansible_os_family == 'RedHat' + + - name: ensure that the timezone is updated in the config file + command: egrep 'Europe/Belgrade' {{ timezone_config_file }} + when: + - ansible_os_family == 'Debian' + + - name: ensure that hwclock is updated in /etc/adjtime + command: grep ^UTC /etc/adjtime + + - name: ensure that hwclock is updated in the config file + command: grep ^UTC=yes {{ hwclock_config_file }} + when: ansible_service_mgr != 'systemd' + + - name: set timezone to Europe/Belgrade and hwclock to UTC again + timezone: + name: Europe/Belgrade + hwclock: UTC + register: tzclock_set_again + + - name: check timezone and hwclock idempotency + assert: + that: + - not tzclock_set_again.changed + + - name: set timezone to Europe/Belgrade and hwclock to UTC again (checkmode) + timezone: + name: Europe/Belgrade + hwclock: UTC + check_mode: yes + register: tzclock_set_again_checkmode + + - name: check timezone and hwclock idempotency in checkmode + assert: + that: + - not tzclock_set_again_checkmode.changed + + when: + - ansible_system == 'Linux' + - hwclock_supported diff --git a/test/integration/targets/incidental_vyos_config/aliases b/test/integration/targets/incidental_vyos_config/aliases new file mode 100644 index 00000000..fae06ba0 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/aliases @@ -0,0 +1,2 @@ +shippable/vyos/incidental +network/vyos diff --git a/test/integration/targets/incidental_vyos_config/defaults/main.yaml b/test/integration/targets/incidental_vyos_config/defaults/main.yaml new file mode 100644 index 00000000..9ef5ba51 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_items: [] diff --git a/test/integration/targets/incidental_vyos_config/tasks/cli.yaml b/test/integration/targets/incidental_vyos_config/tasks/cli.yaml new file mode 100644 index 00000000..22a71d96 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tasks/cli.yaml @@ -0,0 +1,22 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path 
}}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + delegate_to: localhost + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=ansible.netcommon.network_cli) + include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + +- name: run test case (connection=local) + include: "{{ test_case_to_run }} ansible_connection=local" + with_first_found: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml b/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml new file mode 100644 index 00000000..8ed28748 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml @@ -0,0 +1,16 @@ +--- +- name: collect all cli_config test cases + find: + paths: "{{ role_path }}/tests/cli_config" + patterns: "{{ testcase }}.yaml" + register: test_cases + delegate_to: localhost + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case (connection=ansible.netcommon.network_cli) + include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/test/integration/targets/incidental_vyos_config/tasks/main.yaml b/test/integration/targets/incidental_vyos_config/tasks/main.yaml new file mode 100644 index 00000000..13977a44 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- {include: cli.yaml, tags: ['cli']} +- {include: cli_config.yaml, tags: ['cli_config']} diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml new file mode 100644 index 00000000..af6a772f --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml @@ -0,0 +1,113 @@ +--- +- debug: msg="START vyos/backup.yaml on connection={{ ansible_connection }}" + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_files + connection: local + +- name: delete backup files + file: + path: "{{ item.path }}" + state: absent + with_items: "{{backup_files.files|default([])}}" + +- name: take configure backup + vyos.vyos.vyos_config: + backup: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_files + connection: local + +- assert: + that: + - "backup_files.files is defined" + +- name: delete configurable backup file path + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ role_path }}/backup_test_dir/" + - "{{ role_path }}/backup/backup.cfg" + +- name: take configuration backup in custom filename and directory path + vyos.vyos.vyos_config: + backup: true + backup_options: + filename: backup.cfg + dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-1 exist + find: + paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg" + 
+ +- name: take configuration backup in custom filename + vyos.vyos.vyos_config: + backup: true + backup_options: + filename: backup.cfg + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-2 exists + find: + paths: "{{ role_path }}/backup/backup.cfg" + register: backup_file + connection: local + +- assert: + that: + - "backup_file.files is defined" + +- name: take configuration backup in custom path and default filename + vyos.vyos.vyos_config: + backup: true + backup_options: + dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-3 exists + find: + paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_file + connection: local + +- assert: + that: + - "backup_file.files is defined" + +- debug: msg="END vyos/backup.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml new file mode 100644 index 00000000..f1ddc71b --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml @@ -0,0 +1,63 @@ +--- +- debug: msg="START cli/check_config.yaml on connection={{ ansible_connection }}" + +- name: setup - ensure interface is not present + vyos.vyos.vyos_config: + lines: delete interfaces loopback lo + +- name: setup - create interface + vyos.vyos.vyos_config: + lines: + - interfaces + - interfaces loopback lo + - interfaces loopback lo description test + register: result + +# note: collapsing the duplicate lines does not work if they are ordered as +# lines: +# - interfaces loopback lo description test +# - interfaces loopback lo +# - interfaces + +- name: Check that multiple duplicate lines collapse into a single command + assert: + that: + - "{{ result.commands|length }} == 1" + +- name: Check that set is correctly prepended + assert: + that: + - "result.commands[0] == 'set interfaces loopback lo description test'" + +- name: configure config_check config command + vyos.vyos.vyos_config: + lines: delete interfaces loopback lo + register: result + +- assert: + that: + - "result.changed == true" + +- name: check config_check config command idempotent + vyos.vyos.vyos_config: + lines: delete interfaces loopback lo + register: result + +- assert: + that: + - "result.changed == false" + +- name: check multiple line config filter is working + vyos.vyos.vyos_config: + lines: + - set system login user esa level admin + - set system login user esa authentication encrypted-password '!abc!' + - set system login user vyos level admin + - set system login user vyos authentication encrypted-password 'abc' + register: result + +- assert: + that: + - "{{ result.filtered|length }} == 2"
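+ # Note (editorial): the vyos_config module strips lines it cannot safely load, + # such as the encrypted-password entries above, and reports them in + # 'result.filtered'; that is why exactly two of the four lines are expected there.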
+ +- debug: msg="END cli/check_config.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml new file mode 100644 index 00000000..2cd13509 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml @@ -0,0 +1,34 @@ +--- +- debug: msg="START cli/comment.yaml on connection={{ ansible_connection }}" + +- name: setup + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + +- name: configure using comment + vyos.vyos.vyos_config: + lines: set system host-name foo + comment: this is a test + register: result + +- assert: + that: + - "result.changed == true" + - "'set system host-name foo' in result.commands" + +- name: collect system commits + vyos.vyos.vyos_command: + commands: show system commit + register: result + +- assert: + that: + - "'this is a test' in result.stdout_lines[0][1]" + +- name: teardown + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + +- debug: msg="END cli/comment.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg b/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg new file mode 100644 index 00000000..36c98f19 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg @@ -0,0 +1,3 @@ + set service lldp + set protocols static + diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml new file mode 100644 index 00000000..d8e45e25 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml @@ -0,0 +1,54 @@ +--- +- debug: msg="START cli/save.yaml on connection={{ ansible_connection }}" + +- name: setup + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + +- name: configure hostname and save + vyos.vyos.vyos_config: + lines: set system host-name foo + save: true + register: result + +- assert: + that: + - "result.changed == true" + - "'set system host-name foo' in result.commands" + +- name: configure hostname and don't save + vyos.vyos.vyos_config: + lines: set system host-name bar + register: result + +- assert: + that: + - "result.changed == true" + - "'set system host-name bar' in result.commands" + +- name: save config + vyos.vyos.vyos_config: + save: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: save config again + vyos.vyos.vyos_config: + save: true + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + save: true + +- debug: msg="END cli/save.yaml on connection={{ ansible_connection }}"
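+ # Note (editorial): the 'save config again' step above intentionally asserts + # changed == false; with nothing new committed since the previous save, the + # module treats the second save as a no-op.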
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml new file mode 100644 index 00000000..c0826737 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml @@ -0,0 +1,53 @@ +--- +- debug: msg="START cli/simple.yaml on connection={{ ansible_connection }}" + +- name: setup + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + +- name: configure simple config command + vyos.vyos.vyos_config: + lines: set system host-name foo + register: result + +- assert: + that: + - "result.changed == true" + - "'set system host-name foo' in result.commands" + +- name: check simple config command idempotent + vyos.vyos.vyos_config: + lines: set system host-name foo + register: result + +- assert: + that: + - "result.changed == false" + +- name: Delete services + vyos.vyos.vyos_config: &del + lines: + - delete service lldp + - delete protocols static + +- name: Configure when commands start with whitespace + vyos.vyos.vyos_config: + src: "{{ role_path }}/tests/cli/config.cfg" + register: result + +- assert: + that: + - "result.changed == true" + - '"set service lldp" in result.commands' + - '"set protocols static" in result.commands' + +- name: Delete services + vyos.vyos.vyos_config: *del + +- name: teardown + vyos.vyos.vyos_config: + lines: set system host-name {{ inventory_hostname_short }} + match: none + +- debug: msg="END cli/simple.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml new file mode 100644 index 00000000..744bb7ea --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml @@ -0,0 +1,114 @@ +--- +- debug: msg="START cli_config/cli_backup.yaml on connection={{ ansible_connection }}" + +- name: delete configurable backup file path + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ role_path }}/backup_test_dir/" + - "{{ role_path }}/backup/backup.cfg" + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_files + connection: local + +- name: delete backup files + file: + path: "{{ item.path }}" + state: absent + with_items: "{{backup_files.files|default([])}}" + +- name: take config backup + ansible.netcommon.cli_config: + backup: true + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_files + connection: local + +- assert: + that: + - "backup_files.files is defined" + +- name: take configuration backup in custom filename and directory path + ansible.netcommon.cli_config: + backup: true + backup_options: + filename: backup.cfg + dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-1 exists + find: + paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg" + register: backup_file + connection: local + +- assert: + that: + - "backup_file.files is defined" + +- name: take configuration backup in custom filename + ansible.netcommon.cli_config: + backup: true + backup_options: + filename: backup.cfg + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-2 exists + find: + paths: "{{ role_path }}/backup/backup.cfg" + register: backup_file + connection: local + +- assert: + that: + - "backup_file.files is defined" + +- name: take configuration 
backup in custom path and default filename + ansible.netcommon.cli_config: + backup: true + backup_options: + dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + become: true + register: result + +- assert: + that: + - "result.changed == true" + +- name: check if the backup file-3 exists + find: + paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}" + pattern: "{{ inventory_hostname_short }}_config*" + register: backup_file + connection: local + +- assert: + that: + - "backup_file.files is defined" + +- debug: msg="END cli_config/cli_backup.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml new file mode 100644 index 00000000..c6c4f594 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml @@ -0,0 +1,28 @@ +--- +- debug: msg="START cli_config/cli_basic.yaml on connection={{ ansible_connection }}" + +- name: setup - remove interface description + ansible.netcommon.cli_config: &rm + config: delete interfaces loopback lo description + +- name: configure device with config + ansible.netcommon.cli_config: &conf + config: set interfaces loopback lo description 'this is a test' + register: result + +- assert: + that: + - "result.changed == true" + +- name: Idempotence + ansible.netcommon.cli_config: *conf + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + ansible.netcommon.cli_config: *rm + +- debug: msg="END cli_config/cli_basic.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml new file mode 100644 index 00000000..90ee1c86 --- /dev/null +++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml @@ -0,0 +1,30 @@ +--- +- debug: msg="START cli_config/cli_comment.yaml on connection={{ ansible_connection }}" + +- name: setup + ansible.netcommon.cli_config: &rm + config: set system host-name {{ inventory_hostname_short }} + +- name: configure using comment + ansible.netcommon.cli_config: + config: set system host-name foo + commit_comment: this is a test + register: result + +- assert: + that: + - "result.changed == true" + +- name: collect system commits + vyos.vyos.vyos_command: + commands: show system commit + register: result + +- assert: + that: + - "'this is a test' in result.stdout_lines[0][1]" + +- name: teardown + ansible.netcommon.cli_config: *rm + +- debug: msg="END cli_config/cli_comment.yaml on connection={{ ansible_connection }}" diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/aliases b/test/integration/targets/incidental_vyos_lldp_interfaces/aliases new file mode 100644 index 00000000..fae06ba0 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/aliases @@ -0,0 +1,2 @@ +shippable/vyos/incidental +network/vyos diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml new file mode 100644 index 00000000..164afead --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "[^_].*" +test_items: [] diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml 
b/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml new file mode 100644 index 00000000..ee1fa013 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml @@ -0,0 +1,3 @@ +--- +dependencies: + - incidental_vyos_prepare_tests diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml new file mode 100644 index 00000000..83496e0e --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml @@ -0,0 +1,19 @@ +--- +- name: Collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + use_regex: true + register: test_cases + delegate_to: localhost + +- name: Set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: Run test case (connection=ansible.netcommon.network_cli) + include: "{{ test_case_to_run }}" + vars: + ansible_connection: ansible.netcommon.network_cli + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml new file mode 100644 index 00000000..d4cf26fc --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- {include: cli.yaml, tags: ['cli']} diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml new file mode 100644 index 00000000..3acded63 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml @@ -0,0 +1,14 @@ +--- +- name: Setup + ansible.netcommon.cli_config: + config: "{{ lines }}" + vars: + lines: | + set service lldp interface eth1 + set service lldp interface eth1 location civic-based country-code US + set service lldp interface eth1 location civic-based ca-type 0 ca-value ENGLISH + set service lldp interface eth2 + set service lldp interface eth2 location coordinate-based latitude 33.524449N + set service lldp interface eth2 location coordinate-based altitude 2200 + set service lldp interface eth2 location coordinate-based datum WGS84 + set service lldp interface eth2 location coordinate-based longitude 222.267255W diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml new file mode 100644 index 00000000..c7ab1ae7 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml @@ -0,0 +1,10 @@ +--- +- name: Setup + ansible.netcommon.cli_config: + config: "{{ lines }}" + vars: + lines: | + set service lldp interface eth2 + set service lldp interface eth2 location civic-based country-code US + set service lldp interface eth2 location civic-based ca-type 0 ca-value ENGLISH + set service lldp interface eth2 disable diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml new file mode 100644 index 00000000..1b1a3b33 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml @@ -0,0 +1,8 @@ +--- +- name: Remove Config + 
ansible.netcommon.cli_config: + config: "{{ lines }}" + vars: + lines: | + delete service lldp interface + delete service lldp diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml new file mode 100644 index 00000000..7b2d53a3 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml @@ -0,0 +1,46 @@ +--- +- debug: + msg: "Start vyos_lldp_interfaces deleted integration tests ansible_connection={{ ansible_connection }}" + +- include_tasks: _populate.yaml + +- block: + - name: Delete attributes of given LLDP interfaces. + vyos.vyos.vyos_lldp_interfaces: &deleted + config: + - name: 'eth1' + - name: 'eth2' + state: deleted + register: result + + - name: Assert that the before dicts were correctly generated + assert: + that: + - "{{ populate | symmetric_difference(result['before']) |length == 0 }}" + + - name: Assert that the correct set of commands were generated + assert: + that: + - "{{ deleted['commands'] | symmetric_difference(result['commands']) |length == 0 }}" + + - name: Assert that the after dicts were correctly generated + assert: + that: + - "{{ deleted['after'] | symmetric_difference(result['after']) |length == 0 }}" + + - name: Delete attributes of given interfaces (IDEMPOTENT) + vyos.vyos.vyos_lldp_interfaces: *deleted + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result.changed == false" + - "result.commands|length == 0" + + - name: Assert that the before dicts were correctly generated + assert: + that: + - "{{ deleted['after'] | symmetric_difference(result['before']) |length == 0 }}" + always: + - include_tasks: _remove_config.yaml diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml new file mode 100644 index 00000000..44c0b894 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml @@ -0,0 +1,36 @@ +--- +- debug: + msg: "START vyos_lldp_interfaces empty_config integration tests on connection={{ ansible_connection }}" + +- name: Merged with empty config should give appropriate error message + vyos.vyos.vyos_lldp_interfaces: + config: + state: merged + register: result + ignore_errors: true + +- assert: + that: + - result.msg == 'value of config parameter must not be empty for state merged' + +- name: Replaced with empty config should give appropriate error message + vyos.vyos.vyos_lldp_interfaces: + config: + state: replaced + register: result + ignore_errors: true + +- assert: + that: + - result.msg == 'value of config parameter must not be empty for state replaced' + +- name: Overridden with empty config should give appropriate error message + vyos.vyos.vyos_lldp_interfaces: + config: + state: overridden + register: result + ignore_errors: true + +- assert: + that: + - result.msg == 'value of config parameter must not be empty for state overridden' diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml new file mode 100644 index 00000000..bf968b21 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml @@ -0,0 +1,58 @@ +--- +- debug: + msg: "START vyos_lldp_interfaces merged integration tests on connection={{ 
ansible_connection }}" + +- include_tasks: _remove_config.yaml + +- block: + - name: Merge the provided configuration with the exisiting running configuration + vyos.vyos.vyos_lldp_interfaces: &merged + config: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + state: merged + register: result + + - name: Assert that before dicts were correctly generated + assert: + that: "{{ merged['before'] | symmetric_difference(result['before']) |length == 0 }}" + + - name: Assert that correct set of commands were generated + assert: + that: + - "{{ merged['commands'] | symmetric_difference(result['commands']) |length == 0 }}" + + - name: Assert that after dicts was correctly generated + assert: + that: + - "{{ merged['after'] | symmetric_difference(result['after']) |length == 0 }}" + + - name: Merge the provided configuration with the existing running configuration (IDEMPOTENT) + vyos.vyos.vyos_lldp_interfaces: *merged + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + - name: Assert that before dicts were correctly generated + assert: + that: + - "{{ merged['after'] | symmetric_difference(result['before']) |length == 0 }}" + + always: + - include_tasks: _remove_config.yaml diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml new file mode 100644 index 00000000..8cf038c9 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml @@ -0,0 +1,49 @@ +--- +- debug: + msg: "START vyos_lldp_interfaces overridden integration tests on connection={{ ansible_connection }}" + +- include_tasks: _remove_config.yaml + +- include_tasks: _populate_intf.yaml + +- block: + - name: Overrides all device configuration with provided configuration + vyos.vyos.vyos_lldp_interfaces: &overridden + config: + - name: 'eth2' + location: + elin: '0000000911' + state: overridden + register: result + + - name: Assert that before dicts were correctly generated + assert: + that: + - "{{ populate_intf | symmetric_difference(result['before']) |length == 0 }}" + + - name: Assert that correct commands were generated + assert: + that: + - "{{ overridden['commands'] | symmetric_difference(result['commands']) |length == 0 }}" + + - name: Assert that after dicts were correctly generated + assert: + that: + - "{{ overridden['after'] | symmetric_difference(result['after']) |length == 0 }}" + + - name: Overrides all device configuration with provided configurations (IDEMPOTENT) + vyos.vyos.vyos_lldp_interfaces: *overridden + register: result + + - name: Assert that the previous task was idempotent + assert: + that: + - "result['changed'] == false" + + - name: Assert that before dicts were correctly generated + assert: + that: + - "{{ overridden['after'] | symmetric_difference(result['before']) |length == 0 }}" + + always: + - include_tasks: _remove_config.yaml diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml new file mode 100644 index 00000000..17acf065 --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml @@ -0,0 +1,63 @@ +--- +- 
+- debug: + msg: "START vyos_lldp_interfaces replaced integration tests on connection={{ ansible_connection }}" + +- include_tasks: _remove_config.yaml + +- include_tasks: _populate.yaml + +- block: + - name: Replace device configurations of listed LLDP interfaces with provided configurations + vyos.vyos.vyos_lldp_interfaces: &replaced + config: + - name: 'eth2' + enable: false + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth1' + enable: false + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + state: replaced + register: result + + - name: Assert that correct set of commands were generated + assert: + that: + - "{{ replaced['commands'] | symmetric_difference(result['commands']) |length == 0 }}" + + - name: Assert that before dicts are correctly generated + assert: + that: + - "{{ populate | symmetric_difference(result['before']) |length == 0 }}" + + - name: Assert that after dict is correctly generated + assert: + that: + - "{{ replaced['after'] | symmetric_difference(result['after']) |length == 0 }}" + + - name: Replace device configurations of listed LLDP interfaces with provided configurations (IDEMPOTENT) + vyos.vyos.vyos_lldp_interfaces: *replaced + register: result + + - name: Assert that task was idempotent + assert: + that: + - "result['changed'] == false" + + - name: Assert that before dict is correctly generated + assert: + that: + - "{{ replaced['after'] | symmetric_difference(result['before']) |length == 0 }}" + + always: + - include_tasks: _remove_config.yaml diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml new file mode 100644 index 00000000..4d4cf82c --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml @@ -0,0 +1,57 @@ +--- +- debug: + msg: "START vyos_lldp_interfaces round trip integration tests on connection={{ ansible_connection }}" + +- include_tasks: _remove_config.yaml + +- block: + - name: Apply the provided configuration (base config) + vyos.vyos.vyos_lldp_interfaces: + config: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + state: merged + register: base_config + + - name: Gather lldp_interfaces facts + vyos.vyos.vyos_facts: + gather_subset: + - default + gather_network_resources: + - lldp_interfaces + + - name: Apply the provided configuration (config to be reverted) + vyos.vyos.vyos_lldp_interfaces: + config: + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + state: merged + register: result + + - name: Assert that changes were applied + assert: + that: "{{ round_trip['after'] | symmetric_difference(result['after']) |length == 0 }}" + + - name: Revert back to base config using facts round trip + vyos.vyos.vyos_lldp_interfaces: + config: "{{ ansible_facts['network_resources']['lldp_interfaces'] }}" + state: overridden + register: revert + + - name: Assert that config was reverted + assert: + that: "{{ base_config['after'] | symmetric_difference(revert['after']) |length == 0 }}" + + always: + - include_tasks: _remove_config.yaml diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml new file mode 100644 index 
00000000..169b0d5d --- /dev/null +++ b/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml @@ -0,0 +1,126 @@ +--- +merged: + before: [] + + + commands: + - "set service lldp interface eth1 location civic-based country-code 'US'" + - "set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'" + - "set service lldp interface eth1" + - "set service lldp interface eth2 location coordinate-based latitude '33.524449N'" + - "set service lldp interface eth2 location coordinate-based altitude '2200'" + - "set service lldp interface eth2 location coordinate-based datum 'WGS84'" + - "set service lldp interface eth2 location coordinate-based longitude '222.267255W'" + - "set service lldp interface eth2" + + after: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + +populate: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + +replaced: + commands: + - "delete service lldp interface eth2 location" + - "set service lldp interface eth2 'disable'" + - "set service lldp interface eth2 location civic-based country-code 'US'" + - "set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'" + - "delete service lldp interface eth1 location" + - "set service lldp interface eth1 'disable'" + - "set service lldp interface eth1 location coordinate-based latitude '33.524449N'" + - "set service lldp interface eth1 location coordinate-based altitude '2200'" + - "set service lldp interface eth1 location coordinate-based datum 'WGS84'" + - "set service lldp interface eth1 location coordinate-based longitude '222.267255W'" + + after: + - name: 'eth2' + enable: false + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth1' + enable: false + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + +populate_intf: + - name: 'eth2' + enable: false + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + +overridden: + commands: + - "delete service lldp interface eth2 location" + - "delete service lldp interface eth2 'disable'" + - "set service lldp interface eth2 location elin '0000000911'" + + after: + - name: 'eth2' + location: + elin: '0000000911' + +deleted: + commands: + - "delete service lldp interface eth1" + - "delete service lldp interface eth2" + + after: [] + +round_trip: + after: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' diff --git a/test/integration/targets/incidental_vyos_prepare_tests/aliases b/test/integration/targets/incidental_vyos_prepare_tests/aliases new file 
mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_vyos_prepare_tests/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml b/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml new file mode 100644 index 00000000..ac0b4922 --- /dev/null +++ b/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml @@ -0,0 +1,13 @@ +--- +- name: Ensure required interfaces are present in running-config + ansible.netcommon.cli_config: + config: "{{ lines }}" + vars: + lines: | + set interfaces ethernet eth0 address dhcp + set interfaces ethernet eth0 speed auto + set interfaces ethernet eth0 duplex auto + set interfaces ethernet eth1 + set interfaces ethernet eth2 + delete interfaces loopback lo + ignore_errors: true diff --git a/test/integration/targets/incidental_win_copy/aliases b/test/integration/targets/incidental_win_copy/aliases new file mode 100644 index 00000000..a5fc90dc --- /dev/null +++ b/test/integration/targets/incidental_win_copy/aliases @@ -0,0 +1,2 @@ +shippable/windows/incidental +windows diff --git a/test/integration/targets/incidental_win_copy/defaults/main.yml b/test/integration/targets/incidental_win_copy/defaults/main.yml new file mode 100644 index 00000000..5d8a1d23 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/defaults/main.yml @@ -0,0 +1 @@ +test_win_copy_path: C:\ansible\win_copy .ÅÑŚÌβŁÈ [$!@^&test(;)] diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file b/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file new file mode 100644 index 00000000..d8d15498 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +65653164323866373138353632323531393664393563633665373635623763353561386431373366 +3232353263363034313136663062623336663463373966320a333763323032646463386432626161 +36386330356637666362396661653935653064623038333031653335626164376465353235303636 +3335616231663838620a303632343938326538656233393562303162343261383465623261646664 +33613932343461626339333832363930303962633364303736376634396364643861 diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt b/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt new file mode 100644 index 00000000..dae883b5 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt @@ -0,0 +1,5 @@ +This directory contains some files that have been encrypted with ansible-vault. + +This is to test out the decrypt parameter in win_copy. 
+ +The password is: password diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/vault-file b/test/integration/targets/incidental_win_copy/files-different/vault/vault-file new file mode 100644 index 00000000..2fff7619 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files-different/vault/vault-file @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +30353665333635633433356261616636356130386330363962386533303566313463383734373532 +3933643234323638623939613462346361313431363939370a303532656338353035346661353965 +34656231633238396361393131623834316262306533663838336362366137306562646561383766 +6363373965633337640a373666336461613337346131353564383134326139616561393664663563 +3431 diff --git a/test/integration/targets/incidental_win_copy/files/empty.txt b/test/integration/targets/incidental_win_copy/files/empty.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_win_copy/files/foo.txt b/test/integration/targets/incidental_win_copy/files/foo.txt new file mode 100644 index 00000000..7c6ded14 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/targets/incidental_win_copy/files/subdir/bar.txt b/test/integration/targets/incidental_win_copy/files/subdir/bar.txt new file mode 100644 index 00000000..76018072 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files/subdir/bar.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt new file mode 100644 index 00000000..76018072 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt new file mode 100644 index 00000000..78df5b06 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt @@ -0,0 +1 @@ +qux \ No newline at end of file diff --git a/test/integration/targets/incidental_win_copy/tasks/main.yml b/test/integration/targets/incidental_win_copy/tasks/main.yml new file mode 100644 index 00000000..b2ee103f --- /dev/null +++ b/test/integration/targets/incidental_win_copy/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: create empty folder + file: + path: '{{role_path}}/files/subdir/empty' + state: directory + delegate_to: localhost + +# removes the cached zip module from the previous task so we can replicate +# the below issue where win_copy would delete DEFAULT_LOCAL_TMP if it +# had permission to +# https://github.com/ansible/ansible/issues/35613 +- name: clear the local ansiballz cache + file: + path: "{{lookup('config', 'DEFAULT_LOCAL_TMP')}}/ansiballz_cache" + state: absent + delegate_to: localhost + +- name: create test folder + win_file: + path: '{{test_win_copy_path}}' + state: directory + +- block: + - name: run tests for local to remote + include_tasks: tests.yml + + - name: run tests for remote to remote + include_tasks: remote_tests.yml + + always: + - name: remove test folder + win_file: + path: '{{test_win_copy_path}}' + state: absent diff --git a/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml b/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml new file mode 100644 index 00000000..5abb5020 --- /dev/null 
+++ b/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml @@ -0,0 +1,471 @@ +--- +- name: fail when source does not exist remote + win_copy: + src: fakesource + dest: fakedest + remote_src: yes + register: fail_remote_invalid_source + failed_when: "'it does not exist' not in fail_remote_invalid_source.msg" + +- name: setup source folder for remote tests + win_copy: + src: files/ + dest: '{{test_win_copy_path}}\source\' + +- name: setup remote failure tests + win_file: + path: '{{item.path}}' + state: '{{item.state}}' + with_items: + - { 'path': '{{test_win_copy_path}}\target\folder', 'state': 'directory' } + - { 'path': '{{test_win_copy_path}}\target\file', 'state': 'touch' } + - { 'path': '{{test_win_copy_path}}\target\subdir', 'state': 'touch' } + +- name: fail source is a file but dest is a folder + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\folder' + remote_src: yes + register: fail_remote_file_to_folder + failed_when: "'dest is already a folder' not in fail_remote_file_to_folder.msg" + +- name: fail source is a folder but dest is a file + win_copy: + src: '{{test_win_copy_path}}\source\' + dest: '{{test_win_copy_path}}\target\' + remote_src: yes + register: fail_remote_folder_to_file + failed_when: "'dest is already a file' not in fail_remote_folder_to_file.msg" + +- name: fail source is a file dest parent dir is also a file + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\file\foo.txt' + remote_src: yes + register: fail_remote_file_parent_dir_file + failed_when: "'is currently a file' not in fail_remote_file_parent_dir_file.msg" + +- name: fail source is a folder dest parent dir is also a file + win_copy: + src: '{{test_win_copy_path}}\source\subdir' + dest: '{{test_win_copy_path}}\target\file' + remote_src: yes + register: fail_remote_folder_parent_dir_file + failed_when: "'object at dest parent dir is not a folder' not in fail_remote_folder_parent_dir_file.msg" + +- name: fail to copy a remote file with parent dir that doesn't exist and filename is set + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\missing-dir\foo.txt' + remote_src: yes + register: fail_remote_missing_parent_dir + failed_when: "'does not exist' not in fail_remote_missing_parent_dir.msg" + +- name: remove target after remote failure tests + win_file: + path: '{{test_win_copy_path}}\target' + state: absent + +- name: create remote target after cleaning + win_file: + path: '{{test_win_copy_path}}\target' + state: directory + +- name: copy single file remote (check mode) + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\foo-target.txt' + remote_src: yes + register: remote_copy_file_check + check_mode: yes + +- name: get result of copy single file remote (check mode) + win_stat: + path: '{{test_win_copy_path}}\target\foo-target.txt' + register: remote_copy_file_actual_check + +- name: assert copy single file remote (check mode) + assert: + that: + - remote_copy_file_check is changed + - remote_copy_file_actual_check.stat.exists == False + +- name: copy single file remote + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\foo-target.txt' + remote_src: yes + register: remote_copy_file + +- name: get result of copy single file remote + win_stat: + path: '{{test_win_copy_path}}\target\foo-target.txt' + register: remote_copy_file_actual + +- name: assert copy single file 
remote + assert: + that: + - remote_copy_file is changed + - remote_copy_file.operation == 'file_copy' + - remote_copy_file.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_file.size == 8 + - remote_copy_file.original_basename == 'foo.txt' + - remote_copy_file_actual.stat.exists == True + - remote_copy_file_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: copy single file remote (idempotent) + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\foo-target.txt' + remote_src: yes + register: remote_copy_file_again + +- name: assert copy single file remote (idempotent) + assert: + that: + - remote_copy_file_again is not changed + +- name: copy single file into folder remote (check mode) + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\' + remote_src: yes + register: remote_copy_file_to_folder_check + check_mode: yes + +- name: get result of copy single file into folder remote (check mode) + win_stat: + path: '{{test_win_copy_path}}\target\foo.txt' + register: remote_copy_file_to_folder_actual_check + +- name: assert copy single file into folder remote (check mode) + assert: + that: + - remote_copy_file_to_folder_check is changed + - remote_copy_file_to_folder_actual_check.stat.exists == False + +- name: copy single file into folder remote + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\' + remote_src: yes + register: remote_copy_file_to_folder + +- name: get result of copy single file into folder remote + win_stat: + path: '{{test_win_copy_path}}\target\foo.txt' + register: remote_copy_file_to_folder_actual + +- name: assert copy single file into folder remote + assert: + that: + - remote_copy_file_to_folder is changed + - remote_copy_file_to_folder.operation == 'file_copy' + - remote_copy_file_to_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_file_to_folder.size == 8 + - remote_copy_file_to_folder.original_basename == 'foo.txt' + - remote_copy_file_to_folder_actual.stat.exists == True + - remote_copy_file_to_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: copy single file into folder remote (idempotent) + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\' + remote_src: yes + register: remote_copy_file_to_folder_again + +- name: assert copy single file into folder remote + assert: + that: + - remote_copy_file_to_folder_again is not changed + +- name: copy single file to missing folder (check mode) + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\missing\' + remote_src: yes + register: remote_copy_file_to_missing_folder_check + check_mode: yes + +- name: get result of copy single file to missing folder remote (check mode) + win_stat: + path: '{{test_win_copy_path}}\target\missing\foo.txt' + register: remote_copy_file_to_missing_folder_actual_check + +- name: assert copy single file to missing folder remote (check mode) + assert: + that: + - remote_copy_file_to_missing_folder_check is changed + - remote_copy_file_to_missing_folder_check.operation == 'file_copy' + - remote_copy_file_to_missing_folder_actual_check.stat.exists == False + +- name: copy single file to missing folder remote + win_copy: + src: '{{test_win_copy_path}}\source\foo.txt' + dest: '{{test_win_copy_path}}\target\missing\' + remote_src: yes + register: 
remote_copy_file_to_missing_folder + +- name: get result of copy single file to missing folder remote + win_stat: + path: '{{test_win_copy_path}}\target\missing\foo.txt' + register: remote_copy_file_to_missing_folder_actual + +- name: assert copy single file to missing folder remote + assert: + that: + - remote_copy_file_to_missing_folder is changed + - remote_copy_file_to_missing_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_file_to_missing_folder.operation == 'file_copy' + - remote_copy_file_to_missing_folder.size == 8 + - remote_copy_file_to_missing_folder_actual.stat.exists == True + - remote_copy_file_to_missing_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: clear target for folder to folder test + win_file: + path: '{{test_win_copy_path}}\target' + state: absent + +- name: copy folder to folder remote (check mode) + win_copy: + src: '{{test_win_copy_path}}\source' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_to_folder_check + check_mode: yes + +- name: get result of copy folder to folder remote (check mode) + win_stat: + path: '{{test_win_copy_path}}\target' + register: remote_copy_folder_to_folder_actual_check + +- name: assert copy folder to folder remote (check mode) + assert: + that: + - remote_copy_folder_to_folder_check is changed + - remote_copy_folder_to_folder_check.operation == 'folder_copy' + - remote_copy_folder_to_folder_actual_check.stat.exists == False + +- name: copy folder to folder remote + win_copy: + src: '{{test_win_copy_path}}\source' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_to_folder + +- name: get result of copy folder to folder remote + win_find: + paths: '{{test_win_copy_path}}\target' + recurse: yes + file_type: directory + register: remote_copy_folder_to_folder_actual + +- name: assert copy folder to folder remote + assert: + that: + - remote_copy_folder_to_folder is changed + - remote_copy_folder_to_folder.operation == 'folder_copy' + - remote_copy_folder_to_folder_actual.examined == 11 + - remote_copy_folder_to_folder_actual.matched == 6 + - remote_copy_folder_to_folder_actual.files[0].filename == 'source' + - remote_copy_folder_to_folder_actual.files[1].filename == 'subdir' + - remote_copy_folder_to_folder_actual.files[2].filename == 'empty' + - remote_copy_folder_to_folder_actual.files[3].filename == 'subdir2' + - remote_copy_folder_to_folder_actual.files[4].filename == 'subdir3' + - remote_copy_folder_to_folder_actual.files[5].filename == 'subdir4' + +- name: copy folder to folder remote (idempotent) + win_copy: + src: '{{test_win_copy_path}}\source' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_to_folder_again + +- name: assert copy folder to folder remote (idempotent) + assert: + that: + - remote_copy_folder_to_folder_again is not changed + +- name: change remote file after folder to folder test + win_copy: + content: bar.txt + dest: '{{test_win_copy_path}}\target\source\foo.txt' + +- name: remove remote folder after folder to folder test + win_file: + path: '{{test_win_copy_path}}\target\source\subdir\subdir2\subdir3\subdir4' + state: absent + +- name: copy folder to folder remote after change + win_copy: + src: '{{test_win_copy_path}}\source' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_to_folder_after_change + +- name: get result of copy folder to folder remote after change + win_find: + paths: '{{test_win_copy_path}}\target\source' + recurse: yes + patterns: ['foo.txt', 'qux.txt'] + register: remote_copy_folder_to_folder_after_change_actual + +- name: assert copy folder after changes + assert: + that: + - remote_copy_folder_to_folder_after_change is changed + - remote_copy_folder_to_folder_after_change_actual.matched == 2 + - remote_copy_folder_to_folder_after_change_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_folder_to_folder_after_change_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43'
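+ # Note (editorial): win_copy reports file contents via its SHA-1 'checksum' + # return value, so the two assertions above pin the restored foo.txt and the + # re-copied qux.txt to their expected SHA-1 values.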
'{{test_win_copy_path}}\target\source' + recurse: yes + patterns: ['foo.txt', 'qux.txt'] + register: remote_copy_folder_to_folder_after_change_actual + +- name: assert copy folder after changes + assert: + that: + - remote_copy_folder_to_folder_after_change is changed + - remote_copy_folder_to_folder_after_change_actual.matched == 2 + - remote_copy_folder_to_folder_after_change_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_folder_to_folder_after_change_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43' + +- name: clear target folder before folder contents to remote test + win_file: + path: '{{test_win_copy_path}}\target' + state: absent + +- name: copy folder contents to folder remote with backslash (check mode) + win_copy: + src: '{{test_win_copy_path}}\source\' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_content_backslash_check + check_mode: yes + +- name: get result of copy folder contents to folder remote with backslash (check mode) + win_stat: + path: '{{test_win_copy_path}}\target' + register: remote_copy_folder_content_backslash_actual_check + +- name: assert copy folder content to folder remote with backslash (check mode) + assert: + that: + - remote_copy_folder_content_backslash_check is changed + - remote_copy_folder_content_backslash_actual_check.stat.exists == False + +- name: copy folder contents to folder remote with backslash + win_copy: + src: '{{test_win_copy_path}}\source\' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_content_backslash + +- name: get result of copy folder contents to folder remote with backslash + win_find: + paths: '{{test_win_copy_path}}\target' + recurse: yes + file_type: directory + register: remote_copy_folder_content_backslash_actual + +- name: assert copy folder content to folder remote with backslash + assert: + that: + - remote_copy_folder_content_backslash is changed + - remote_copy_folder_content_backslash.operation == 'folder_copy' + - remote_copy_folder_content_backslash_actual.examined == 10 + - remote_copy_folder_content_backslash_actual.matched == 5 + - remote_copy_folder_content_backslash_actual.files[0].filename == 'subdir' + - remote_copy_folder_content_backslash_actual.files[1].filename == 'empty' + - remote_copy_folder_content_backslash_actual.files[2].filename == 'subdir2' + - remote_copy_folder_content_backslash_actual.files[3].filename == 'subdir3' + - remote_copy_folder_content_backslash_actual.files[4].filename == 'subdir4' + +- name: copy folder contents to folder remote with backslash (idempotent) + win_copy: + src: '{{test_win_copy_path}}\source\' + dest: '{{test_win_copy_path}}\target' + remote_src: yes + register: remote_copy_folder_content_backslash_again + +- name: assert copy folder content to folder remote with backslash (idempotent) + assert: + that: + - remote_copy_folder_content_backslash_again is not changed + +- name: change remote file after folder content to folder test + win_copy: + content: bar.txt + dest: '{{test_win_copy_path}}\target\foo.txt' + +- name: remove remote folder after folder content to folder test + win_file: + path: '{{test_win_copy_path}}\target\subdir\subdir2\subdir3\subdir4' + state: absent + +- name: copy folder content to folder remote after change + win_copy: + src: '{{test_win_copy_path}}/source/' + dest: '{{test_win_copy_path}}/target/' + remote_src: yes + register: remote_copy_folder_content_to_folder_after_change + +- name: get result of
copy folder content to folder remote after change + win_find: + paths: '{{test_win_copy_path}}\target' + recurse: yes + patterns: ['foo.txt', 'qux.txt'] + register: remote_copy_folder_content_to_folder_after_change_actual + +- name: assert copy folder content to folder after changes + assert: + that: + - remote_copy_folder_content_to_folder_after_change is changed + - remote_copy_folder_content_to_folder_after_change_actual.matched == 2 + - remote_copy_folder_content_to_folder_after_change_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - remote_copy_folder_content_to_folder_after_change_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43' + +# https://github.com/ansible/ansible/issues/50077 +- name: create empty nested directory + win_file: + path: '{{ test_win_copy_path }}\source\empty-nested\nested-dir' + state: directory + +- name: copy empty nested directory (check mode) + win_copy: + src: '{{ test_win_copy_path }}\source\empty-nested' + dest: '{{ test_win_copy_path }}\target' + remote_src: True + check_mode: True + register: copy_empty_dir_check + +- name: get result of copy empty nested directory (check mode) + win_stat: + path: '{{ test_win_copy_path }}\target\empty-nested' + register: copy_empty_dir_actual_check + +- name: assert copy empty nested directory (check mode) + assert: + that: + - copy_empty_dir_check is changed + - copy_empty_dir_check.operation == "folder_copy" + - not copy_empty_dir_actual_check.stat.exists + +- name: copy empty nested directory + win_copy: + src: '{{ test_win_copy_path }}\source\empty-nested' + dest: '{{ test_win_copy_path }}\target' + remote_src: True + register: copy_empty_dir + +- name: get result of copy empty nested directory + win_stat: + path: '{{ test_win_copy_path }}\target\empty-nested\nested-dir' + register: copy_empty_dir_actual + +- name: assert copy empty nested directory + assert: + that: + - copy_empty_dir is changed + - copy_empty_dir.operation == "folder_copy" + - copy_empty_dir_actual.stat.exists + +- name: copy empty nested directory (idempotent) + win_copy: + src: '{{ test_win_copy_path }}\source\empty-nested' + dest: '{{ test_win_copy_path }}\target' + remote_src: True + register: copy_empty_dir_again + +- name: assert copy empty nested directory (idempotent) + assert: + that: + - not copy_empty_dir_again is changed diff --git a/test/integration/targets/incidental_win_copy/tasks/tests.yml b/test/integration/targets/incidental_win_copy/tasks/tests.yml new file mode 100644 index 00000000..d15e71f6 --- /dev/null +++ b/test/integration/targets/incidental_win_copy/tasks/tests.yml @@ -0,0 +1,535 @@ +--- +- name: fail no source or content + win_copy: + dest: dest + register: fail_no_source_content + failed_when: fail_no_source_content.msg != 'src (or content) and dest are required' + +- name: fail content but dest isn't a file, unix ending + win_copy: + content: a + dest: a/ + register: fail_dest_not_file_unix + failed_when: fail_dest_not_file_unix.msg != 'dest must be a file if content is defined' + +- name: fail content but dest isn't a file, windows ending + win_copy: + content: a + dest: a\ + register: fail_dest_not_file_windows + failed_when: fail_dest_not_file_windows.msg != 'dest must be a file if content is defined' + +- name: fail to copy a file with parent dir that doesn't exist and filename is set + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\missing-dir\foo.txt' + register: fail_missing_parent_dir + failed_when: "'does not exist' not in fail_missing_parent_dir.msg" + 
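+# The next tasks exercise vault-encrypted sources. As an illustrative sketch only
+# (the upstream fixture already exists), a fixture like files-different/vault/vault-file
+# could be produced with the ansible-vault CLI, e.g.:
+#   ansible-vault encrypt --vault-password-file vault-pass.txt vault-file
+# where vault-pass.txt is a hypothetical file holding the test vault password.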
+- name: fail to copy an encrypted file without the password set + win_copy: + src: '{{role_path}}/files-different/vault/vault-file' + dest: '{{test_win_copy_path}}\file' + register: fail_copy_encrypted_file + ignore_errors: yes # weird failed_when doesn't work in this case + +- name: assert failure message when copying an encrypted file without the password set + assert: + that: + - fail_copy_encrypted_file is failed + - fail_copy_encrypted_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file' + +- name: fail to copy a directory with an encrypted file without the password + win_copy: + src: '{{role_path}}/files-different/vault' + dest: '{{test_win_copy_path}}' + register: fail_copy_directory_with_enc_file + ignore_errors: yes + +- name: assert failure message when copying a directory that contains an encrypted file without the password set + assert: + that: + - fail_copy_directory_with_enc_file is failed + - fail_copy_directory_with_enc_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file' + +- name: copy with content (check mode) + win_copy: + content: a + dest: '{{test_win_copy_path}}\file' + register: copy_content_check + check_mode: yes + +- name: get result of copy with content (check mode) + win_stat: + path: '{{test_win_copy_path}}\file' + register: copy_content_actual_check + +- name: assert copy with content (check mode) + assert: + that: + - copy_content_check is changed + - copy_content_check.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' + - copy_content_check.operation == 'file_copy' + - copy_content_check.size == 1 + - copy_content_actual_check.stat.exists == False + +- name: copy with content + win_copy: + content: a + dest: '{{test_win_copy_path}}\file' + register: copy_content + +- name: get result of copy with content + win_stat: + path: '{{test_win_copy_path}}\file' + register: copy_content_actual + +- name: assert copy with content + assert: + that: + - copy_content is changed + - copy_content.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' + - copy_content.operation == 'file_copy' + - copy_content.size == 1 + - copy_content_actual.stat.exists == True + - copy_content_actual.stat.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' + +- name: copy with content (idempotent) + win_copy: + content: a + dest: '{{test_win_copy_path}}\file' + register: copy_content_again + +- name: assert copy with content (idempotent) + assert: + that: + - copy_content_again is not changed + +- name: copy with content change when missing + win_copy: + content: b + dest: '{{test_win_copy_path}}\file' + force: no + register: copy_content_when_missing + +- name: assert copy with content change when missing + assert: + that: + - copy_content_when_missing is not changed + +- name: copy single file (check mode) + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\foo-target.txt' + register: copy_file_check + check_mode: yes + +- name: get result of copy single file (check mode) + win_stat: + path: '{{test_win_copy_path}}\foo-target.txt' + register: copy_file_actual_check + +- name: assert copy single file (check mode) + assert: + that: + - copy_file_check is changed + - copy_file_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file_check.dest == test_win_copy_path + '\\foo-target.txt' + - copy_file_check.operation == 'file_copy' + - copy_file_check.size == 8 + - copy_file_actual_check.stat.exists == False + +- name: copy single 
file + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\foo-target.txt' + register: copy_file + +- name: get result of copy single file + win_stat: + path: '{{test_win_copy_path}}\foo-target.txt' + register: copy_file_actual + +- name: assert copy single file + assert: + that: + - copy_file is changed + - copy_file.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file.dest == test_win_copy_path + '\\foo-target.txt' + - copy_file.operation == 'file_copy' + - copy_file.size == 8 + - copy_file_actual.stat.exists == True + - copy_file_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: copy single file (idempotent) + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\foo-target.txt' + register: copy_file_again + +- name: assert copy single file (idempotent) + assert: + that: + - copy_file_again is not changed + +- name: copy single file (backup) + win_copy: + content: "{{ lookup('file', 'foo.txt') }}\nfoo bar" + dest: '{{test_win_copy_path}}\foo-target.txt' + backup: yes + register: copy_file_backup + +- name: check backup_file + win_stat: + path: '{{ copy_file_backup.backup_file }}' + register: backup_file + +- name: assert copy single file (backup) + assert: + that: + - copy_file_backup is changed + - backup_file.stat.exists == true + +- name: copy single file to folder (check mode) + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\' + register: copy_file_to_folder_check + check_mode: yes + +- name: get result of copy single file to folder (check mode) + win_stat: + path: '{{test_win_copy_path}}\foo.txt' + register: copy_file_to_folder_actual_check + +- name: assert copy single file to folder (check mode) + assert: + that: + - copy_file_to_folder_check is changed + - copy_file_to_folder_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file_to_folder_check.dest == test_win_copy_path + '\\foo.txt' + - copy_file_to_folder_check.operation == 'file_copy' + - copy_file_to_folder_check.size == 8 + - copy_file_to_folder_actual_check.stat.exists == False + +- name: copy single file to folder + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\' + register: copy_file_to_folder + +- name: get result of copy single file to folder + win_stat: + path: '{{test_win_copy_path}}\foo.txt' + register: copy_file_to_folder_actual + +- name: assert copy single file to folder + assert: + that: + - copy_file_to_folder is changed + - copy_file_to_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file_to_folder.dest == test_win_copy_path + '\\foo.txt' + - copy_file_to_folder.operation == 'file_copy' + - copy_file_to_folder.size == 8 + - copy_file_to_folder_actual.stat.exists == True + - copy_file_to_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: copy single file to folder (idempotent) + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\' + register: copy_file_to_folder_again + +- name: assert copy single file to folder (idempotent) + assert: + that: + - copy_file_to_folder_again is not changed + +- name: copy single file to missing folder (check mode) + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\missing\' + register: copy_file_to_missing_folder_check + check_mode: yes + +- name: get result of copy single file to missing folder (check mode) + win_stat: + path: '{{test_win_copy_path}}\missing\foo.txt' + register: copy_file_to_missing_folder_actual_check + +- name: assert copy single file to missing folder (check mode) + assert: + that: + - 
copy_file_to_missing_folder_check is changed + - copy_file_to_missing_folder_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file_to_missing_folder_check.operation == 'file_copy' + - copy_file_to_missing_folder_check.size == 8 + - copy_file_to_missing_folder_actual_check.stat.exists == False + +- name: copy single file to missing folder + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\missing\' + register: copy_file_to_missing_folder + +- name: get result of copy single file to missing folder + win_stat: + path: '{{test_win_copy_path}}\missing\foo.txt' + register: copy_file_to_missing_folder_actual + +- name: assert copy single file to missing folder + assert: + that: + - copy_file_to_missing_folder is changed + - copy_file_to_missing_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_file_to_missing_folder.operation == 'file_copy' + - copy_file_to_missing_folder.size == 8 + - copy_file_to_missing_folder_actual.stat.exists == True + - copy_file_to_missing_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + +- name: copy folder (check mode) + win_copy: + src: files + dest: '{{test_win_copy_path}}\recursive\folder' + register: copy_folder_check + check_mode: yes + +- name: get result of copy folder (check mode) + win_stat: + path: '{{test_win_copy_path}}\recursive\folder' + register: copy_folder_actual_check + +- name: assert copy folder (check mode) + assert: + that: + - copy_folder_check is changed + - copy_folder_check.operation == 'folder_copy' + - copy_folder_actual_check.stat.exists == False + +- name: copy folder + win_copy: + src: files + dest: '{{test_win_copy_path}}\recursive\folder' + register: copy_folder + +- name: get result of copy folder + win_find: + paths: '{{test_win_copy_path}}\recursive\folder' + recurse: yes + file_type: directory + register: copy_folder_actual + +- name: assert copy folder + assert: + that: + - copy_folder is changed + - copy_folder.operation == 'folder_copy' + - copy_folder_actual.examined == 11 # includes files and folders; the list below follows the nested order + - copy_folder_actual.matched == 6 + - copy_folder_actual.files[0].filename == 'files' + - copy_folder_actual.files[1].filename == 'subdir' + - copy_folder_actual.files[2].filename == 'empty' + - copy_folder_actual.files[3].filename == 'subdir2' + - copy_folder_actual.files[4].filename == 'subdir3' + - copy_folder_actual.files[5].filename == 'subdir4' + +- name: copy folder (idempotent) + win_copy: + src: files + dest: '{{test_win_copy_path}}\recursive\folder' + register: copy_folder_again + +- name: assert copy folder (idempotent) + assert: + that: + - copy_folder_again is not changed + +- name: change the text of a file in the remote source + win_copy: + content: bar.txt + dest: '{{test_win_copy_path}}\recursive\folder\files\foo.txt' + +- name: remove folder for test of recursive copy + win_file: + path: '{{test_win_copy_path}}\recursive\folder\files\subdir\subdir2\subdir3\subdir4' + state: absent + +- name: copy folder after changes + win_copy: + src: files + dest: '{{test_win_copy_path}}\recursive\folder' + register: copy_folder_after_change + +- name: get result of copy folder after changes + win_find: + paths: '{{test_win_copy_path}}\recursive\folder\files' + recurse: yes + patterns: ['foo.txt', 'qux.txt'] + register: copy_folder_after_changes_actual + +- name: assert copy folder after changes + assert: + that: + - copy_folder_after_change is changed + - copy_folder_after_changes_actual.matched == 2 + -
copy_folder_after_changes_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6' + - copy_folder_after_changes_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43' + +- name: copy folder's contents (check mode) + win_copy: + src: files/ + dest: '{{test_win_copy_path}}\recursive-contents\' + register: copy_folder_contents_check + check_mode: yes + +- name: get result of copy folder's contents (check mode) + win_stat: + path: '{{test_win_copy_path}}\recursive-contents' + register: copy_folder_contents_actual_check + +- name: assert copy folder's contents (check mode) + assert: + that: + - copy_folder_contents_check is changed + - copy_folder_contents_check.operation == 'folder_copy' + - copy_folder_contents_actual_check.stat.exists == False + +- name: copy folder's contents + win_copy: + src: files/ + dest: '{{test_win_copy_path}}\recursive-contents\' + register: copy_folder_contents + +- name: get result of copy folder's contents + win_find: + paths: '{{test_win_copy_path}}\recursive-contents' + recurse: yes + file_type: directory + register: copy_folder_contents_actual + +- name: assert copy folder's contents + assert: + that: + - copy_folder_contents is changed + - copy_folder_contents.operation == 'folder_copy' + - copy_folder_contents_actual.examined == 10 # includes files and folders; the list below follows the nested order + - copy_folder_contents_actual.matched == 5 + - copy_folder_contents_actual.files[0].filename == 'subdir' + - copy_folder_contents_actual.files[1].filename == 'empty' + - copy_folder_contents_actual.files[2].filename == 'subdir2' + - copy_folder_contents_actual.files[3].filename == 'subdir3' + - copy_folder_contents_actual.files[4].filename == 'subdir4' + +- name: fail to copy file to a folder + win_copy: + src: foo.txt + dest: '{{test_win_copy_path}}\recursive-contents' + register: fail_file_to_folder + failed_when: "'object at path is already a directory' not in fail_file_to_folder.msg" + +- name: fail to copy folder to a file + win_copy: + src: subdir/ + dest: '{{test_win_copy_path}}\recursive-contents\foo.txt' + register: fail_folder_to_file + failed_when: "'object at parent directory path is already a file' not in fail_folder_to_file.msg" + +# https://github.com/ansible/ansible/issues/31336 +- name: create file with colon in the name + copy: + dest: '{{role_path}}/files-different/colon:file' + content: test + delegate_to: localhost + +- name: copy a file with colon as a source + win_copy: + src: '{{role_path}}/files-different/colon:file' + dest: '{{test_win_copy_path}}\colon.file' + register: copy_file_with_colon + +- name: get result of file with colon as a source + win_stat: + path: '{{test_win_copy_path}}\colon.file' + register: copy_file_with_colon_result + +- name: assert results of copy a file with colon as a source + assert: + that: + - copy_file_with_colon is changed + - copy_file_with_colon_result.stat.exists == True + - copy_file_with_colon_result.stat.checksum == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3" + +- name: remove file with colon in the name + file: + path: '{{role_path}}/files-different/colon:file' + state: absent + delegate_to: localhost + +- name: copy an encrypted file without decrypting + win_copy: + src: '{{role_path}}/files-different/vault/vault-file' + dest: '{{test_win_copy_path}}\vault-file' + decrypt: no + register: copy_encrypted_file + +- name: get stat of copied encrypted file without decrypting + win_stat: + path: '{{test_win_copy_path}}\vault-file' + register: copy_encrypted_file_result + +- name: assert result of copy an
encrypted file without decrypting + assert: + that: + - copy_encrypted_file is changed + - copy_encrypted_file_result.stat.checksum == "74a89620002d253f38834ee5b06cddd28956a43d" + +- name: copy an encrypted file without decrypting (idempotent) + win_copy: + src: '{{role_path}}/files-different/vault/vault-file' + dest: '{{test_win_copy_path}}\vault-file' + decrypt: no + register: copy_encrypted_file_again + +- name: assert result of copy an encrypted file without decrypting (idempotent) + assert: + that: + - copy_encrypted_file_again is not changed + +- name: copy folder with encrypted files without decrypting + win_copy: + src: '{{role_path}}/files-different/vault/' + dest: '{{test_win_copy_path}}\encrypted-test' + decrypt: no + register: copy_encrypted_file + +- name: get result of copy folder with encrypted files without decrypting + win_find: + paths: '{{test_win_copy_path}}\encrypted-test' + recurse: yes + patterns: '*vault*' + register: copy_encrypted_file_result + +- name: assert result of copy folder with encrypted files without decrypting + assert: + that: + - copy_encrypted_file is changed + - copy_encrypted_file_result.files|count == 2 + - copy_encrypted_file_result.files[0].checksum == "834563c94127730ecfa42dfc1e1821bbda2e51da" + - copy_encrypted_file_result.files[1].checksum == "74a89620002d253f38834ee5b06cddd28956a43d" + +- name: copy folder with encrypted files without decrypting (idempotent) + win_copy: + src: '{{role_path}}/files-different/vault/' + dest: '{{test_win_copy_path}}\encrypted-test' + decrypt: no + register: copy_encrypted_file_again + +- name: assert result of copy folder with encrypted files without decrypting (idempotent) + assert: + that: + - copy_encrypted_file_again is not changed + +- name: remove test folder after local to remote tests + win_file: + path: '{{test_win_copy_path}}' + state: absent diff --git a/test/integration/targets/incidental_win_data_deduplication/aliases b/test/integration/targets/incidental_win_data_deduplication/aliases new file mode 100644 index 00000000..c7657537 --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/aliases @@ -0,0 +1,5 @@ +shippable/windows/incidental +windows +skip/windows/2008 +skip/windows/2008-R2 +skip/windows/2012 diff --git a/test/integration/targets/incidental_win_data_deduplication/meta/main.yml b/test/integration/targets/incidental_win_data_deduplication/meta/main.yml new file mode 100644 index 00000000..9f37e96c --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml new file mode 100644 index 00000000..ae6be90e --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include: pre_test.yml diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml new file mode 100644 index 00000000..f72955e4 --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml @@ -0,0 +1,40 @@ +--- +- set_fact: + AnsibleVhdx: '{{ remote_tmp_dir }}\AnsiblePart.vhdx' + +- name: Install FS-Data-Deduplication + win_feature: + name: FS-Data-Deduplication + include_sub_features: true + state: present + register: data_dedup_feat_reg + +- name: Reboot 
windows after the feature has been installed + win_reboot: + reboot_timeout: 3600 + when: + - data_dedup_feat_reg.success + - data_dedup_feat_reg.reboot_required + +- name: Copy VHDX scripts + win_template: + src: "{{ item.src }}" + dest: '{{ remote_tmp_dir }}\{{ item.dest }}' + loop: + - { src: partition_creation_script.j2, dest: partition_creation_script.txt } + - { src: partition_deletion_script.j2, dest: partition_deletion_script.txt } + +- name: Create partition + win_command: diskpart.exe /s {{ remote_tmp_dir }}\partition_creation_script.txt + +- name: Format T with NTFS + win_format: + drive_letter: T + file_system: ntfs + +- name: Run tests + block: + - include: tests.yml + always: + - name: Detach disk + win_command: diskpart.exe /s {{ remote_tmp_dir }}\partition_deletion_script.txt diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml new file mode 100644 index 00000000..64a42927 --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml @@ -0,0 +1,47 @@ +--- + +- name: Enable Data Deduplication on the T drive - check mode + win_data_deduplication: + drive_letter: "T" + state: present + settings: + no_compress: true + minimum_file_age_days: 2 + minimum_file_size: 0 + check_mode: yes + register: win_data_deduplication_enable_check_mode + +- name: Check that it was successful with a change - check mode + assert: + that: + - win_data_deduplication_enable_check_mode is changed + +- name: Enable Data Deduplication on the T drive + win_data_deduplication: + drive_letter: "T" + state: present + settings: + no_compress: true + minimum_file_age_days: 2 + minimum_file_size: 0 + register: win_data_deduplication_enable + +- name: Check that it was successful with a change + assert: + that: + - win_data_deduplication_enable is changed + +- name: Enable Data Deduplication on the T drive + win_data_deduplication: + drive_letter: "T" + state: present + settings: + no_compress: true + minimum_file_age_days: 2 + minimum_file_size: 0 + register: win_data_deduplication_enable_again + +- name: Check that it was successful without a change + assert: + that: + - win_data_deduplication_enable_again is not changed diff --git a/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2 b/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2 new file mode 100644 index 00000000..8e47fda9 --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2 @@ -0,0 +1,11 @@ +create vdisk file="{{ AnsibleVhdx }}" maximum=2000 type=fixed + +select vdisk file="{{ AnsibleVhdx }}" + +attach vdisk + +convert mbr + +create partition primary + +assign letter="T" diff --git a/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2 b/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2 new file mode 100644 index 00000000..c2be9cd1 --- /dev/null +++ b/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2 @@ -0,0 +1,3 @@ +select vdisk file="{{ AnsibleVhdx }}" + +detach vdisk diff --git a/test/integration/targets/incidental_win_dsc/aliases b/test/integration/targets/incidental_win_dsc/aliases new file mode 100644 index 00000000..9114c742 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/aliases @@ -0,0 
+1,6 @@ +shippable/windows/incidental +windows +skip/windows/2008 +skip/windows/2008-R2 +skip/windows/2012 +skip/windows/2012-R2 diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 new file mode 100644 index 00000000..dbf1ecf3 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 @@ -0,0 +1,41 @@ +#Requires -Version 5.0 -Modules CimCmdlets + +Function Get-TargetResource +{ + [CmdletBinding()] + [OutputType([Hashtable])] + param( + [Parameter(Mandatory=$true)] + [ValidateNotNullOrEmpty()] + [String]$KeyParam + ) + return @{Value = [bool]$global:DSCMachineStatus} +} + +Function Set-TargetResource +{ + [CmdletBinding()] + param ( + [Parameter(Mandatory=$true)] + [ValidateNotNullOrEmpty()] + [String]$KeyParam, + [Bool]$Value = $true + ) + $global:DSCMachineStatus = [int]$Value +} + +Function Test-TargetResource +{ + [CmdletBinding()] + [OutputType([Boolean])] + param ( + [Parameter(Mandatory=$true)] + [ValidateNotNullOrEmpty()] + [String]$KeyParam, + [Bool]$Value = $true + ) + $false +} + +Export-ModuleMember -Function *-TargetResource + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof new file mode 100644 index 00000000..288b8877 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof @@ -0,0 +1,7 @@ +[ClassVersion("1.0.0"), FriendlyName("xSetReboot")] +class ANSIBLE_xSetReboot : OMI_BaseResource +{ + [Key] String KeyParam; + [Write] Boolean Value; +}; + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 new file mode 100644 index 00000000..79f64969 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 @@ -0,0 +1,214 @@ +#Requires -Version 5.0 -Modules CimCmdlets + +Function ConvertFrom-CimInstance { + param( + [Parameter(Mandatory=$true)][CimInstance]$Instance + ) + $hashtable = @{ + _cim_instance = $Instance.CimSystemProperties.ClassName + } + foreach ($prop in $Instance.CimInstanceProperties) { + $hashtable."$($prop.Name)" = ConvertTo-OutputValue -Value $prop.Value + } + return $hashtable +} + +Function ConvertTo-OutputValue { + param($Value) + + if ($Value -is [DateTime[]]) { + $Value = $Value | ForEach-Object { $_.ToString("o") } + } elseif ($Value -is [DateTime]) { + $Value = $Value.ToString("o") + } elseif ($Value -is [Double]) { + $Value = $Value.ToString() # To avoid Python 2 double parsing issues on test validation + } elseif ($Value -is [Double[]]) { + $Value = $Value | ForEach-Object { $_.ToString() } + } elseif ($Value -is [PSCredential]) { + $password = $null + $password_ptr = [System.Runtime.InteropServices.Marshal]::SecureStringToGlobalAllocUnicode($Value.Password) + try { + $password = [System.Runtime.InteropServices.Marshal]::PtrToStringUni($password_ptr) + } finally { + 
[System.Runtime.InteropServices.Marshal]::ZeroFreeGlobalAllocUnicode($password_ptr) + } + $Value = @{ + username = $Value.Username + password = $password + } + } elseif ($Value -is [CimInstance[]]) { + $value_list = [System.Collections.Generic.List`1[Hashtable]]@() + foreach ($cim_instance in $Value) { + $value_list.Add((ConvertFrom-CimInstance -Instance $cim_instance)) + } + $Value = $value_list.ToArray() + } elseif ($Value -is [CimInstance]) { + $Value = ConvertFrom-CimInstance -Instance $Value + } + + return ,$Value +} + +Function Get-TargetResource +{ + [CmdletBinding()] + [OutputType([Hashtable])] + param( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path + ) + return @{ + Ensure = $Ensure + Path = $Path + } +} + +Function Set-TargetResource +{ + [CmdletBinding()] + param + ( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path, + + [String] $DefaultParam = "Default", + [String] $StringParam, + [String[]] $StringArrayParam, + [SByte] $Int8Param, + [SByte[]] $Int8ArrayParam, + [Byte] $UInt8Param, + [Byte[]] $UInt8ArrayParam, + [Int16] $Int16Param, + [Int16[]] $Int16ArrayParam, + [UInt16] $UInt16Param, + [UInt16[]] $UInt16ArrayParam, + [Int32] $Int32Param, + [Int32[]] $Int32ArrayParam, + [UInt32] $UInt32Param, + [UInt32[]] $UInt32ArrayParam, + [Int64] $Int64Param, + [Int64[]] $Int64ArrayParam, + [UInt64] $UInt64Param, + [UInt64[]] $UInt64ArrayParam, + [Bool] $BooleanParam, + [Bool[]] $BooleanArrayParam, + [Char] $CharParam, + [Char[]] $CharArrayParam, + [Single] $SingleParam, + [Single[]] $SingleArrayParam, + [Double] $DoubleParam, + [Double[]] $DoubleArrayParam, + [DateTime] $DateTimeParam, + [DateTime[]] $DateTimeArrayParam, + [PSCredential] $PSCredentialParam, + [CimInstance[]] $HashtableParam, + [CimInstance] $CimInstanceParam, + [CimInstance[]] $CimInstanceArrayParam, + [CimInstance] $NestedCimInstanceParam, + [CimInstance[]] $NestedCimInstanceArrayParam + ) + + $info = @{ + Version = "1.0.0" + Ensure = @{ + Type = $Ensure.GetType().FullName + Value = $Ensure + } + Path = @{ + Type = $Path.GetType().FullName + Value = $Path + } + DefaultParam = @{ + Type = $DefaultParam.GetType().FullName + Value = $DefaultParam + } + } + + foreach ($kvp in $PSCmdlet.MyInvocation.BoundParameters.GetEnumerator()) { + $info."$($kvp.Key)" = @{ + Type = $kvp.Value.GetType().FullName + Value = (ConvertTo-OutputValue -Value $kvp.Value) + } + } + + if (Test-Path -Path $Path) { + Remove-Item -Path $Path -Force > $null + } + New-Item -Path $Path -ItemType File > $null + Set-Content -Path $Path -Value (ConvertTo-Json -InputObject $info -Depth 10) > $null + Write-Verbose -Message "set verbose" + Write-Warning -Message "set warning" +} + +Function Test-TargetResource +{ + [CmdletBinding()] + [OutputType([Boolean])] + param + ( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path, + + [String] $DefaultParam = "Default", + [String] $StringParam, + [String[]] $StringArrayParam, + [SByte] $Int8Param, + [SByte[]] $Int8ArrayParam, + [Byte] $UInt8Param, + [Byte[]] $UInt8ArrayParam, + [Int16] $Int16Param, + [Int16[]] $Int16ArrayParam, + [UInt16] $UInt16Param, + [UInt16[]] $UInt16ArrayParam, + [Int32] $Int32Param, + [Int32[]] 
$Int32ArrayParam, + [UInt32] $UInt32Param, + [UInt32[]] $UInt32ArrayParam, + [Int64] $Int64Param, + [Int64[]] $Int64ArrayParam, + [UInt64] $UInt64Param, + [UInt64[]] $UInt64ArrayParam, + [Bool] $BooleanParam, + [Bool[]] $BooleanArrayParam, + [Char] $CharParam, + [Char[]] $CharArrayParam, + [Single] $SingleParam, + [Single[]] $SingleArrayParam, + [Double] $DoubleParam, + [Double[]] $DoubleArrayParam, + [DateTime] $DateTimeParam, + [DateTime[]] $DateTimeArrayParam, + [PSCredential] $PSCredentialParam, + [CimInstance[]] $HashtableParam, + [CimInstance] $CimInstanceParam, + [CimInstance[]] $CimInstanceArrayParam, + [CimInstance] $NestedCimInstanceParam, + [CimInstance[]] $NestedCimInstanceArrayParam + ) + Write-Verbose -Message "test verbose" + Write-Warning -Message "test warning" + $exists = Test-Path -LiteralPath $Path -PathType Leaf + if ($Ensure -eq "Present") { + $exists + } else { + -not $exists + } +} + +Export-ModuleMember -Function *-TargetResource + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof new file mode 100644 index 00000000..c61b2b1e --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof @@ -0,0 +1,60 @@ +[ClassVersion("1.0.0")] +class ANSIBLE_xTestClass +{ + [Key] String Key; + [Write] String StringValue; + [Write] SInt32 IntValue; + [Write] String StringArrayValue[]; +}; + +[ClassVersion("1.0.0")] +class ANSIBLE_xNestedClass +{ + [Key] String KeyValue; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimValue; + [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashValue[]; + [Write] SInt16 IntValue; +}; + +[ClassVersion("1.0.0"), FriendlyName("xTestResource")] +class ANSIBLE_xTestResource : OMI_BaseResource +{ + [Key] String Path; + [Required, ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] String Ensure; + [Read] String ReadParam; + [Write] String DefaultParam; + [Write] String StringParam; + [Write] String StringArrayParam[]; + [Write] SInt8 Int8Param; + [Write] SInt8 Int8ArrayParam[]; + [Write] UInt8 UInt8Param; + [Write] UInt8 UInt8ArrayParam[]; + [Write] SInt16 Int16Param; + [Write] SInt16 Int16ArrayParam[]; + [Write] UInt16 UInt16Param; + [Write] UInt16 UInt16ArrayParam[]; + [Write] SInt32 Int32Param; + [Write] SInt32 Int32ArrayParam[]; + [Write] UInt32 UInt32Param; + [Write] UInt32 UInt32ArrayParam[]; + [Write] SInt64 Int64Param; + [Write] SInt64 Int64ArrayParam[]; + [Write] UInt64 UInt64Param; + [Write] UInt64 UInt64ArrayParam[]; + [Write] Boolean BooleanParam; + [Write] Boolean BooleanArrayParam[]; + [Write] Char16 CharParam; + [Write] Char16 CharArrayParam[]; + [Write] Real32 SingleParam; + [Write] Real32 SingleArrayParam[]; + [Write] Real64 DoubleParam; + [Write] Real64 DoubleArrayParam[]; + [Write] DateTime DateTimeParam; + [Write] DateTime DateTimeArrayParam[]; + [Write, EmbeddedInstance("MSFT_Credential")] String PSCredentialParam; + [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashtableParam[]; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceArrayParam[]; + [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceParam; + [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceArrayParam[]; +}; + diff --git 
a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 new file mode 100644 index 00000000..3d61611d --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 @@ -0,0 +1,13 @@ +@{ + ModuleVersion = '1.0.0' + GUID = '80c895c4-de3f-4d6d-8fa4-c504c96b6f22' + Author = 'Ansible' + CompanyName = 'Ansible' + Copyright = '(c) 2019' + Description = 'Test DSC Resource for Ansible integration tests' + PowerShellVersion = '5.0' + CLRVersion = '4.0' + FunctionsToExport = '*' + CmdletsToExport = '*' +} + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 new file mode 100644 index 00000000..d75256e1 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 @@ -0,0 +1,214 @@ +#Requires -Version 5.0 -Modules CimCmdlets + +Function ConvertFrom-CimInstance { + param( + [Parameter(Mandatory=$true)][CimInstance]$Instance + ) + $hashtable = @{ + _cim_instance = $Instance.CimSystemProperties.ClassName + } + foreach ($prop in $Instance.CimInstanceProperties) { + $hashtable."$($prop.Name)" = ConvertTo-OutputValue -Value $prop.Value + } + return $hashtable +} + +Function ConvertTo-OutputValue { + param($Value) + + if ($Value -is [DateTime[]]) { + $Value = $Value | ForEach-Object { $_.ToString("o") } + } elseif ($Value -is [DateTime]) { + $Value = $Value.ToString("o") + } elseif ($Value -is [Double]) { + $Value = $Value.ToString() # To avoid Python 2 double parsing issues on test validation + } elseif ($Value -is [Double[]]) { + $Value = $Value | ForEach-Object { $_.ToString() } + } elseif ($Value -is [PSCredential]) { + $password = $null + $password_ptr = [System.Runtime.InteropServices.Marshal]::SecureStringToGlobalAllocUnicode($Value.Password) + try { + $password = [System.Runtime.InteropServices.Marshal]::PtrToStringUni($password_ptr) + } finally { + [System.Runtime.InteropServices.Marshal]::ZeroFreeGlobalAllocUnicode($password_ptr) + } + $Value = @{ + username = $Value.Username + password = $password + } + } elseif ($Value -is [CimInstance[]]) { + $value_list = [System.Collections.Generic.List`1[Hashtable]]@() + foreach ($cim_instance in $Value) { + $value_list.Add((ConvertFrom-CimInstance -Instance $cim_instance)) + } + $Value = $value_list.ToArray() + } elseif ($Value -is [CimInstance]) { + $Value = ConvertFrom-CimInstance -Instance $Value + } + + return ,$Value +} + +Function Get-TargetResource +{ + [CmdletBinding()] + [OutputType([Hashtable])] + param( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path + ) + return @{ + Ensure = $Ensure + Path = $Path + } +} + +Function Set-TargetResource +{ + [CmdletBinding()] + param + ( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path, + + [String] $DefaultParam = "Default", + [String] $StringParam, + [String[]] $StringArrayParam, + [SByte] $Int8Param, + [SByte[]] $Int8ArrayParam, + [Byte] $UInt8Param, + [Byte[]] $UInt8ArrayParam, + [Int16] $Int16Param, + 
[Int16[]] $Int16ArrayParam, + [UInt16] $UInt16Param, + [UInt16[]] $UInt16ArrayParam, + [Int32] $Int32Param, + [Int32[]] $Int32ArrayParam, + [UInt32] $UInt32Param, + [UInt32[]] $UInt32ArrayParam, + [Int64] $Int64Param, + [Int64[]] $Int64ArrayParam, + [UInt64] $UInt64Param, + [UInt64[]] $UInt64ArrayParam, + [Bool] $BooleanParam, + [Bool[]] $BooleanArrayParam, + [Char] $CharParam, + [Char[]] $CharArrayParam, + [Single] $SingleParam, + [Single[]] $SingleArrayParam, + [Double] $DoubleParam, + [Double[]] $DoubleArrayParam, + [DateTime] $DateTimeParam, + [DateTime[]] $DateTimeArrayParam, + [PSCredential] $PSCredentialParam, + [CimInstance[]] $HashtableParam, + [CimInstance] $CimInstanceParam, + [CimInstance[]] $CimInstanceArrayParam, + [CimInstance] $NestedCimInstanceParam, + [CimInstance[]] $NestedCimInstanceArrayParam + ) + + $info = @{ + Version = "1.0.1" + Ensure = @{ + Type = $Ensure.GetType().FullName + Value = $Ensure + } + Path = @{ + Type = $Path.GetType().FullName + Value = $Path + } + DefaultParam = @{ + Type = $DefaultParam.GetType().FullName + Value = $DefaultParam + } + } + + foreach ($kvp in $PSCmdlet.MyInvocation.BoundParameters.GetEnumerator()) { + $info."$($kvp.Key)" = @{ + Type = $kvp.Value.GetType().FullName + Value = (ConvertTo-OutputValue -Value $kvp.Value) + } + } + + if (Test-Path -Path $Path) { + Remove-Item -Path $Path -Force > $null + } + New-Item -Path $Path -ItemType File > $null + Set-Content -Path $Path -Value (ConvertTo-Json -InputObject $info -Depth 10) > $null + Write-Verbose -Message "set verbose" + Write-Warning -Message "set warning" +} + +Function Test-TargetResource +{ + [CmdletBinding()] + [OutputType([Boolean])] + param + ( + [Parameter(Mandatory = $true)] + [ValidateSet("Present", "Absent")] + [String] $Ensure = "Present", + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [String] $Path, + + [String] $DefaultParam = "Default", + [String] $StringParam, + [String[]] $StringArrayParam, + [SByte] $Int8Param, + [SByte[]] $Int8ArrayParam, + [Byte] $UInt8Param, + [Byte[]] $UInt8ArrayParam, + [Int16] $Int16Param, + [Int16[]] $Int16ArrayParam, + [UInt16] $UInt16Param, + [UInt16[]] $UInt16ArrayParam, + [Int32] $Int32Param, + [Int32[]] $Int32ArrayParam, + [UInt32] $UInt32Param, + [UInt32[]] $UInt32ArrayParam, + [Int64] $Int64Param, + [Int64[]] $Int64ArrayParam, + [UInt64] $UInt64Param, + [UInt64[]] $UInt64ArrayParam, + [Bool] $BooleanParam, + [Bool[]] $BooleanArrayParam, + [Char] $CharParam, + [Char[]] $CharArrayParam, + [Single] $SingleParam, + [Single[]] $SingleArrayParam, + [Double] $DoubleParam, + [Double[]] $DoubleArrayParam, + [DateTime] $DateTimeParam, + [DateTime[]] $DateTimeArrayParam, + [PSCredential] $PSCredentialParam, + [CimInstance[]] $HashtableParam, + [CimInstance] $CimInstanceParam, + [CimInstance[]] $CimInstanceArrayParam, + [CimInstance] $NestedCimInstanceParam, + [CimInstance[]] $NestedCimInstanceArrayParam + ) + Write-Verbose -Message "test verbose" + Write-Warning -Message "test warning" + $exists = Test-Path -LiteralPath $Path -PathType Leaf + if ($Ensure -eq "Present") { + $exists + } else { + -not $exists + } +} + +Export-ModuleMember -Function *-TargetResource + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof new file mode 100644 index 00000000..9301664b --- /dev/null +++ 
b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof @@ -0,0 +1,63 @@ +[ClassVersion("1.0.1")] +class ANSIBLE_xTestClass +{ + [Key] String KeyValue; + [Write, ValueMap{"Choice1", "Choice2"}, Values{"Choice1", "Choice2"}] String Choice; + [Write] String StringValue; + [Write] SInt32 IntValue; + [Write] String StringArrayValue[]; +}; + +[ClassVersion("1.0.1")] +class ANSIBLE_xNestedClass +{ + [Key] String KeyValue; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimValue; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimArrayValue[]; + [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashValue[]; + [Write] SInt16 IntValue; +}; + +[ClassVersion("1.0.1"), FriendlyName("xTestResource")] +class ANSIBLE_xTestResource : OMI_BaseResource +{ + [Key] String Path; + [Required, ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] String Ensure; + [Read] String ReadParam; + [Write] String DefaultParam; + [Write] String StringParam; + [Write] String StringArrayParam[]; + [Write] SInt8 Int8Param; + [Write] SInt8 Int8ArrayParam[]; + [Write] UInt8 UInt8Param; + [Write] UInt8 UInt8ArrayParam[]; + [Write] SInt16 Int16Param; + [Write] SInt16 Int16ArrayParam[]; + [Write] UInt16 UInt16Param; + [Write] UInt16 UInt16ArrayParam[]; + [Write] SInt32 Int32Param; + [Write] SInt32 Int32ArrayParam[]; + [Write] UInt32 UInt32Param; + [Write] UInt32 UInt32ArrayParam[]; + [Write] SInt64 Int64Param; + [Write] SInt64 Int64ArrayParam[]; + [Write] UInt64 UInt64Param; + [Write] UInt64 UInt64ArrayParam[]; + [Write] Boolean BooleanParam; + [Write] Boolean BooleanArrayParam[]; + [Write] Char16 CharParam; + [Write] Char16 CharArrayParam[]; + [Write] Real32 SingleParam; + [Write] Real32 SingleArrayParam[]; + [Write] Real64 DoubleParam; + [Write] Real64 DoubleArrayParam[]; + [Write] DateTime DateTimeParam; + [Write] DateTime DateTimeArrayParam[]; + [Write, EmbeddedInstance("MSFT_Credential")] String PSCredentialParam; + [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashtableParam[]; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceParam; + [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceArrayParam[]; + [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceParam; + [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceArrayParam[]; +}; + diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 new file mode 100644 index 00000000..0c43b852 --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 @@ -0,0 +1,13 @@ +@{ + ModuleVersion = '1.0.1' + GUID = '80c895c4-de3f-4d6d-8fa4-c504c96b6f22' + Author = 'Ansible' + CompanyName = 'Ansible' + Copyright = '(c) 2019' + Description = 'Test DSC Resource for Ansible integration tests' + PowerShellVersion = '5.0' + CLRVersion = '4.0' + FunctionsToExport = '*' + CmdletsToExport = '*' +} + diff --git a/test/integration/targets/incidental_win_dsc/meta/main.yml b/test/integration/targets/incidental_win_dsc/meta/main.yml new file mode 100644 index 00000000..9f37e96c --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_win_dsc/tasks/main.yml b/test/integration/targets/incidental_win_dsc/tasks/main.yml new file mode 100644 index 
00000000..f37295ab --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: get powershell version + win_shell: $PSVersionTable.PSVersion.Major + register: powershell_version + +- name: expect failure when running on old PS hosts + win_dsc: + resource_name: File + register: fail_dsc_old + failed_when: '"This module cannot run as it requires a minimum PowerShell version of 5.0" not in fail_dsc_old.msg' + when: powershell_version.stdout_lines[0]|int < 5 + +- name: run tests when PSv5+ + when: powershell_version.stdout_lines[0]|int >= 5 + block: + - name: add remote temp dir to PSModulePath + win_path: + name: PSModulePath + state: present + scope: machine + elements: + - '{{ remote_tmp_dir }}' + + - name: copy custom DSC resources to remote temp dir + win_copy: + src: xTestDsc + dest: '{{ remote_tmp_dir }}' + + - name: run tests + include_tasks: tests.yml + + always: + - name: remove remote tmp dir from PSModulePath + win_path: + name: PSModulePath + state: absent + scope: machine + elements: + - '{{ remote_tmp_dir }}' diff --git a/test/integration/targets/incidental_win_dsc/tasks/tests.yml b/test/integration/targets/incidental_win_dsc/tasks/tests.yml new file mode 100644 index 00000000..d2a6802f --- /dev/null +++ b/test/integration/targets/incidental_win_dsc/tasks/tests.yml @@ -0,0 +1,544 @@ +--- +- name: fail with incorrect DSC resource name + win_dsc: + resource_name: FakeResource + register: fail_invalid_resource + failed_when: fail_invalid_resource.msg != "Resource 'FakeResource' not found." + +- name: fail with invalid DSC version + win_dsc: + resource_name: xTestResource + module_version: 0.0.1 + register: fail_invalid_version + failed_when: 'fail_invalid_version.msg != "Resource ''xTestResource'' with version ''0.0.1'' not found. Versions installed: ''1.0.0'', ''1.0.1''."' + +- name: fail with mandatory option not set + win_dsc: + resource_name: xSetReboot + Value: yes + register: fail_man_key + failed_when: 'fail_man_key.msg != "missing required arguments: KeyParam"' + +- name: fail with mandatory option not set in sub dict + win_dsc: + resource_name: xTestResource + Path: C:\path + Ensure: Present + CimInstanceParam: # Missing KeyValue in dict + Choice: Choice1 + register: fail_man_key_sub_dict + failed_when: 'fail_man_key_sub_dict.msg != "missing required arguments: KeyValue found in CimInstanceParam"' + +- name: fail invalid option + win_dsc: + resource_name: xSetReboot + KeyParam: key + OtherParam: invalid + register: fail_invalid_option + failed_when: 'fail_invalid_option.msg != "Unsupported parameters for (win_dsc) module: OtherParam. Supported parameters include: KeyParam, PsDscRunAsCredential_username, module_version, Value, PsDscRunAsCredential_password, resource_name, DependsOn"' + +- name: fail invalid option in sub dict + win_dsc: + resource_name: xTestResource + Path: C:\path + Ensure: Present + NestedCimInstanceParam: + KeyValue: key + CimValue: + KeyValue: other key + InvalidKey: invalid + register: fail_invalid_option_sub_dict + failed_when: 'fail_invalid_option_sub_dict.msg != "Unsupported parameters for (win_dsc) module: InvalidKey found in NestedCimInstanceParam -> CimValue. 
Supported parameters include: IntValue, KeyValue, StringArrayValue, Choice, StringValue"' + +- name: fail invalid read only option + win_dsc: + resource_name: xTestResource + Path: C:\path + Ensure: Present + ReadParam: abc + register: fail_invalid_option_read_only + failed_when: '"Unsupported parameters for (win_dsc) module: ReadParam" not in fail_invalid_option_read_only.msg' + +- name: fail invalid choice + win_dsc: + resource_name: xTestResource + Path: C:\path + Ensure: invalid + register: fail_invalid_choice + failed_when: 'fail_invalid_choice.msg != "value of Ensure must be one of: Present, Absent. Got no match for: invalid"' + +- name: fail invalid choice in sub dict + win_dsc: + resource_name: xTestResource + Path: C:\path + Ensure: Present + CimInstanceArrayParam: + - KeyValue: key + - KeyValue: key2 + Choice: Choice3 + register: fail_invalid_choice_sub_dict + failed_when: 'fail_invalid_choice_sub_dict.msg != "value of Choice must be one of: Choice1, Choice2. Got no match for: Choice3 found in CimInstanceArrayParam"' + +- name: fail old version missing new option + win_dsc: + resource_name: xTestResource + module_version: 1.0.0 + Path: C:\path + Ensure: Present + CimInstanceParam: # CimInstanceParam does not exist in the 1.0.0 version + Key: key + register: fail_invalid_option_old + failed_when: '"Unsupported parameters for (win_dsc) module: CimInstanceParam" not in fail_invalid_option_old.msg' + +- name: fail old version missing new option sub dict + win_dsc: + resource_name: xTestResource + module_version: 1.0.0 + Path: C:\path + Ensure: Present + CimInstanceArrayParam: + - Key: key + Choice: Choice1 + register: fail_invalid_option_old_sub_dict + failed_when: 'fail_invalid_option_old_sub_dict.msg != "Unsupported parameters for (win_dsc) module: Choice found in CimInstanceArrayParam. 
Supported parameters include: Key, IntValue, StringArrayValue, StringValue"' + +- name: create test file (check mode) + win_dsc: + resource_name: File + DestinationPath: '{{ remote_tmp_dir }}\dsc-file' + Contents: file contents + Attributes: + - Hidden + - ReadOnly + Ensure: Present + Type: File + register: create_file_check + check_mode: yes + +- name: get result of create test file (check mode) + win_stat: + path: '{{ remote_tmp_dir }}\dsc-file' + register: create_file_actual_check + +- name: assert create test file (check mode) + assert: + that: + - create_file_check is changed + - create_file_check.module_version == None # Some built in modules don't have a version set + - not create_file_check.reboot_required + - not create_file_actual_check.stat.exists + +- name: assert create test file verbosity (check mode) + assert: + that: + - create_file_check.verbose_test is defined + - not create_file_check.verbose_set is defined + when: ansible_verbosity >= 3 + +- name: create test file + win_dsc: + resource_name: File + DestinationPath: '{{ remote_tmp_dir }}\dsc-file' + Contents: file contents + Attributes: + - Hidden + - ReadOnly + Ensure: Present + Type: File + register: create_file + +- name: get result of create test file + win_stat: + path: '{{ remote_tmp_dir }}\dsc-file' + register: create_file_actual + +- name: assert create test file verbosity + assert: + that: + - create_file.verbose_test is defined + - create_file.verbose_set is defined + when: ansible_verbosity >= 3 + +- name: assert create test file + assert: + that: + - create_file is changed + - create_file.module_version == None + - not create_file.reboot_required + - create_file_actual.stat.exists + - create_file_actual.stat.attributes == "ReadOnly, Hidden, Archive" + - create_file_actual.stat.checksum == 'd48daab51112b49ecabd917adc345b8ba257055e' + +- name: create test file (idempotent) + win_dsc: + resource_name: File + DestinationPath: '{{ remote_tmp_dir }}\dsc-file' + Contents: file contents + Attributes: + - Hidden + - ReadOnly + Ensure: Present + Type: File + register: create_file_again + +- name: assert create test file (idempotent) + assert: + that: + - not create_file_again is changed + - create_file.module_version == None + - not create_file.reboot_required + +- name: get SID of the current Ansible user + win_shell: | + Add-Type -AssemblyName System.DirectoryServices.AccountManagement + [System.DirectoryServices.AccountManagement.UserPrincipal]::Current.Sid.Value + register: actual_sid + +- name: run DSC process as another user + win_dsc: + resource_name: Script + GetScript: '@{ Result= "" }' + SetScript: | + Add-Type -AssemblyName System.DirectoryServices.AccountManagement + $sid = [System.DirectoryServices.AccountManagement.UserPrincipal]::Current.Sid.Value + Set-Content -Path "{{ remote_tmp_dir }}\runas.txt" -Value $sid + TestScript: $false + PsDscRunAsCredential_username: '{{ ansible_user }}' + PsDscRunAsCredential_password: '{{ ansible_password }}' + register: runas_user + +- name: get result of run DSC process as another user + slurp: + path: '{{ remote_tmp_dir }}\runas.txt' + register: runas_user_result + +- name: assert run DSC process as another user + assert: + that: + - runas_user is changed + - runas_user.module_version != None # Can't reliably set the version but we can test it is set + - not runas_user.reboot_required + - runas_user_result.content|b64decode == actual_sid.stdout + +- name: run DSC that sets reboot_required with defaults + win_dsc: + resource_name: xSetReboot + KeyParam: value # Just to 
satisfy the Resource with key validation + register: set_reboot_defaults + +- name: assert run DSC that sets reboot_required with defaults + assert: + that: + - set_reboot_defaults.reboot_required + +- name: run DSC that sets reboot_required with False + win_dsc: + resource_name: xSetReboot + KeyParam: value + Value: no + register: set_reboot_false + +- name: assert run DSC that sets reboot_required with False + assert: + that: + - not set_reboot_false.reboot_required + +- name: run DSC that sets reboot_required with True + win_dsc: + resource_name: xSetReboot + KeyParam: value + Value: yes + register: set_reboot_true + +- name: assert run DSC that sets reboot_required with True + assert: + that: + - set_reboot_true.reboot_required + +- name: test DSC with all types + win_dsc: + resource_name: xTestResource + Path: '{{ remote_tmp_dir }}\test-types.json' + Ensure: Present + StringParam: string param + StringArrayParam: + - string 1 + - string 2 + Int8Param: 127 # [SByte]::MaxValue + Int8ArrayParam: + - 127 + - '127' + UInt8Param: 255 # [Byte]::MaxValue + UInt8ArrayParam: + - 255 + - '255' + Int16Param: 32767 # [Int16]::MaxValue + Int16ArrayParam: 32767, 32767 + UInt16Param: '65535' # [UInt16]::MaxValue + UInt16ArrayParam: 65535 + Int32Param: 2147483647 # [Int32]::MaxValue + Int32ArrayParam: '2147483647' + UInt32Param: '4294967295' # [UInt32]::MaxValue + UInt32ArrayParam: + - '4294967295' + - 4294967295 + Int64Param: 9223372036854775807 # [Int64]::MaxValue + Int64ArrayParam: + - -9223372036854775808 # [Int64]::MinValue + - 9223372036854775807 + UInt64Param: 18446744073709551615 # [UInt64]::MaxValue + UInt64ArrayParam: + - 0 # [UInt64]::MinValue + - 18446744073709551615 + BooleanParam: True + BooleanArrayParam: + - True + - 'True' + - 'true' + - 'y' + - 'yes' + - 1 + - False + - 'False' + - 'false' + - 'n' + - 'no' + - 0 + CharParam: c + CharArrayParam: + - c + - h + - a + - r + SingleParam: 3.402823E+38 + SingleArrayParam: + - '3.402823E+38' + - 1.2393494 + DoubleParam: 1.79769313486232E+300 + DoubleArrayParam: + - '1.79769313486232E+300' + - 3.56821831681516 + DateTimeParam: '2019-02-22T13:57:31.2311892-04:00' + DateTimeArrayParam: + - '2019-02-22T13:57:31.2311892+00:00' + - '2019-02-22T13:57:31.2311892+04:00' + PSCredentialParam_username: username1 + PSCredentialParam_password: password1 + HashtableParam: + key1: string 1 + key2: '' + key3: 1 + CimInstanceParam: + KeyValue: a + CimInstanceArrayParam: + - KeyValue: b + Choice: Choice1 + StringValue: string 1 + IntValue: 1 + StringArrayValue: + - abc + - def + - KeyValue: c + Choice: Choice2 + StringValue: string 2 + IntValue: '2' + StringArrayValue: + - ghi + - jkl + NestedCimInstanceParam: + KeyValue: key value + CimValue: + KeyValue: d + CimArrayValue: + - KeyValue: e + Choice: Choice2 + HashValue: + a: a + IntValue: '300' + register: dsc_types + +- name: get result of test DSC with all types + slurp: + path: '{{ remote_tmp_dir }}\test-types.json' + register: dsc_types_raw + +- name: convert result of test DSC with all types to dict + set_fact: + dsc_types_actual: '{{ dsc_types_raw.content | b64decode | from_json }}' + +- name: assert test DSC with all types + assert: + that: + - dsc_types is changed + - dsc_types.module_version == '1.0.1' + - not dsc_types.reboot_required + - dsc_types_actual.Version == '1.0.1' + - dsc_types_actual.Verbose.Value.IsPresent + - dsc_types_actual.DefaultParam.Value == 'Default' # ensures that the default is set in the engine if we don't set it ourselves + - dsc_types_actual.Ensure.Value == 'Present' + -
dsc_types_actual.Path.Value == remote_tmp_dir + "\\test-types.json" + - dsc_types_actual.StringParam.Type == 'System.String' + - dsc_types_actual.StringParam.Value == 'string param' + - dsc_types_actual.StringArrayParam.Type == 'System.String[]' + - dsc_types_actual.StringArrayParam.Value == ['string 1', 'string 2'] + - dsc_types_actual.Int8Param.Type == 'System.SByte' + - dsc_types_actual.Int8Param.Value == 127 + - dsc_types_actual.Int8ArrayParam.Type == 'System.SByte[]' + - dsc_types_actual.Int8ArrayParam.Value == [127, 127] + - dsc_types_actual.UInt8Param.Type == 'System.Byte' + - dsc_types_actual.UInt8Param.Value == 255 + - dsc_types_actual.UInt8ArrayParam.Type == 'System.Byte[]' + - dsc_types_actual.UInt8ArrayParam.Value == [255, 255] + - dsc_types_actual.Int16Param.Type == 'System.Int16' + - dsc_types_actual.Int16Param.Value == 32767 + - dsc_types_actual.Int16ArrayParam.Type == 'System.Int16[]' + - dsc_types_actual.Int16ArrayParam.Value == [32767, 32767] + - dsc_types_actual.UInt16Param.Type == 'System.UInt16' + - dsc_types_actual.UInt16Param.Value == 65535 + - dsc_types_actual.UInt16ArrayParam.Type == 'System.UInt16[]' + - dsc_types_actual.UInt16ArrayParam.Value == [65535] + - dsc_types_actual.Int32Param.Type == 'System.Int32' + - dsc_types_actual.Int32Param.Value == 2147483647 + - dsc_types_actual.Int32ArrayParam.Type == 'System.Int32[]' + - dsc_types_actual.Int32ArrayParam.Value == [2147483647] + - dsc_types_actual.UInt32Param.Type == 'System.UInt32' + - dsc_types_actual.UInt32Param.Value == 4294967295 + - dsc_types_actual.UInt32ArrayParam.Type == 'System.UInt32[]' + - dsc_types_actual.UInt32ArrayParam.Value == [4294967295, 4294967295] + - dsc_types_actual.Int64Param.Type == 'System.Int64' + - dsc_types_actual.Int64Param.Value == 9223372036854775807 + - dsc_types_actual.Int64ArrayParam.Type == 'System.Int64[]' + - dsc_types_actual.Int64ArrayParam.Value == [-9223372036854775808, 9223372036854775807] + - dsc_types_actual.UInt64Param.Type == 'System.UInt64' + - dsc_types_actual.UInt64Param.Value == 18446744073709551615 + - dsc_types_actual.UInt64ArrayParam.Type == 'System.UInt64[]' + - dsc_types_actual.UInt64ArrayParam.Value == [0, 18446744073709551615] + - dsc_types_actual.BooleanParam.Type == 'System.Boolean' + - dsc_types_actual.BooleanParam.Value == True + - dsc_types_actual.BooleanArrayParam.Type == 'System.Boolean[]' + - dsc_types_actual.BooleanArrayParam.Value == [True, True, True, True, True, True, False, False, False, False, False, False] + - dsc_types_actual.CharParam.Type == 'System.Char' + - dsc_types_actual.CharParam.Value == 'c' + - dsc_types_actual.CharArrayParam.Type == 'System.Char[]' + - dsc_types_actual.CharArrayParam.Value == ['c', 'h', 'a', 'r'] + - dsc_types_actual.SingleParam.Type == 'System.Single' + - dsc_types_actual.SingleParam.Value|string == '3.402823e+38' + - dsc_types_actual.SingleArrayParam.Type == 'System.Single[]' + - dsc_types_actual.SingleArrayParam.Value|length == 2 + - dsc_types_actual.SingleArrayParam.Value[0]|string == '3.402823e+38' + - dsc_types_actual.SingleArrayParam.Value[1]|string == '1.23934937' + - dsc_types_actual.DoubleParam.Type == 'System.Double' + - dsc_types_actual.DoubleParam.Value == '1.79769313486232E+300' + - dsc_types_actual.DoubleArrayParam.Type == 'System.Double[]' + - dsc_types_actual.DoubleArrayParam.Value|length == 2 + - dsc_types_actual.DoubleArrayParam.Value[0] == '1.79769313486232E+300' + - dsc_types_actual.DoubleArrayParam.Value[1] == '3.56821831681516' + - dsc_types_actual.DateTimeParam.Type == 'System.DateTime' + - 
dsc_types_actual.DateTimeParam.Value == '2019-02-22T17:57:31.2311890+00:00' + - dsc_types_actual.DateTimeArrayParam.Type == 'System.DateTime[]' + - dsc_types_actual.DateTimeArrayParam.Value == ['2019-02-22T13:57:31.2311890+00:00', '2019-02-22T09:57:31.2311890+00:00'] + - dsc_types_actual.PSCredentialParam.Type == 'System.Management.Automation.PSCredential' + - dsc_types_actual.PSCredentialParam.Value.username == 'username1' + - dsc_types_actual.PSCredentialParam.Value.password == 'password1' + # Hashtable is actually a CimInstance[] of MSFT_KeyValuePairs + - dsc_types_actual.HashtableParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]' + - dsc_types_actual.HashtableParam.Value|length == 3 + # Can't guarantee the order of the keys so just check they are the values they could be + - dsc_types_actual.HashtableParam.Value[0].Key in ["key1", "key2", "key3"] + - dsc_types_actual.HashtableParam.Value[0].Value in ["string 1", "1", ""] + - dsc_types_actual.HashtableParam.Value[0]._cim_instance == 'MSFT_KeyValuePair' + - dsc_types_actual.HashtableParam.Value[1].Key in ["key1", "key2", "key3"] + - dsc_types_actual.HashtableParam.Value[1].Value in ["string 1", "1", ""] + - dsc_types_actual.HashtableParam.Value[1]._cim_instance == 'MSFT_KeyValuePair' + - dsc_types_actual.HashtableParam.Value[2].Key in ["key1", "key2", "key3"] + - dsc_types_actual.HashtableParam.Value[2].Value in ["string 1", "1", ""] + - dsc_types_actual.HashtableParam.Value[2]._cim_instance == 'MSFT_KeyValuePair' + - dsc_types_actual.CimInstanceParam.Type == 'Microsoft.Management.Infrastructure.CimInstance' + - dsc_types_actual.CimInstanceParam.Value.Choice == None + - dsc_types_actual.CimInstanceParam.Value.IntValue == None + - dsc_types_actual.CimInstanceParam.Value.KeyValue == 'a' + - dsc_types_actual.CimInstanceParam.Value.StringArrayValue == None + - dsc_types_actual.CimInstanceParam.Value.StringValue == None + - dsc_types_actual.CimInstanceParam.Value._cim_instance == "ANSIBLE_xTestClass" + - dsc_types_actual.CimInstanceArrayParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]' + - dsc_types_actual.CimInstanceArrayParam.Value|length == 2 + - dsc_types_actual.CimInstanceArrayParam.Value[0].Choice == 'Choice1' + - dsc_types_actual.CimInstanceArrayParam.Value[0].IntValue == 1 + - dsc_types_actual.CimInstanceArrayParam.Value[0].KeyValue == 'b' + - dsc_types_actual.CimInstanceArrayParam.Value[0].StringArrayValue == ['abc', 'def'] + - dsc_types_actual.CimInstanceArrayParam.Value[0].StringValue == 'string 1' + - dsc_types_actual.CimInstanceArrayParam.Value[0]._cim_instance == 'ANSIBLE_xTestClass' + - dsc_types_actual.CimInstanceArrayParam.Value[1].Choice == 'Choice2' + - dsc_types_actual.CimInstanceArrayParam.Value[1].IntValue == 2 + - dsc_types_actual.CimInstanceArrayParam.Value[1].KeyValue == 'c' + - dsc_types_actual.CimInstanceArrayParam.Value[1].StringArrayValue == ['ghi', 'jkl'] + - dsc_types_actual.CimInstanceArrayParam.Value[1].StringValue == 'string 2' + - dsc_types_actual.CimInstanceArrayParam.Value[1]._cim_instance == 'ANSIBLE_xTestClass' + - dsc_types_actual.NestedCimInstanceParam.Type == 'Microsoft.Management.Infrastructure.CimInstance' + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue|length == 1 + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].Choice == 'Choice2' + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].IntValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].KeyValue == 'e' + - 
dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].StringArrayValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].StringValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0]._cim_instance == 'ANSIBLE_xTestClass' + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.Choice == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.IntValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.KeyValue == 'd' + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.StringArrayValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.StringValue == None + - dsc_types_actual.NestedCimInstanceParam.Value.CimValue._cim_instance == 'ANSIBLE_xTestClass' + - dsc_types_actual.NestedCimInstanceParam.Value.HashValue|length == 1 + - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0].Key == 'a' + - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0].Value == 'a' + - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0]._cim_instance == 'MSFT_KeyValuePair' + - dsc_types_actual.NestedCimInstanceParam.Value.IntValue == 300 + - dsc_types_actual.NestedCimInstanceParam.Value.KeyValue == 'key value' + - dsc_types_actual.NestedCimInstanceParam.Value._cim_instance == 'ANSIBLE_xNestedClass' + +- name: test DSC with all types older version + win_dsc: + resource_name: xTestResource + module_version: 1.0.0 + Path: '{{ remote_tmp_dir }}\test-types.json' + Ensure: Absent + StringParam: string param old + CimInstanceArrayParam: + - Key: old key + StringValue: string old 1 + IntValue: 0 + StringArrayValue: + - zyx + - wvu + register: dsc_types_old + +- name: get result of test DSC with all types older version + slurp: + path: '{{ remote_tmp_dir }}\test-types.json' + register: dsc_types_old_raw + +- name: convert result of test DSC with all types older version to dict + set_fact: + dsc_types_old_actual: '{{ dsc_types_old_raw.content | b64decode | from_json }}' + +- name: assert test DSC with all types older version + assert: + that: + - dsc_types_old is changed + - dsc_types_old.module_version == '1.0.0' + - not dsc_types_old.reboot_required + - dsc_types_old_actual.Version == '1.0.0' + - dsc_types_old_actual.Verbose.Value.IsPresent + - dsc_types_old_actual.DefaultParam.Value == 'Default' + - dsc_types_old_actual.Ensure.Value == 'Absent' + - dsc_types_old_actual.Path.Value == remote_tmp_dir + "\\test-types.json" + - dsc_types_old_actual.StringParam.Type == 'System.String' + - dsc_types_old_actual.StringParam.Value == 'string param old' + - dsc_types_old_actual.CimInstanceArrayParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]' + - dsc_types_old_actual.CimInstanceArrayParam.Value|length == 1 + - not dsc_types_old_actual.CimInstanceArrayParam.Value[0].Choice is defined # 1.0.0 does not have a Choice option + - dsc_types_old_actual.CimInstanceArrayParam.Value[0].IntValue == 0 + - dsc_types_old_actual.CimInstanceArrayParam.Value[0].Key == 'old key' + - dsc_types_old_actual.CimInstanceArrayParam.Value[0].StringArrayValue == ['zyx', 'wvu'] + - dsc_types_old_actual.CimInstanceArrayParam.Value[0].StringValue == 'string old 1' + - dsc_types_old_actual.CimInstanceArrayParam.Value[0]._cim_instance == 'ANSIBLE_xTestClass' diff --git a/test/integration/targets/incidental_win_lineinfile/aliases b/test/integration/targets/incidental_win_lineinfile/aliases new file mode 100644 index 00000000..194cbc3f --- /dev/null +++ b/test/integration/targets/incidental_win_lineinfile/aliases @@ -0,0 +1,3 @@
+shippable/windows/incidental +windows +skip/windows/2016 # Host takes a while to run and module isn't OS dependent diff --git a/test/integration/targets/incidental_win_lineinfile/files/test.txt b/test/integration/targets/incidental_win_lineinfile/files/test.txt new file mode 100644 index 00000000..8187db9f --- /dev/null +++ b/test/integration/targets/incidental_win_lineinfile/files/test.txt @@ -0,0 +1,5 @@ +This is line 1 +This is line 2 +REF this is a line for backrefs REF +This is line 4 +This is line 5 diff --git a/test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt b/test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt b/test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_win_lineinfile/files/testempty.txt b/test/integration/targets/incidental_win_lineinfile/files/testempty.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt b/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt new file mode 100644 index 00000000..152780b9 --- /dev/null +++ b/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt @@ -0,0 +1,2 @@ +This is line 1 +This is line 2 \ No newline at end of file diff --git a/test/integration/targets/incidental_win_lineinfile/meta/main.yml b/test/integration/targets/incidental_win_lineinfile/meta/main.yml new file mode 100644 index 00000000..e0ff46db --- /dev/null +++ b/test/integration/targets/incidental_win_lineinfile/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - incidental_win_prepare_tests diff --git a/test/integration/targets/incidental_win_lineinfile/tasks/main.yml b/test/integration/targets/incidental_win_lineinfile/tasks/main.yml new file mode 100644 index 00000000..e5f047be --- /dev/null +++ b/test/integration/targets/incidental_win_lineinfile/tasks/main.yml @@ -0,0 +1,708 @@ +# Test code for the win_lineinfile module, adapted from the standard lineinfile module tests +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
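The tasks that follow all use the same verification pattern: run win_lineinfile against a deployed copy of test.txt, then stat the file and compare its checksum against a precomputed known-good value. A minimal sketch of that pattern, assuming illustrative placeholder values (the path, line text, and hash below are not taken from the tests):

- name: sketch - insert a line after an anchor regexp
  win_lineinfile:
    dest: '{{ win_output_dir }}/example.txt'  # illustrative path
    state: present
    line: New line after line 1
    insertafter: '^This is line 1$'
  register: example_edit

- name: sketch - verify the edit by checksum
  win_stat:
    path: '{{ win_output_dir }}/example.txt'
  register: example_stat

- name: sketch - assert the expected change and hash
  assert:
    that:
    - example_edit is changed
    - example_stat.stat.checksum == 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'  # placeholder hash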
+ + +- name: deploy the test file for lineinfile + win_copy: src=test.txt dest={{win_output_dir}}/test.txt + register: result + +- name: assert that the test file was deployed + assert: + that: + - "result.changed == true" + +- name: stat the test file + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: check win_stat file result + assert: + that: + - "result.stat.exists" + - "not result.stat.isdir" + - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" + - "result is not failed" + - "result is not changed" + + +- name: insert a line at the beginning of the file, and back it up + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the beginning" insertbefore="BOF" backup=yes + register: result + +- name: check backup_file + win_stat: + path: '{{ result.backup_file }}' + register: backup_file + +- name: assert that the line was inserted at the head of the file + assert: + that: + - result.changed == true + - result.msg == 'line added' + - backup_file.stat.exists == true + +- name: stat the backup file + win_stat: path={{result.backup}} + register: result + +- name: assert the backup file matches the previous hash + assert: + that: + - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" + +- name: stat the test after the insert at the head + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test hash is what we expect for the file with the insert at the head + assert: + that: + - "result.stat.checksum == 'b526e2e044defc64dfb0fad2f56e105178f317d8'" + +- name: insert a line at the end of the file + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the end" insertafter="EOF" + register: result + +- name: assert that the line was inserted at the end of the file + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat the test after the insert at the end + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == 'dd5e207e28ce694ab18e41c2b16deb74fde93b14'" + +- name: insert a line after the first line + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line after line 1" insertafter="^This is line 1$" + register: result + +- name: assert that the line was inserted after the first line + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat the test after insert after the first line + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after the insert after the first line + assert: + that: + - "result.stat.checksum == '604b17405f2088e6868af9680b7834087acdc8f4'" + +- name: insert a line before the last line + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line before line 5" insertbefore="^This is line 5$" + register: result + +- name: assert that the line was inserted before the last line + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat the test after the insert before the last line + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after the insert before the last line + assert: + that: + - "result.stat.checksum == '8f5b30e8f01578043d782e5a68d4c327e75a6e34'" + +- name: replace a line with backrefs + win_lineinfile: dest={{win_output_dir}}/test.txt state=present 
line="This is line 3" backrefs=yes regexp="^(REF).*$" + register: result + +- name: assert that the line with backrefs was changed + assert: + that: + - "result.changed == true" + - "result.msg == 'line replaced'" + +- name: stat the test after the backref line was replaced + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after backref line was replaced + assert: + that: + - "result.stat.checksum == 'ef6b02645908511a2cfd2df29d50dd008897c580'" + +- name: remove the middle line + win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 3$" + register: result + +- name: assert that the line was removed + assert: + that: + - "result.changed == true" + - "result.msg == '1 line(s) removed'" + +- name: stat the test after the middle line was removed + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after the middle line was removed + assert: + that: + - "result.stat.checksum == '11695efa472be5c31c736bc43e055f8ac90eabdf'" + +- name: run a validation script that succeeds + win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 5$" validate="sort.exe %s" + register: result + +- name: assert that the file validated after removing a line + assert: + that: + - "result.changed == true" + - "result.msg == '1 line(s) removed'" + +- name: stat the test after the validation succeeded + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after the validation succeeded + assert: + that: + - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'" + +- name: run a validation script that fails + win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 1$" validate="sort.exe %s.foo" + register: result + ignore_errors: yes + +- name: assert that the validate failed + assert: + that: + - "result.failed == true" + +- name: stat the test after the validation failed + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches the previous after the validation failed + assert: + that: + - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'" + +- name: use create=yes + win_lineinfile: dest={{win_output_dir}}/new_test.txt create=yes insertbefore=BOF state=present line="This is a new file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/new_test.txt + register: result + ignore_errors: yes + +- name: assert the newly created test checksum matches + assert: + that: + - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'" + +# Test EOF in cases where file has no newline at EOF +- name: testnoeof deploy the file for lineinfile + win_copy: src=testnoeof.txt dest={{win_output_dir}}/testnoeof.txt + register: result + +- name: testnoeof insert a line at the end of the file + win_lineinfile: dest={{win_output_dir}}/testnoeof.txt state=present line="New line at the end" insertafter="EOF" + register: result + +- name: testnoeof assert that the line was inserted at the end of the file + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: testnoeof stat the no newline EOF test after the insert at the end + win_stat: path={{win_output_dir}}/testnoeof.txt + register: result + +- name:
testnoeof assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == '229852b09f7e9921fbcbb0ee0166ba78f7f7f261'" + +- name: add multiple lines at the end of the file + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="This is a line\r\nwith newline character" insertafter="EOF" + register: result + +- name: assert that the multiple lines were inserted + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat file after adding multiple lines + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after inserting multiple lines + assert: + that: + - "result.stat.checksum == '1401413cd4eac732be66cd6aceddd334c4240f86'" + + + +# Test EOF with empty file to make sure no unnecessary newline is added +- name: testempty deploy the testempty file for lineinfile + win_copy: src=testempty.txt dest={{win_output_dir}}/testempty.txt + register: result + +- name: testempty insert a line at the end of the file + win_lineinfile: dest={{win_output_dir}}/testempty.txt state=present line="New line at the end" insertafter="EOF" + register: result + +- name: testempty assert that the line was inserted at the end of the file + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: testempty stat the test after the insert at the end + win_stat: path={{win_output_dir}}/testempty.txt + register: result + +- name: testempty assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == 'd3d34f11edda51be7ca5dcb0757cf3e1257c0bfe'" + + + +- name: replace a line with backrefs included in the line + win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New $1 created with the backref" backrefs=yes regexp="^This is (line 4)$" + register: result + +- name: assert that the line with backrefs was changed + assert: + that: + - "result.changed == true" + - "result.msg == 'line replaced'" + +- name: stat the test after the backref line was replaced + win_stat: path={{win_output_dir}}/test.txt + register: result + +- name: assert test checksum matches after backref line was replaced + assert: + that: + - "result.stat.checksum == 'e6ff42e926dac2274c93dff0b8a323e07ae09149'" + +################################################################### +# issue 8535 + +- name: create a new file for testing quoting issues + win_copy: src=test_quoting.txt dest={{win_output_dir}}/test_quoting.txt + register: result + +- name: assert the new file was created + assert: + that: + - result.changed + +- name: use with_items to add code-like strings to the quoting txt file + win_lineinfile: > + dest={{win_output_dir}}/test_quoting.txt + line="{{ item }}" + insertbefore="BOF" + with_items: + - "'foo'" + - "dotenv.load();" + - "var dotenv = require('dotenv');" + register: result + +- name: assert the quote test file was modified correctly + assert: + that: + - result.results|length == 3 + - result.results[0].changed + - result.results[0].item == "'foo'" + - result.results[1].changed + - result.results[1].item == "dotenv.load();" + - result.results[2].changed + - result.results[2].item == "var dotenv = require('dotenv');" + +- name: stat the quote test file + win_stat: path={{win_output_dir}}/test_quoting.txt + register: result + +- name: assert test checksum matches for quote test file + assert: + that: + - "result.stat.checksum == 'f3bccdbdfa1d7176c497ef87d04957af40ab48d2'" + +- name: append a line into the quoted
file with a single quote + win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line="import g'" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result.changed + +- name: stat the quote test file + win_stat: path={{win_output_dir}}/test_quoting.txt + register: result + +- name: assert test checksum matches adding line with single quote + assert: + that: + - "result.stat.checksum == 'dabf4cbe471e1797d8dcfc773b6b638c524d5237'" + +- name: insert a line into the quoted file with many double quotation strings + win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line='"quote" and "unquote"' + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result.changed + +- name: stat the quote test file + win_stat: path={{win_output_dir}}/test_quoting.txt + register: result + +- name: assert test checksum matches quoted line added + assert: + that: + - "result.stat.checksum == '9dc1fc1ff19942e2936564102ad37134fa83b91d'" + + +# Windows vs. Unix line separator test cases + +- name: Create windows test file with initial line + win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt create=yes insertbefore=BOF state=present line="This is a new file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_windows_sep.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'" + +- name: Test appending to the file using the default (windows) line separator + win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt insertbefore=EOF state=present line="This is the last line" + register: result + +- name: assert that the new line was added + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_windows_sep.txt + register: result + +- name: assert the file checksum matches expected checksum + assert: + that: + - "result.stat.checksum == '71a17ddd1d57ed7c7912e4fd11ecb2ead0b27033'" + + +- name: Create unix test file with initial line + win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt create=yes insertbefore=BOF state=present line="This is a new file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_unix_sep.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'" + +- name: Test appending to the file using unix line separator + win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt insertbefore=EOF state=present line="This is the last line" newline="unix" + register: result + +- name: assert that the new line was added + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_unix_sep.txt + register: result + +- name: assert the file checksum matches expected checksum + assert: + that: + - "result.stat.checksum == 'f1f634a37ab1c73efb77a71a5ad2cc87b61b17ae'" + + +# Encoding management test cases + +# Default (auto) encoding should use 
utf-8 with no BOM +- name: Test create file without explicit encoding results in utf-8 without BOM + win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt create=yes insertbefore=BOF state=present line="This is a new utf-8 file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-8'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_auto_utf8.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == 'b69fcbacca8291a4668f57fba91d7c022f1c3dc7'" + +- name: Test appending to the utf-8 without BOM file - should autodetect UTF-8 no BOM + win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt insertbefore=EOF state=present line="This is the last line" + register: result + +- name: assert that the new line was added and encoding did not change + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-8'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_auto_utf8.txt + register: result + +- name: assert the file checksum matches + assert: + that: + - "result.stat.checksum == '64d747f1ebf8c9d793dbfd27126e4152d39a3848'" + + +# UTF-8 explicit (with BOM) +- name: Test create file with explicit utf-8 encoding results in utf-8 with a BOM + win_lineinfile: dest={{win_output_dir}}/test_utf8.txt create=yes encoding="utf-8" insertbefore=BOF state=present line="This is a new utf-8 file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-8'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_utf8.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == 'd45344b2b3bf1cf90eae851b40612f5f37a88bbb'" + +- name: Test appending to the utf-8 with BOM file - should autodetect utf-8 with BOM encoding + win_lineinfile: dest={{win_output_dir}}/test_utf8.txt insertbefore=EOF state=present line="This is the last line" + register: result + +- name: assert that the new line was added and encoding did not change + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-8'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_utf8.txt + register: result + +- name: assert the file checksum matches + assert: + that: + - "result.stat.checksum == '9b84254489f40f258871a4c6573cacc65895ee1a'" + + +# UTF-16 explicit +- name: Test create file with explicit utf-16 encoding + win_lineinfile: dest={{win_output_dir}}/test_utf16.txt create=yes encoding="utf-16" insertbefore=BOF state=present line="This is a new utf-16 file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-16'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_utf16.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == '785b0693cec13b60e2c232782adeda2f8a967434'" + +- name: Test appending to the utf-16 file - should autodetect utf-16 encoding + win_lineinfile: dest={{win_output_dir}}/test_utf16.txt insertbefore=EOF 
state=present line="This is the last line" + register: result + +- name: assert that the new line was added and encoding did not change + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-16'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_utf16.txt + register: result + +- name: assert the file checksum matches + assert: + that: + - "result.stat.checksum == '70e4eb3ba795e1ba94d262db47e4fd17c64b2e73'" + +# UTF-32 explicit +- name: Test create file with explicit utf-32 encoding + win_lineinfile: dest={{win_output_dir}}/test_utf32.txt create=yes encoding="utf-32" insertbefore=BOF state=present line="This is a new utf-32 file" + register: result + +- name: assert that the new file was created + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-32'" + +- name: validate that the newly created file exists + win_stat: path={{win_output_dir}}/test_utf32.txt + register: result + +- name: assert the newly created file checksum matches + assert: + that: + - "result.stat.checksum == '7a6e3f3604c0def431aaa813173a4ddaa10fd1fb'" + +- name: Test appending to the utf-32 file - should autodetect utf-32 encoding + win_lineinfile: dest={{win_output_dir}}/test_utf32.txt insertbefore=EOF state=present line="This is the last line" + register: result + +- name: assert that the new line was added and encoding did not change + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + - "result.encoding == 'utf-32'" + +- name: stat the file + win_stat: path={{win_output_dir}}/test_utf32.txt + register: result + +- name: assert the file checksum matches + assert: + that: + - "result.stat.checksum == '66a72e71f42c4775f4326da95cfe82c8830e5022'" + +######################################################################### +# issue #33858 +# \r\n causes line break instead of printing literally which breaks paths. 
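The behaviour exercised below is standard YAML quoting: inside a double-quoted scalar, \r and \n are escape sequences for carriage return and line feed, while a plain (unquoted) or single-quoted scalar keeps the backslashes literal. A short sketch of the two spellings, assuming an illustrative dest path:

- name: sketch - plain scalar, backslashes stay literal
  win_lineinfile:
    dest: '{{ win_output_dir }}/example.txt'  # illustrative path
    line: c:\return\new  # written to the file exactly as c:\return\new

- name: sketch - double-quoted scalar, escapes are expanded
  win_lineinfile:
    dest: '{{ win_output_dir }}/example.txt'
    line: "c:\return\new"  # becomes 'c:' + CR + 'eturn' + LF + 'ew'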
+ +- name: create testing file + win_copy: + src: test_linebreak.txt + dest: "{{win_output_dir}}/test_linebreak.txt" + +- name: stat the test file + win_stat: + path: "{{win_output_dir}}/test_linebreak.txt" + register: result + +# (Get-FileHash -path C:\ansible\test\integration\targets\win_lineinfile\files\test_linebreak.txt -Algorithm sha1).hash.tolower() +- name: check win_stat file result + assert: + that: + - result.stat.exists + - not result.stat.isdir + - result.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + - result is not failed + - result is not changed + +- name: insert path c:\return\new to test file + win_lineinfile: + dest: "{{win_output_dir}}/test_linebreak.txt" + line: c:\return\new + register: result_literal + +- name: insert path "c:\return\new" to test file, will cause line breaks + win_lineinfile: + dest: "{{win_output_dir}}/test_linebreak.txt" + line: "c:\return\new" + register: result_expand + +- name: assert that the lines were inserted + assert: + that: + - result_literal.changed == true + - result_literal.msg == 'line added' + - result_expand.changed == true + - result_expand.msg == 'line added' + +- name: stat the test file + win_stat: + path: "{{win_output_dir}}/test_linebreak.txt" + register: result + +- debug: + var: result + verbosity: 1 + +# expect that the file looks like this: +# c:\return\new +# c: +# eturn +# ew #or c:eturnew on windows +- name: assert that one line is literal and the other has breaks + assert: + that: + - result.stat.checksum == 'd2dfd11bc70526ff13a91153c76a7ae5595a845b' diff --git a/test/integration/targets/incidental_win_ping/aliases b/test/integration/targets/incidental_win_ping/aliases new file mode 100644 index 00000000..a5fc90dc --- /dev/null +++ b/test/integration/targets/incidental_win_ping/aliases @@ -0,0 +1,2 @@ +shippable/windows/incidental +windows diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1 new file mode 100644 index 00000000..f1704964 --- /dev/null +++ b/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1 @@ -0,0 +1,31 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# POWERSHELL_COMMON + +$params = Parse-Args $args $true; + +$data = Get-Attr $params "data" "pong"; + +$result = @{ + changed = $false + ping = "pong" +}; + +# Test that Set-Attr will replace an existing attribute. 
+Set-Attr $result "ping" $data + +Exit-Json $result; diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1 new file mode 100644 index 00000000..508174af --- /dev/null +++ b/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1 @@ -0,0 +1,30 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# POWERSHELL_COMMON + +$params = Parse-Args $args $true; + +$params.thisPropertyDoesNotExist + +$data = Get-Attr $params "data" "pong"; + +$result = @{ + changed = $false + ping = $data +}; + +Exit-Json $result; diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 new file mode 100644 index 00000000..d4c9f07a --- /dev/null +++ b/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 @@ -0,0 +1,30 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# POWERSHELL_COMMON + +$blah = 'I can't quote my strings correctly.' + +$params = Parse-Args $args $true; + +$data = Get-Attr $params "data" "pong"; + +$result = @{ + changed = $false + ping = $data +}; + +Exit-Json $result; diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1 new file mode 100644 index 00000000..7306f4d2 --- /dev/null +++ b/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1 @@ -0,0 +1,30 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# POWERSHELL_COMMON + +throw + +$params = Parse-Args $args $true; + +$data = Get-Attr $params "data" "pong"; + +$result = @{ + changed = $false + ping = $data +}; + +Exit-Json $result; diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1 new file mode 100644 index 00000000..09e3b7cb --- /dev/null +++ b/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1 @@ -0,0 +1,30 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# POWERSHELL_COMMON + +throw "no ping for you" + +$params = Parse-Args $args $true; + +$data = Get-Attr $params "data" "pong"; + +$result = @{ + changed = $false + ping = $data +}; + +Exit-Json $result; diff --git a/test/integration/targets/incidental_win_ping/tasks/main.yml b/test/integration/targets/incidental_win_ping/tasks/main.yml new file mode 100644 index 00000000..a7e6ba7f --- /dev/null +++ b/test/integration/targets/incidental_win_ping/tasks/main.yml @@ -0,0 +1,67 @@ +# test code for the win_ping module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: test win_ping + action: win_ping + register: win_ping_result + +- name: check win_ping result + assert: + that: + - win_ping_result is not failed + - win_ping_result is not changed + - win_ping_result.ping == 'pong' + +- name: test win_ping with data + win_ping: + data: ☠ + register: win_ping_with_data_result + +- name: check win_ping result with data + assert: + that: + - win_ping_with_data_result is not failed + - win_ping_with_data_result is not changed + - win_ping_with_data_result.ping == '☠' + +- name: test win_ping.ps1 with data as complex args + # win_ping.ps1: # TODO: do we want to actually support this? no other tests that I can see... 
+ win_ping: + data: bleep + register: win_ping_ps1_result + +- name: check win_ping.ps1 result with data + assert: + that: + - win_ping_ps1_result is not failed + - win_ping_ps1_result is not changed + - win_ping_ps1_result.ping == 'bleep' + +- name: test win_ping using data=crash so that it throws an exception + win_ping: + data: crash + register: win_ping_crash_result + ignore_errors: yes + +- name: check win_ping_crash result + assert: + that: + - win_ping_crash_result is failed + - win_ping_crash_result is not changed + - 'win_ping_crash_result.msg == "Unhandled exception while executing module: boom"' + - '"throw \"boom\"" in win_ping_crash_result.exception' diff --git a/test/integration/targets/incidental_win_prepare_tests/aliases b/test/integration/targets/incidental_win_prepare_tests/aliases new file mode 100644 index 00000000..136c05e0 --- /dev/null +++ b/test/integration/targets/incidental_win_prepare_tests/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/incidental_win_prepare_tests/meta/main.yml b/test/integration/targets/incidental_win_prepare_tests/meta/main.yml new file mode 100644 index 00000000..cf5427b6 --- /dev/null +++ b/test/integration/targets/incidental_win_prepare_tests/meta/main.yml @@ -0,0 +1,3 @@ +--- + +allow_duplicates: yes diff --git a/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml b/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml new file mode 100644 index 00000000..e87b614b --- /dev/null +++ b/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml @@ -0,0 +1,29 @@ +# test code for the windows versions of copy, file and template module +# originally +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
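One detail worth noting in the cleanup task below: the mandatory filter turns an undefined win_output_dir into a hard error, so the play aborts instead of rendering an empty path and deleting the wrong location. The same guard in isolation (the variable name here is illustrative):

- name: sketch - abort early when a required variable is undefined
  debug:
    msg: 'would clean {{ required_dir | mandatory }}'  # illustrative variable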
+ + +- name: clean out the test directory + win_file: name={{win_output_dir|mandatory}} state=absent + tags: + - prepare + +- name: create the test directory + win_file: name={{win_output_dir}} state=directory + tags: + - prepare diff --git a/test/integration/targets/incidental_win_psexec/aliases b/test/integration/targets/incidental_win_psexec/aliases new file mode 100644 index 00000000..a5fc90dc --- /dev/null +++ b/test/integration/targets/incidental_win_psexec/aliases @@ -0,0 +1,2 @@ +shippable/windows/incidental +windows diff --git a/test/integration/targets/incidental_win_psexec/meta/main.yml b/test/integration/targets/incidental_win_psexec/meta/main.yml new file mode 100644 index 00000000..9f37e96c --- /dev/null +++ b/test/integration/targets/incidental_win_psexec/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/incidental_win_psexec/tasks/main.yml b/test/integration/targets/incidental_win_psexec/tasks/main.yml new file mode 100644 index 00000000..27783f9e --- /dev/null +++ b/test/integration/targets/incidental_win_psexec/tasks/main.yml @@ -0,0 +1,80 @@ +# Would use [] but this has troubles with PATH and trying to find the executable so just resort to keeping a space +- name: record special path for tests + set_fact: + testing_dir: '{{ remote_tmp_dir }}\ansible win_psexec' + +- name: create special path testing dir + win_file: + path: '{{ testing_dir }}' + state: directory + +- name: Download PsExec + win_get_url: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/win_psexec/PsExec.exe + dest: '{{ testing_dir }}\PsExec.exe' + +- name: Get the existing PATH env var + win_shell: '$env:PATH' + register: system_path + changed_when: False + +- name: Run whoami + win_psexec: + command: whoami.exe + nobanner: true + register: whoami + environment: + PATH: '{{ testing_dir }};{{ system_path.stdout | trim }}' + +- name: Test whoami + assert: + that: + - whoami.rc == 0 + - whoami.stdout == '' + # FIXME: Standard output does not work or is truncated + #- whoami.stdout == '{{ ansible_hostname|lower }}' + +- name: Run whoami as SYSTEM + win_psexec: + command: whoami.exe + system: yes + nobanner: true + executable: '{{ testing_dir }}\PsExec.exe' + register: whoami_as_system + # Seems to be a bug with PsExec where the stdout can be empty, just retry the task to make this test a bit more stable + until: whoami_as_system.rc == 0 and whoami_as_system.stdout == 'nt authority\system' + retries: 3 + delay: 2 + +# FIXME: Behaviour is not consistent on all Windows systems +#- name: Run whoami as ELEVATED +# win_psexec: +# command: whoami.exe +# elevated: yes +# register: whoami_as_elevated +# +## Ensure we have basic facts +#- setup: +# +#- debug: +# msg: '{{ whoami_as_elevated.stdout|lower }} == {{ ansible_hostname|lower }}\{{ ansible_user_id|lower }}' +# +#- name: Test whoami +# assert: +# that: +# - whoami_as_elevated.rc == 0 +# - whoami_as_elevated.stdout|lower == '{{ ansible_hostname|lower }}\{{ ansible_user_id|lower }}' + +- name: Run command with multiple arguments + win_psexec: + command: powershell.exe -NonInteractive "exit 1" + ignore_errors: yes + register: whoami_multiple_args + environment: + PATH: '{{ testing_dir }};{{ system_path.stdout | trim }}' + +- name: Test command with multiple arguments + assert: + that: + - whoami_multiple_args.rc == 1 + - whoami_multiple_args.psexec_command == "psexec.exe -accepteula powershell.exe -NonInteractive \"exit 1\"" diff --git
a/test/integration/targets/incidental_win_reboot/aliases b/test/integration/targets/incidental_win_reboot/aliases new file mode 100644 index 00000000..a5fc90dc --- /dev/null +++ b/test/integration/targets/incidental_win_reboot/aliases @@ -0,0 +1,2 @@ +shippable/windows/incidental +windows diff --git a/test/integration/targets/incidental_win_reboot/tasks/main.yml b/test/integration/targets/incidental_win_reboot/tasks/main.yml new file mode 100644 index 00000000..7757e08f --- /dev/null +++ b/test/integration/targets/incidental_win_reboot/tasks/main.yml @@ -0,0 +1,70 @@ +--- +- name: make sure win output dir exists + win_file: + path: "{{win_output_dir}}" + state: directory + +- name: reboot with defaults + win_reboot: + +- name: test with negative values for delays + win_reboot: + post_reboot_delay: -0.5 + pre_reboot_delay: -61 + +- name: schedule a reboot for sometime in the future + win_command: shutdown.exe /r /t 599 + +- name: reboot with a shutdown already scheduled + win_reboot: + +# test a reboot that reboots again during the test_command phase +- name: create test file + win_file: + path: '{{win_output_dir}}\win_reboot_test' + state: touch + +- name: reboot with secondary reboot stage + win_reboot: + test_command: '{{ lookup("template", "post_reboot.ps1") }}' + +- name: reboot with test command that fails + win_reboot: + test_command: 'FAIL' + reboot_timeout: 120 + register: reboot_fail_test + failed_when: "reboot_fail_test.msg != 'Timed out waiting for post-reboot test command (timeout=120)'" + +- name: remove SeRemoteShutdownPrivilege + win_user_right: + name: SeRemoteShutdownPrivilege + users: [] + action: set + register: removed_shutdown_privilege + +- block: + - name: try and reboot without required privilege + win_reboot: + register: fail_privilege + failed_when: + - "'Reboot command failed, error was:' not in fail_privilege.msg" + - "'Access is denied.(5)' not in fail_privilege.msg" + + always: + - name: reset the SeRemoteShutdownPrivilege + win_user_right: + name: SeRemoteShutdownPrivilege + users: '{{ removed_shutdown_privilege.removed }}' + action: add + +- name: Use invalid parameter + reboot: + foo: bar + ignore_errors: true + register: invalid_parameter + +- name: Ensure task fails with error + assert: + that: + - invalid_parameter is failed + - "invalid_parameter.msg == 'Invalid options for reboot: foo'" diff --git a/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 b/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 new file mode 100644 index 00000000..e4a99a72 --- /dev/null +++ b/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 @@ -0,0 +1,8 @@ +if (Test-Path -Path '{{win_output_dir}}\win_reboot_test') { + New-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager' ` + -Name PendingFileRenameOperations ` + -Value @("\??\{{win_output_dir}}\win_reboot_test`0") ` + -PropertyType MultiString + Restart-Computer -Force + exit 1 +} diff --git a/test/integration/targets/incidental_win_security_policy/aliases b/test/integration/targets/incidental_win_security_policy/aliases new file mode 100644 index 00000000..a5fc90dc --- /dev/null +++ b/test/integration/targets/incidental_win_security_policy/aliases @@ -0,0 +1,2 @@ +shippable/windows/incidental +windows diff --git a/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1 b/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1 new file mode 100644 index 
00000000..5c83c1b5 --- /dev/null +++ b/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1 @@ -0,0 +1,53 @@ +#!powershell + +# WANT_JSON +# POWERSHELL_COMMON + +# basic script to get the list of users in a particular right +# this is quite complex to put as a simple script so this is +# just a simple module + +$ErrorActionPreference = 'Stop' + +$params = Parse-Args $args -supports_check_mode $false +$section = Get-AnsibleParam -obj $params -name "section" -type "str" -failifempty $true +$key = Get-AnsibleParam -obj $params -name "key" -type "str" -failifempty $true + +$result = @{ + changed = $false +} + +Function ConvertFrom-Ini($file_path) { + $ini = @{} + switch -Regex -File $file_path { + "^\[(.+)\]" { + $section = $matches[1] + $ini.$section = @{} + } + "(.+?)\s*=(.*)" { + $name = $matches[1].Trim() + $value = $matches[2].Trim() + if ($value -match "^\d+$") { + $value = [int]$value + } elseif ($value.StartsWith('"') -and $value.EndsWith('"')) { + $value = $value.Substring(1, $value.Length - 2) + } + + $ini.$section.$name = $value + } + } + + $ini +} + +$secedit_ini_path = [IO.Path]::GetTempFileName() +&SecEdit.exe /export /cfg $secedit_ini_path /quiet +$secedit_ini = ConvertFrom-Ini -file_path $secedit_ini_path + +if ($secedit_ini.ContainsKey($section)) { + $result.value = $secedit_ini.$section.$key +} else { + $result.value = $null +} + +Exit-Json $result diff --git a/test/integration/targets/incidental_win_security_policy/tasks/main.yml b/test/integration/targets/incidental_win_security_policy/tasks/main.yml new file mode 100644 index 00000000..28fdb5ea --- /dev/null +++ b/test/integration/targets/incidental_win_security_policy/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: get current entry for audit + test_win_security_policy: + section: Event Audit + key: AuditSystemEvents + register: before_value_audit + +- name: get current entry for guest + test_win_security_policy: + section: System Access + key: NewGuestName + register: before_value_guest + +- block: + - name: set AuditSystemEvents entry before tests + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: 0 + + - name: set NewGuestName entry before tests + win_security_policy: + section: System Access + key: NewGuestName + value: Guest + + - name: run tests + include_tasks: tests.yml + + always: + - name: reset entries for AuditSystemEvents + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: "{{before_value_audit.value}}" + + - name: reset entries for NewGuestName + win_security_policy: + section: System Access + key: NewGuestName + value: "{{before_value_guest.value}}" diff --git a/test/integration/targets/incidental_win_security_policy/tasks/tests.yml b/test/integration/targets/incidental_win_security_policy/tasks/tests.yml new file mode 100644 index 00000000..724b6010 --- /dev/null +++ b/test/integration/targets/incidental_win_security_policy/tasks/tests.yml @@ -0,0 +1,186 @@ +--- +- name: fail with invalid section name + win_security_policy: + section: This is not a valid section + key: KeyName + value: 0 + register: fail_invalid_section + failed_when: fail_invalid_section.msg != "The section 'This is not a valid section' does not exist in SecEdit.exe output ini" + +- name: fail with invalid key name + win_security_policy: + section: System Access + key: InvalidKey + value: 0 + register: fail_invalid_key + failed_when: fail_invalid_key.msg != "The key 'InvalidKey' in section 'System Access' is not a valid key, cannot set this
value" + +- name: change existing key check + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: 1 + register: change_existing_check + check_mode: yes + +- name: get actual change existing key check + test_win_security_policy: + section: Event Audit + key: AuditSystemEvents + register: change_existing_actual_check + +- name: assert change existing key check + assert: + that: + - change_existing_check is changed + - change_existing_actual_check.value == 0 + +- name: change existing key + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: 1 + register: change_existing + +- name: get actual change existing key + test_win_security_policy: + section: Event Audit + key: AuditSystemEvents + register: change_existing_actual + +- name: assert change existing key + assert: + that: + - change_existing is changed + - change_existing_actual.value == 1 + +- name: change existing key again + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: 1 + register: change_existing_again + +- name: assert change existing key again + assert: + that: + - change_existing_again is not changed + - change_existing_again.value == 1 + +- name: change existing key with string type + win_security_policy: + section: Event Audit + key: AuditSystemEvents + value: "1" + register: change_existing_key_with_type + +- name: assert change existing key with string type + assert: + that: + - change_existing_key_with_type is not changed + - change_existing_key_with_type.value == "1" + +- name: change existing string key check + win_security_policy: + section: System Access + key: NewGuestName + value: New Guest + register: change_existing_string_check + check_mode: yes + +- name: get actual change existing string key check + test_win_security_policy: + section: System Access + key: NewGuestName + register: change_existing_string_actual_check + +- name: assert change existing string key check + assert: + that: + - change_existing_string_check is changed + - change_existing_string_actual_check.value == "Guest" + +- name: change existing string key + win_security_policy: + section: System Access + key: NewGuestName + value: New Guest + register: change_existing_string + +- name: get actual change existing string key + test_win_security_policy: + section: System Access + key: NewGuestName + register: change_existing_string_actual + +- name: assert change existing string key + assert: + that: + - change_existing_string is changed + - change_existing_string_actual.value == "New Guest" + +- name: change existing string key again + win_security_policy: + section: System Access + key: NewGuestName + value: New Guest + register: change_existing_string_again + +- name: assert change existing string key again + assert: + that: + - change_existing_string_again is not changed + - change_existing_string_again.value == "New Guest" + +- name: add policy setting + win_security_policy: + section: Privilege Rights + # following key is empty by default + key: SeCreateTokenPrivilege + # add Guests + value: '*S-1-5-32-546' + +- name: get actual policy setting + test_win_security_policy: + section: Privilege Rights + key: SeCreateTokenPrivilege + register: add_policy_setting_actual + +- name: assert add policy setting + assert: + that: + - add_policy_setting_actual.value == '*S-1-5-32-546' + +- name: remove policy setting + win_security_policy: + section: Privilege Rights + key: SeCreateTokenPrivilege + value: '' + diff: yes + register: remove_policy_setting + +- name: get actual 
policy setting
+  test_win_security_policy:
+    section: Privilege Rights
+    key: SeCreateTokenPrivilege
+  register: remove_policy_setting_actual
+
+- name: assert remove policy setting
+  assert:
+    that:
+    - remove_policy_setting is changed
+    - remove_policy_setting.diff.prepared == "[Privilege Rights]\n-SeCreateTokenPrivilege = *S-1-5-32-546\n+SeCreateTokenPrivilege = "
+    - remove_policy_setting_actual.value is none
+
+- name: remove policy setting again
+  win_security_policy:
+    section: Privilege Rights
+    key: SeCreateTokenPrivilege
+    value: ''
+  register: remove_policy_setting_again
+
+- name: assert remove policy setting again
+  assert:
+    that:
+    - remove_policy_setting_again is not changed
+    - remove_policy_setting_again.value == ''
diff --git a/test/integration/targets/incidental_xml/aliases b/test/integration/targets/incidental_xml/aliases
new file mode 100644
index 00000000..fc0963c1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/power/centos
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
new file mode 100644
index 00000000..d0e3e39a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="бар">
+  <name>Толстый бар</name>
+  <beers>
+    <beer>Окское</beer>
+    <beer>Невское</beer>
+  </beers>
+  <rating subjective="да">десять</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tolstyybar.com</address>
+  </website>
+</business>
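Each results/*.xml file in the hunks that follow is the exact on-disk state expected after one of the tasks/test-*.yml files (later in this patch) has run against a copy of a fixture. The task files do not shell out to a diff command; they compare files with a copy-in-check-mode idiom, sketched here with illustrative file names:

    - name: Compare to expected result
      copy:
        src: results/expected.xml
        dest: /tmp/actual.xml
      check_mode: yes
      diff: yes
      register: comparison

    - name: Test expected result
      assert:
        that:
          - comparison.changed == false  # identical content

Because that comparison is content-exact, the "\ No newline at end of file" markers in these result hunks are significant: plain in-place edits preserve the fixture's missing final newline, while pretty-printed results gain one.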
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
new file mode 100644
index 00000000..5afc7974
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
new file mode 100644
index 00000000..61747d4b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  </ber:beers>
+  <rat:rating attr:subjective="true">10</rat:rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
new file mode 100644
index 00000000..525330c2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  <beer>Окское</beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
new file mode 100644
index 00000000..f9ff2517
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  <beer>Old Rasputin</beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
new file mode 100644
index 00000000..565ba402
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  <beer>Natty Lite</beer><beer>Miller Lite</beer><beer>Coors Lite</beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
new file mode 100644
index 00000000..8da96336
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Old Rasputin</beer>
+    <beer>Old Motor Oil</beer>
+    <beer>Old Curmudgeon</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
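The insertafter result above and the insertbefore result below come from near-identical tasks: only the placement flag differs, putting the new children after or before the element matched by the XPath. A minimal sketch of the insertafter variant (the full task files appear further down in this patch):

    - name: Add children after the matched beer
      xml:
        path: /tmp/ansible-xml-beers.xml
        xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
        insertafter: yes
        add_children:
          - beer: Old Rasputin
        pretty_print: yes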
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
new file mode 100644
index 00000000..c409e54b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>Old Rasputin</beer>
+    <beer>Old Motor Oil</beer>
+    <beer>Old Curmudgeon</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
new file mode 100644
index 00000000..37465224
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  <beer name="Окское" type="экстра"/></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
new file mode 100644
index 00000000..5a3907f6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  <beer name="Ansible Brew" type="light"/></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
new file mode 100644
index 00000000..fa1ddfca
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+    <beer color="red">George Killian's Irish Red</beer>
+    <beer origin="CZ" color="blonde">Pilsner Urquell</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+    <validxhtml validateon=""/>
+  </website>
+  <phonenumber>555-555-1234</phonenumber>
+  <owner dob="1976-04-12">
+    <name>
+      <last>Smith</last>
+      <first>John</first>
+      <middle>Q</middle>
+    </name>
+  </owner>
+  <website_bis>
+    <validxhtml validateon=""/>
+  </website_bis>
+  <testnormalelement>xml tag with no special characters</testnormalelement>
+  <test-with-dash>xml tag with dashes</test-with-dash>
+  <test-with-dash.and.dot>xml tag with dashes and dots</test-with-dash.and.dot>
+  <test-with.dash_and.dot_and-underscores>xml tag with dashes, dots and underscores</test-with.dash_and.dot_and-underscores>
+</business>
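The result above is built by tasks/test-add-element-implicitly.yml (further down in this patch) purely from XPath expressions: ancestors named in the path that do not exist yet are created on the fly, in the spirit of mkdir -p. One representative step from that test:

    - name: Set a value, implicitly creating the owner/name parents
      xml:
        file: /tmp/ansible-xml-beers-implicit.xml
        xpath: /business/owner/name/last
        value: Smith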
diff --git a/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
new file mode 100644
index 00000000..3d27e8aa
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  <beer>Old Rasputin</beer></ber:beers>
+  <rat:rating attr:subjective="true">10</rat:rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
new file mode 100644
index 00000000..f47909ac
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print.xml b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
new file mode 100644
index 00000000..b5c38262
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+    <beer>Old Rasputin</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
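Unlike the raw-append results, the two pretty-print results above end with a trailing newline because pretty_print re-serialises the whole document (two-space indentation, final newline). A minimal sketch of combining an edit with reformatting, as the pretty-print test does:

    - name: Add a child and reformat the document in one task
      xml:
        path: /tmp/ansible-xml-beers.xml
        xpath: /business/beers
        add_children:
          - beer: Old Rasputin
        pretty_print: yes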
diff --git a/test/integration/targets/incidental_xml/results/test-remove-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
new file mode 100644
index 00000000..8a621cf1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating>10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-element.xml b/test/integration/targets/incidental_xml/results/test-remove-element.xml
new file mode 100644
index 00000000..454d905c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
new file mode 100644
index 00000000..732a0ed2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  </ber:beers>
+  <rat:rating>10</rat:rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
new file mode 100644
index 00000000..16df98e2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  </ber:beers>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
new file mode 100644
index 00000000..de3bc3f6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="нет">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
new file mode 100644
index 00000000..143fe7bf
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="false">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
new file mode 100644
index 00000000..0ef2b7e6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer alcohol="0.5" name="90 Minute IPA"><Water liter="0.2" quantity="200g"/><Starch quantity="10g"/><Hops quantity="50g"/><Yeast quantity="20g"/></beer><beer alcohol="0.3" name="Harvest Pumpkin Ale"><Water liter="0.2" quantity="200g"/><Hops quantity="25g"/><Yeast quantity="20g"/></beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
new file mode 100644
index 00000000..f19d5356
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Окское</beer><beer>Невское</beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
new file mode 100644
index 00000000..be313a5a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>90 Minute IPA</beer><beer>Harvest Pumpkin Ale</beer></beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
new file mode 100644
index 00000000..785beb64
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">10</rating>
+  <website>
+    <mobilefriendly/>
+    <address></address>
+  </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
new file mode 100644
index 00000000..734fe6db
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">пять</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+<rating>пять</rating></business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
new file mode 100644
index 00000000..fc97ec3b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Tasty Beverage Co.</name>
+  <beers>
+    <beer>Rochefort 10</beer>
+    <beer>St. Bernardus Abbot 12</beer>
+    <beer>Schlitz</beer>
+  </beers>
+  <rating subjective="true">5</rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+<rating>5</rating></business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
new file mode 100644
index 00000000..44abda43
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  </ber:beers>
+  <rat:rating attr:subjective="false">10</rat:rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
new file mode 100644
index 00000000..0cc8a79e
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<bus:business xmlns:bus="http://test.business" xmlns:ber="http://test.beers" xmlns:rat="http://test.rating" xmlns:attr="http://test.attribute" type="bar">
+  <name>Tasty Beverage Co.</name>
+  <ber:beers>
+    <ber:beer>Rochefort 10</ber:beer>
+    <ber:beer>St. Bernardus Abbot 12</ber:beer>
+    <ber:beer>Schlitz</ber:beer>
+  </ber:beers>
+  <rat:rating attr:subjective="true">11</rat:rating>
+  <website>
+    <mobilefriendly/>
+    <address>http://tastybeverageco.com</address>
+  </website>
+</bus:business>
\ No newline at end of file diff --git a/test/integration/targets/incidental_xml/tasks/main.yml b/test/integration/targets/incidental_xml/tasks/main.yml new file mode 100644 index 00000000..9b8f2c36 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/main.yml @@ -0,0 +1,67 @@ +- name: Gather facts + setup: + +- name: Install lxml (FreeBSD) + package: + name: '{{ "py27-lxml" if ansible_python.version.major == 2 else "py36-lxml" }}' + state: present + when: ansible_os_family == "FreeBSD" + +# Needed for MacOSX ! +- name: Install lxml + pip: + name: lxml + state: present +# when: ansible_os_family == "Darwin" + +- name: Get lxml version + command: "{{ ansible_python_interpreter }} -c 'from lxml import etree; print(\".\".join(str(v) for v in etree.LXML_VERSION))'" + register: lxml_version + +- name: Set lxml capabilities as variables + set_fact: + # NOTE: Some tests require predictable element attribute order, + # which is only guaranteed starting from lxml v3.0alpha1 + lxml_predictable_attribute_order: '{{ lxml_version.stdout is version("3", ">=") }}' + + # NOTE: The xml module requires at least lxml v2.3.0 + lxml_xpath_attribute_result_attrname: '{{ lxml_version.stdout is version("2.3.0", ">=") }}' + +- name: Only run the tests when lxml v2.3.0+ + when: lxml_xpath_attribute_result_attrname + block: + + - include_tasks: test-add-children-elements.yml + - include_tasks: test-add-children-from-groupvars.yml + - include_tasks: test-add-children-insertafter.yml + - include_tasks: test-add-children-insertbefore.yml + - include_tasks: test-add-children-with-attributes.yml + - include_tasks: test-add-element-implicitly.yml + - include_tasks: test-count.yml + - include_tasks: test-mutually-exclusive-attributes.yml + - include_tasks: test-remove-attribute.yml + - include_tasks: test-remove-element.yml + - include_tasks: test-set-attribute-value.yml + - include_tasks: test-set-children-elements.yml + - include_tasks: test-set-children-elements-level.yml + - include_tasks: test-set-element-value.yml + - include_tasks: test-set-element-value-empty.yml + - include_tasks: test-pretty-print.yml + - include_tasks: test-pretty-print-only.yml + - include_tasks: test-add-namespaced-children-elements.yml + - include_tasks: test-remove-namespaced-attribute.yml + - include_tasks: test-set-namespaced-attribute-value.yml + - include_tasks: test-set-namespaced-element-value.yml + - include_tasks: test-set-namespaced-children-elements.yml + - include_tasks: test-get-element-content.yml + - include_tasks: test-xmlstring.yml + - include_tasks: test-children-elements-xml.yml + + # Unicode tests + - include_tasks: test-add-children-elements-unicode.yml + - include_tasks: test-add-children-with-attributes-unicode.yml + - include_tasks: test-set-attribute-value-unicode.yml + - include_tasks: test-count-unicode.yml + - include_tasks: test-get-element-content.yml + - include_tasks: test-set-children-elements-unicode.yml + - include_tasks: test-set-element-value-unicode.yml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml new file mode 100644 index 00000000..8ad91501 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml @@ -0,0 +1,29 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + 
xpath: /business/beers + add_children: + - beer: Окское + register: add_children_elements_unicode + + - name: Compare to expected result + copy: + src: results/test-add-children-elements-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_elements_unicode.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml new file mode 100644 index 00000000..8d9b0686 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml @@ -0,0 +1,29 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: + - beer: Old Rasputin + register: add_children_elements + + - name: Compare to expected result + copy: + src: results/test-add-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_elements.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml new file mode 100644 index 00000000..e062de8d --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml @@ -0,0 +1,28 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: '{{ bad_beers }}' + register: add_children_from_groupvars + + - name: Compare to expected result + copy: + src: results/test-add-children-from-groupvars.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_from_groupvars.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml new file mode 100644 index 00000000..2d42e2d5 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml @@ -0,0 +1,32 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: '/business/beers/beer[text()="St. 
Bernardus Abbot 12"]' + insertafter: yes + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + pretty_print: yes + register: add_children_insertafter + + - name: Compare to expected result + copy: + src: results/test-add-children-insertafter.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_insertafter.changed == true + - comparison.changed == false # identical diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml new file mode 100644 index 00000000..8550f12c --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml @@ -0,0 +1,32 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]' + insertbefore: yes + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + pretty_print: yes + register: add_children_insertbefore + + - name: Compare to expected result + copy: + src: results/test-add-children-insertbefore.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_insertbefore.changed == true + - comparison.changed == false # identical diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml new file mode 100644 index 00000000..d4a2329f --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml @@ -0,0 +1,31 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: + - beer: + name: Окское + type: экстра + register: add_children_with_attributes_unicode + + - name: Compare to expected result + copy: + src: results/test-add-children-with-attributes-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_children_with_attributes_unicode.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml new file mode 100644 index 00000000..91e92637 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml @@ -0,0 +1,35 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + add_children: + - beer: + name: Ansible Brew + type: light + register: add_children_with_attributes + + - name: Compare to expected result + copy: + src: results/test-add-children-with-attributes.xml + dest: 
/tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + # NOTE: This test may fail if lxml does not support predictable element attribute order + # So we filter the failure out for these platforms (e.g. CentOS 6) + # The module still works fine, we simply are not comparing as smart as we should. + - name: Test expected result + assert: + that: + - add_children_with_attributes.changed == true + - comparison.changed == false # identical + when: lxml_predictable_attribute_order + #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml new file mode 100644 index 00000000..db674ba4 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml @@ -0,0 +1,237 @@ +--- +- name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers-implicit.xml + + +- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/phonenumber + value: 555-555-1234 + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 1/2 + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/owner/name/last + value: Smith + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 2/2 + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/owner/name/first + value: John + +- name: Add a validxhtml element to the website element. Note that ensure is present by default and while value defaults to null for elements, if one doesn't specify it we don't know what to do. + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/website/validxhtml + +- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior. + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/website/validxhtml/@validateon + +- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior. 
+ xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/website_bis/validxhtml/@validateon + +- name: Add an attribute with a value + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/owner/@dob='1976-04-12' + +- name: Add an element with a value, alternate syntax + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/beers/beer/text()="George Killian's Irish Red" # note the quote within an XPath string thing + +- name: Add an element without special characters + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/testnormalelement + value: xml tag with no special characters + pretty_print: yes + +- name: Add an element with dash + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with-dash + value: xml tag with dashes + pretty_print: yes + +- name: Add an element with dot + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with-dash.and.dot + value: xml tag with dashes and dots + pretty_print: yes + +- name: Add an element with underscore + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with.dash_and.dot_and-underscores + value: xml tag with dashes, dots and underscores + pretty_print: yes + +- name: Add an attribute on a conditional element + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/beers/beer[text()="George Killian's Irish Red"]/@color='red' + +- name: Add two attributes on a conditional element + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/beers/beer[text()="Pilsner Urquell" and @origin='CZ']/@color='blonde' + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/owner/name[first/text()='John']/middle + value: Q + +- name: Pretty Print this! + xml: + file: /tmp/ansible-xml-beers-implicit.xml + pretty_print: yes + +- name: Compare to expected result + copy: + src: results/test-add-element-implicitly.yml + dest: /tmp/ansible-xml-beers-implicit.xml + check_mode: yes + diff: yes + register: comparison + +- name: Test expected result + assert: + that: + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml + + +# Now we repeat the same, just to ensure proper use of namespaces +- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:phonenumber + value: 555-555-1234 + namespaces: + a: http://example.com/some/namespace + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 1/2 + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:owner/a:name/a:last + value: Smith + namespaces: + a: http://example.com/some/namespace + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 2/2 + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:owner/a:name/a:first + value: John + namespaces: + a: http://example.com/some/namespace + +- name: Add a validxhtml element to the website element. Note that ensure is present by default and while value defaults to null for elements, if one doesn't specify it we don't know what to do. 
+ xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:website/a:validxhtml + namespaces: + a: http://example.com/some/namespace + +- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior. + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:website/a:validxhtml/@a:validateon + namespaces: + a: http://example.com/some/namespace + +- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior. + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:website_bis/a:validxhtml/@a:validateon + namespaces: + a: http://example.com/some/namespace + +- name: Add an attribute with a value + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:owner/@a:dob='1976-04-12' + namespaces: + a: http://example.com/some/namespace + +- name: Add an element with a value, alternate syntax + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:beers/a:beer/text()="George Killian's Irish Red" # note the quote within an XPath string thing + namespaces: + a: http://example.com/some/namespace + +- name: Add an attribute on a conditional element + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:beers/a:beer[text()="George Killian's Irish Red"]/@a:color='red' + namespaces: + a: http://example.com/some/namespace + +- name: Add two attributes on a conditional element + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:beers/a:beer[text()="Pilsner Urquell" and @a:origin='CZ']/@a:color='blonde' + namespaces: + a: http://example.com/some/namespace + +- name: Add a owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/a:owner/a:name[a:first/text()='John']/a:middle + value: Q + namespaces: + a: http://example.com/some/namespace + +- name: Add an element without special characters + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/testnormalelement + value: xml tag with no special characters + pretty_print: yes + namespaces: + a: http://example.com/some/namespace + + +- name: Add an element with dash + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with-dash + value: xml tag with dashes + pretty_print: yes + namespaces: + a: http://example.com/some/namespace + +- name: Add an element with dot + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with-dash.and.dot + value: xml tag with dashes and dots + pretty_print: yes + namespaces: + a: http://example.com/some/namespace + +- name: Add an element with underscore + xml: + file: /tmp/ansible-xml-beers-implicit.xml + xpath: /business/test-with.dash_and.dot_and-underscores + value: xml tag with dashes, dots and underscores + pretty_print: yes + namespaces: + a: http://example.com/some/namespace + +- name: Pretty Print this! 
+ xml: + file: /tmp/ansible-xml-beers-implicit.xml + pretty_print: yes diff --git a/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml new file mode 100644 index 00000000..25eca47f --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml @@ -0,0 +1,32 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + + + - name: Add namespaced child element + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + add_children: + - beer: Old Rasputin + register: add_namespaced_children_elements + + - name: Compare to expected result + copy: + src: results/test-add-namespaced-children-elements.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - add_namespaced_children_elements.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml new file mode 100644 index 00000000..e63100c4 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml @@ -0,0 +1,30 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element with xml format + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + input_type: xml + add_children: + - 'Old Rasputin' + register: children_elements + + - name: Compare to expected result + copy: + src: results/test-add-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - children_elements.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml new file mode 100644 index 00000000..47a806bf --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml @@ -0,0 +1,19 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers-unicode.xml + dest: /tmp/ansible-xml-beers-unicode.xml + + + - name: Count child element + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/beers/beer + count: yes + register: beers + + - name: Test expected result + assert: + that: + - beers.changed == false + - beers.count == 2 diff --git a/test/integration/targets/incidental_xml/tasks/test-count.yml b/test/integration/targets/incidental_xml/tasks/test-count.yml new file mode 100644 index 00000000..cbc97e32 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-count.yml @@ -0,0 +1,19 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add child element + xml: + path: /tmp/ansible-xml-beers.xml + xpath: 
/business/beers/beer + count: yes + register: beers + + - name: Test expected result + assert: + that: + - beers.changed == false + - beers.count == 3 diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml new file mode 100644 index 00000000..73ae9667 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml @@ -0,0 +1,32 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers-unicode.xml + dest: /tmp/ansible-xml-beers-unicode.xml + + + - name: Get element attributes + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/rating + content: attribute + register: get_element_attribute + + - name: Test expected result + assert: + that: + - get_element_attribute.changed == false + - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да' + + - name: Get element text + xml: + path: /tmp/ansible-xml-beers-unicode.xml + xpath: /business/rating + content: text + register: get_element_text + + - name: Test expected result + assert: + that: + - get_element_text.changed == false + - get_element_text.matches[0]['rating'] == 'десять' diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml new file mode 100644 index 00000000..58ca7767 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml @@ -0,0 +1,52 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Get element attributes + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: attribute + register: get_element_attribute + + - name: Test expected result + assert: + that: + - get_element_attribute.changed == false + - get_element_attribute.matches[0]['rating'] is defined + - get_element_attribute.matches[0]['rating']['subjective'] == 'true' + + # TODO: Remove this in Ansible v2.12 when this incorrect use of attribute is deprecated + - name: Get element attributes + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: attribute + attribute: subjective + register: get_element_attribute_wrong + + - name: Test expected result + assert: + that: + - get_element_attribute_wrong.changed == false + - get_element_attribute_wrong.matches[0]['rating'] is defined + - get_element_attribute_wrong.matches[0]['rating']['subjective'] == 'true' + - get_element_attribute_wrong.deprecations is defined + - get_element_attribute_wrong.deprecations[0].msg == "Parameter 'attribute=subjective' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." 
+ - get_element_attribute_wrong.deprecations[0].version == '2.12' + + - name: Get element text + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + content: text + register: get_element_text + + - name: Test expected result + assert: + that: + - get_element_text.changed == false + - get_element_text.matches[0]['rating'] == '10' diff --git a/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml new file mode 100644 index 00000000..3f24b0ac --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml @@ -0,0 +1,22 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Specify both children to add and a value + xml: + path: /tmp/ansible-xml-beers.xml + add_children: + - child01 + - child02 + value: conflict! + register: module_output + ignore_errors: yes + + - name: Test expected result + assert: + that: + - module_output.changed == false + - module_output.failed == true diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml new file mode 100644 index 00000000..7c0f7d5f --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml @@ -0,0 +1,29 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml.orig + + - name: Remove spaces from test fixture + shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml + + - name: Pretty print without modification + xml: + path: /tmp/ansible-xml-beers.xml + pretty_print: yes + register: pretty_print_only + + - name: Compare to expected result + copy: + src: results/test-pretty-print-only.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - pretty_print_only.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml new file mode 100644 index 00000000..88b618b2 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml @@ -0,0 +1,30 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Pretty print + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + pretty_print: yes + add_children: + - beer: Old Rasputin + register: pretty_print + + - name: Compare to expected result + copy: + src: results/test-pretty-print.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - pretty_print.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml new file mode 100644 index 00000000..9aa395e6 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml @@ -0,0 +1,28 @@ 
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Remove '/business/rating/@subjective'
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/rating/@subjective
+      state: absent
+    register: remove_attribute
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-remove-attribute.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - remove_attribute.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
new file mode 100644
index 00000000..f2e20ea2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
@@ -0,0 +1,28 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Remove '/business/rating'
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/rating
+      state: absent
+    register: remove_element
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-remove-element.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - remove_element.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
new file mode 100644
index 00000000..36682b22
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
@@ -0,0 +1,33 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-namespaced-beers.xml
+      dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+  - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+    xml:
+      path: /tmp/ansible-xml-namespaced-beers.xml
+      xpath: /bus:business/rat:rating/@attr:subjective
+      namespaces:
+        bus: http://test.business
+        ber: http://test.beers
+        rat: http://test.rating
+        attr: http://test.attribute
+      state: absent
+    register: remove_namespaced_attribute
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-remove-namespaced-attribute.xml
+      dest: /tmp/ansible-xml-namespaced-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - remove_namespaced_attribute.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
new file mode 100644
index 00000000..be78af68
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
@@ -0,0 +1,33 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-namespaced-beers.xml
+      dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+  - name: Remove namespaced '/bus:business/rat:rating'
+    xml:
+      path: /tmp/ansible-xml-namespaced-beers.xml
+      xpath: /bus:business/rat:rating
+      namespaces:
+        bus: http://test.business
+        ber: http://test.beers
+        rat: http://test.rating
+        attr: http://test.attribute
+      state: absent
+    register: remove_namespaced_element
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-remove-namespaced-element.xml
+      dest: /tmp/ansible-xml-namespaced-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - remove_namespaced_element.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-remove-namespaced-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
new file mode 100644
index 00000000..dabf72a1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
@@ -0,0 +1,29 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Set '/business/rating/@subjective' to 'нет'
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/rating
+      attribute: subjective
+      value: нет
+    register: set_attribute_value_unicode
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-attribute-value-unicode.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_attribute_value_unicode.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
new file mode 100644
index 00000000..2aa39fe2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
@@ -0,0 +1,29 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Set '/business/rating/@subjective' to 'false'
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/rating
+      attribute: subjective
+      value: 'false'
+    register: set_attribute_value
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-attribute-value.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_attribute_value.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
new file mode 100644
index 00000000..3e2c0adb
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
@@ -0,0 +1,74 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Set child elements
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/beers
+      set_children: &children
+        - beer:
+            alcohol: "0.5"
+            name: 90 Minute IPA
+            _:
+              - Water:
+                  liter: "0.2"
+                  quantity: 200g
+              - Starch:
+                  quantity: 10g
+              - Hops:
+                  quantity: 50g
+              - Yeast:
+                  quantity: 20g
+        - beer:
+            alcohol: "0.3"
+            name: Harvest Pumpkin Ale
+            _:
+              - Water:
+                  liter: "0.2"
+                  quantity: 200g
+              - Hops:
+                  quantity: 25g
+              - Yeast:
+                  quantity: 20g
+    register: set_children_elements_level
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-children-elements-level.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_children_elements_level.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+
+
+  - name: Set child elements (again)
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/beers
+      set_children: *children
+    register: set_children_again
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-children-elements-level.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_children_again.changed == false
+        - comparison.changed == false  # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
new file mode 100644
index 00000000..240b894a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
@@ -0,0 +1,53 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Set child elements
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/beers
+      set_children: &children
+        - beer: Окское
+        - beer: Невское
+    register: set_children_elements_unicode
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-children-elements-unicode.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_children_elements_unicode.changed == true
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+
+
+  - name: Set child elements (again)
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/beers
+      set_children: *children
+    register: set_children_again
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-children-elements-unicode.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected result
+    assert:
+      that:
+        - set_children_again.changed == false
+        - comparison.changed == false  # identical
+    #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
new file mode 100644
index 00000000..7b0f3247
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
@@ -0,0 +1,53 @@
+---
+  - name: Setup test fixture
+    copy:
+      src: fixtures/ansible-xml-beers.xml
+      dest: /tmp/ansible-xml-beers.xml
+
+
+  - name: Set child elements
+    xml:
+      path: /tmp/ansible-xml-beers.xml
+      xpath: /business/beers
+      set_children: &children
+        - beer: 90 Minute IPA
+        - beer: Harvest Pumpkin Ale
+    register: set_children_elements
+
+  - name: Compare to expected result
+    copy:
+      src: results/test-set-children-elements.xml
+      dest: /tmp/ansible-xml-beers.xml
+    check_mode: yes
+    diff: yes
+    register: comparison
+
+  - name: Test expected
result + assert: + that: + - set_children_elements.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml + + + - name: Set child elements (again) + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/beers + set_children: *children + register: set_children_again + + - name: Compare to expected result + copy: + src: results/test-set-children-elements.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - set_children_again.changed == false + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml new file mode 100644 index 00000000..5814803c --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml @@ -0,0 +1,28 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Set '/business/website/address' to empty string. + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/website/address + value: '' + register: set_element_value_empty + + - name: Compare to expected result + copy: + src: results/test-set-element-value-empty.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - set_element_value_empty.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml new file mode 100644 index 00000000..c3a40b7d --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml @@ -0,0 +1,43 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add 2nd '/business/rating' with value 'пять' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business + add_children: + - rating: пять + + - name: Set '/business/rating' to 'пять' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: пять + register: set_element_first_run + + - name: Set '/business/rating' to 'пять'...
again + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: пять + register: set_element_second_run + + - name: Compare to expected result + copy: + src: results/test-set-element-value-unicode.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - set_element_first_run.changed == true + - set_element_second_run.changed == false + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml new file mode 100644 index 00000000..dbd070f1 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml @@ -0,0 +1,43 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-beers.xml + dest: /tmp/ansible-xml-beers.xml + + + - name: Add 2nd '/business/rating' with value '5' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business + add_children: + - rating: '5' + + - name: Set '/business/rating' to '5' + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: '5' + register: set_element_first_run + + - name: Set '/business/rating' to '5'... again + xml: + path: /tmp/ansible-xml-beers.xml + xpath: /business/rating + value: '5' + register: set_element_second_run + + - name: Compare to expected result + copy: + src: results/test-set-element-value.xml + dest: /tmp/ansible-xml-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - set_element_first_run.changed == true + - set_element_second_run.changed == false + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml new file mode 100644 index 00000000..e0086efe --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml @@ -0,0 +1,34 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + + + - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + xpath: /bus:business/rat:rating + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + attribute: attr:subjective + value: 'false' + register: set_namespaced_attribute_value + + - name: Compare to expected result + copy: + src: results/test-set-namespaced-attribute-value.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - set_namespaced_attribute_value.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml new file mode 100644 index 00000000..8e66e70e --- /dev/null 
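Every incidental_xml task file above validates its result the same way: the expected fixture is copied over the file the xml module just modified, with check_mode and diff enabled, so the copy merely reports whether the two files differ, while the YAML anchor/alias pair (&children / *children) feeds the repeat run byte-identical input for the idempotency check. A minimal sketch of that pattern, with hypothetical file names (/tmp/example.xml and results/expected.xml are placeholders, not files from this target):

    - name: Apply an XML change (hypothetical paths throughout)
      xml:
        path: /tmp/example.xml
        xpath: /root/node
        value: new value
      register: change_result

    - name: Compare against the expected fixture without writing anything
      copy:
        src: results/expected.xml   # expected output shipped with the role
        dest: /tmp/example.xml      # the file the xml task just modified
      check_mode: yes               # dry run; changed is true only if the files differ
      diff: yes
      register: comparison

    - name: The change happened and the on-disk result matches the fixture
      assert:
        that:
          - change_result.changed == true
          - comparison.changed == false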
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml @@ -0,0 +1,56 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers-xml.xml + + - name: Set child elements + xml: + path: /tmp/ansible-xml-namespaced-beers-xml.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + set_children: + - beer: 90 Minute IPA + - beer: Harvest Pumpkin Ale + + - name: Copy state after first set_children + copy: + src: /tmp/ansible-xml-namespaced-beers-xml.xml + dest: /tmp/ansible-xml-namespaced-beers-1.xml + remote_src: yes + + - name: Set child elements again + xml: + path: /tmp/ansible-xml-namespaced-beers-xml.xml + xpath: /bus:business/ber:beers + namespaces: + bus: http://test.business + ber: http://test.beers + set_children: + - beer: 90 Minute IPA + - beer: Harvest Pumpkin Ale + register: set_children_again + + - name: Copy state after second set_children + copy: + src: /tmp/ansible-xml-namespaced-beers-xml.xml + dest: /tmp/ansible-xml-namespaced-beers-2.xml + remote_src: yes + + - name: Compare to expected result + copy: + src: /tmp/ansible-xml-namespaced-beers-1.xml + dest: /tmp/ansible-xml-namespaced-beers-2.xml + remote_src: yes + check_mode: yes + diff: yes + register: comparison + #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml + + - name: Test expected result + assert: + that: + - set_children_again.changed == false # idempotency + - comparison.changed == false # identical diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml new file mode 100644 index 00000000..f77d7537 --- /dev/null +++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml @@ -0,0 +1,46 @@ +--- + - name: Setup test fixture + copy: + src: fixtures/ansible-xml-namespaced-beers.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + + + - name: Set namespaced '/bus:business/rat:rating' to '11' + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + xpath: /bus:business/rat:rating + value: '11' + register: set_element_first_run + + - name: Set namespaced '/bus:business/rat:rating' to '11' again + xml: + path: /tmp/ansible-xml-namespaced-beers.xml + namespaces: + bus: http://test.business + ber: http://test.beers + rat: http://test.rating + attr: http://test.attribute + xpath: /bus:business/rat:rating + value: '11' + register: set_element_second_run + + - name: Compare to expected result + copy: + src: results/test-set-namespaced-element-value.xml + dest: /tmp/ansible-xml-namespaced-beers.xml + check_mode: yes + diff: yes + register: comparison + #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml + + - name: Test expected result + assert: + that: + - set_element_first_run.changed == true + - set_element_second_run.changed == false + - comparison.changed == false # identical diff --git a/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml new file mode 100644 index 00000000..4620d984 --- /dev/null +++
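Each namespaced task above repeats the same prefix-to-URI map. The prefixes (bus, ber, rat, attr) are local to the task: they must cover every prefix used in that task's xpath and attribute names, but they do not need to match the prefixes declared in the document itself. The repetition could be factored into a variable; a sketch, assuming a hypothetical variable name beer_namespaces:

    # Defined once in vars (beer_namespaces is an assumed name; URIs as in the fixtures above)
    beer_namespaces:
      bus: http://test.business
      ber: http://test.beers
      rat: http://test.rating
      attr: http://test.attribute

    # Reused by any namespaced XPath task
    - name: Set a namespaced element value
      xml:
        path: /tmp/ansible-xml-namespaced-beers.xml
        xpath: /bus:business/rat:rating
        namespaces: "{{ beer_namespaces }}"
        value: '11'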
b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml @@ -0,0 +1,81 @@ +--- + - name: Copy expected results to remote + copy: + src: "results/{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - test-pretty-print.xml + - test-pretty-print-only.xml + + # NOTE: Jinja2 templating eats trailing newlines + - name: Read from xmlstring (not using pretty_print) + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + xpath: . + register: xmlresponse + + - name: Compare to expected result + copy: + content: "{{ xmlresponse.xmlstring }}\n" + dest: '/tmp/test-pretty-print-only.xml' + check_mode: yes + diff: yes + register: comparison + + - name: Test expected result + assert: + that: + - xmlresponse.changed == false + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml + + + # NOTE: Jinja2 templating eats trailing newlines + - name: Read from xmlstring (using pretty_print) + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + pretty_print: yes + register: xmlresponse + + - name: Compare to expected result + copy: + content: '{{ xmlresponse.xmlstring }}' + dest: '/tmp/test-pretty-print-only.xml' + check_mode: yes + diff: yes + register: comparison + + # FIXME: This change is related to the newline added by pretty_print + - name: Test expected result + assert: + that: + - xmlresponse.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml + + + # NOTE: Jinja2 templating eats trailing newlines + - name: Read from xmlstring + xml: + xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}" + xpath: /business/beers + pretty_print: yes + add_children: + - beer: Old Rasputin + register: xmlresponse_modification + + - name: Compare to expected result + copy: + content: '{{ xmlresponse_modification.xmlstring }}' + dest: '/tmp/test-pretty-print.xml' + check_mode: yes + diff: yes + register: comparison + + # FIXME: This change is related to the newline added by pretty_print + - name: Test expected result + assert: + that: + - xmlresponse_modification.changed == true + - comparison.changed == false # identical + #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/test/integration/targets/incidental_xml/vars/main.yml b/test/integration/targets/incidental_xml/vars/main.yml new file mode 100644 index 00000000..7c5675bd --- /dev/null +++ b/test/integration/targets/incidental_xml/vars/main.yml @@ -0,0 +1,6 @@ +# -*- mode: yaml -*- +--- +bad_beers: +- beer: "Natty Lite" +- beer: "Miller Lite" +- beer: "Coors Lite" diff --git a/test/integration/targets/include_import/aliases b/test/integration/targets/include_import/aliases new file mode 100644 index 00000000..fff62d9f --- /dev/null +++ b/test/integration/targets/include_import/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +skip/aix diff --git a/test/integration/targets/include_import/apply/import_apply.yml b/test/integration/targets/include_import/apply/import_apply.yml new file mode 100644 index 00000000..27a40861 --- /dev/null +++ b/test/integration/targets/include_import/apply/import_apply.yml @@ -0,0 +1,31 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - import_tasks: + file: import_tasks.yml + apply: + tags: + - foo + tags: + - always + + - assert: + that: + - include_tasks_result is defined +
tags: + - always + + - import_role: + name: import_role + apply: + tags: + - foo + tags: + - always + + - assert: + that: + - include_role_result is defined + tags: + - always diff --git a/test/integration/targets/include_import/apply/include_apply.yml b/test/integration/targets/include_import/apply/include_apply.yml new file mode 100644 index 00000000..32c6e5e9 --- /dev/null +++ b/test/integration/targets/include_import/apply/include_apply.yml @@ -0,0 +1,50 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - include_tasks: + file: include_tasks.yml + apply: + tags: + - foo + tags: + - always + + - assert: + that: + - include_tasks_result is defined + tags: + - always + + - include_role: + name: include_role + apply: + tags: + - foo + tags: + - always + + - assert: + that: + - include_role_result is defined + tags: + - always + + - include_role: + name: include_role2 + apply: + tags: + - foo + tags: + - not_specified_on_purpose + + - assert: + that: + - include_role2_result is undefined + tags: + - always + + - include_role: + name: include_role + apply: + delegate_to: testhost2 diff --git a/test/integration/targets/include_import/apply/include_tasks.yml b/test/integration/targets/include_import/apply/include_tasks.yml new file mode 100644 index 00000000..be511d1e --- /dev/null +++ b/test/integration/targets/include_import/apply/include_tasks.yml @@ -0,0 +1,2 @@ +- set_fact: + include_tasks_result: true diff --git a/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml b/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml new file mode 100644 index 00000000..7f86b264 --- /dev/null +++ b/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml @@ -0,0 +1,2 @@ +- set_fact: + include_role_result: true diff --git a/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml b/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml new file mode 100644 index 00000000..028c30d5 --- /dev/null +++ b/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml @@ -0,0 +1,2 @@ +- set_fact: + include_role2_result: true diff --git a/test/integration/targets/include_import/empty_group_warning/playbook.yml b/test/integration/targets/include_import/empty_group_warning/playbook.yml new file mode 100644 index 00000000..6da5b7c3 --- /dev/null +++ b/test/integration/targets/include_import/empty_group_warning/playbook.yml @@ -0,0 +1,13 @@ +--- +- hosts: localhost + gather_facts: false + tasks: + - name: Group + group_by: + key: test_{{ inventory_hostname }} + +- hosts: test_localhost + gather_facts: false + tasks: + - name: Print + import_tasks: tasks.yml diff --git a/test/integration/targets/include_import/empty_group_warning/tasks.yml b/test/integration/targets/include_import/empty_group_warning/tasks.yml new file mode 100644 index 00000000..2fbad773 --- /dev/null +++ b/test/integration/targets/include_import/empty_group_warning/tasks.yml @@ -0,0 +1,3 @@ +- name: test + debug: + msg: hello diff --git a/test/integration/targets/include_import/grandchild/block_include_tasks.yml b/test/integration/targets/include_import/grandchild/block_include_tasks.yml new file mode 100644 index 00000000..f8addcf4 --- /dev/null +++ b/test/integration/targets/include_import/grandchild/block_include_tasks.yml @@ -0,0 +1,2 @@ +- command: "true" + register: block_include_result diff --git a/test/integration/targets/include_import/grandchild/import.yml 
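The apply playbooks above turn on one asymmetry: import_tasks and import_role are resolved at parse time, so tags set on them propagate into the imported tasks, whereas include_tasks and include_role are resolved at run time, so tags on the include only gate the include task itself. The apply keyword bridges that gap by pushing task keywords down onto the dynamically included content. Condensed from include_apply.yml above:

    - include_tasks:
        file: include_tasks.yml
        apply:
          tags:
            - foo      # the included tasks now behave as if tagged 'foo'
      tags:
        - always       # keeps the include task itself from being skipped under --tags foo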
b/test/integration/targets/include_import/grandchild/import.yml new file mode 100644 index 00000000..ef6990e2 --- /dev/null +++ b/test/integration/targets/include_import/grandchild/import.yml @@ -0,0 +1 @@ +- include_tasks: include_level_1.yml diff --git a/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml b/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml new file mode 100644 index 00000000..dae3a245 --- /dev/null +++ b/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml @@ -0,0 +1,2 @@ +- command: "true" + register: import_include_include_result diff --git a/test/integration/targets/include_import/grandchild/include_level_1.yml b/test/integration/targets/include_import/grandchild/include_level_1.yml new file mode 100644 index 00000000..e323511f --- /dev/null +++ b/test/integration/targets/include_import/grandchild/include_level_1.yml @@ -0,0 +1 @@ +- include_tasks: import_include_include_tasks.yml diff --git a/test/integration/targets/include_import/handler_addressing/playbook.yml b/test/integration/targets/include_import/handler_addressing/playbook.yml new file mode 100644 index 00000000..7515dc99 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/playbook.yml @@ -0,0 +1,11 @@ +- hosts: localhost + gather_facts: false + tasks: + - import_role: + name: include_handler_test + +- hosts: localhost + gather_facts: false + tasks: + - import_role: + name: import_handler_test diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml new file mode 100644 index 00000000..95524ed4 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml @@ -0,0 +1,2 @@ +- name: do_import + import_tasks: tasks/handlers.yml diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml new file mode 100644 index 00000000..eeb49ff6 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml @@ -0,0 +1,2 @@ +- debug: + msg: import handler task diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml new file mode 100644 index 00000000..b0312cc2 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml @@ -0,0 +1,3 @@ +- command: "true" + notify: + - do_import diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml new file mode 100644 index 00000000..7f24b9d5 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml @@ -0,0 +1,2 @@ +- name: do_include + include_tasks: tasks/handlers.yml diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml 
b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml new file mode 100644 index 00000000..2bf07f23 --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml @@ -0,0 +1,2 @@ +- debug: + msg: include handler task diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml new file mode 100644 index 00000000..c29a787c --- /dev/null +++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml @@ -0,0 +1,3 @@ +- command: "true" + notify: + - do_include diff --git a/test/integration/targets/include_import/inventory b/test/integration/targets/include_import/inventory new file mode 100644 index 00000000..3ae8d9c3 --- /dev/null +++ b/test/integration/targets/include_import/inventory @@ -0,0 +1,6 @@ +[local] +testhost ansible_connection=local host_var_role_name=role3 +testhost2 ansible_connection=local host_var_role_name=role2 + +[local:vars] +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/include_import/nestedtasks/nested/nested.yml b/test/integration/targets/include_import/nestedtasks/nested/nested.yml new file mode 100644 index 00000000..95fe2660 --- /dev/null +++ b/test/integration/targets/include_import/nestedtasks/nested/nested.yml @@ -0,0 +1,2 @@ +--- +- include_role: {name: nested_include_task} diff --git a/test/integration/targets/include_import/parent_templating/playbook.yml b/test/integration/targets/include_import/parent_templating/playbook.yml new file mode 100644 index 00000000..b7330206 --- /dev/null +++ b/test/integration/targets/include_import/parent_templating/playbook.yml @@ -0,0 +1,11 @@ +# https://github.com/ansible/ansible/issues/49969 +- hosts: localhost + gather_facts: false + tasks: + - include_role: + name: test + public: true + + - assert: + that: + - included_other is defined diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml new file mode 100644 index 00000000..e5b281e7 --- /dev/null +++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml @@ -0,0 +1 @@ +- include_tasks: other.yml diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml new file mode 100644 index 00000000..16fba69a --- /dev/null +++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml @@ -0,0 +1 @@ +- include_tasks: "{{ lookup('first_found', inventory_hostname ~ '.yml') }}" diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml new file mode 100644 index 00000000..c3bae1a5 --- /dev/null +++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml @@ -0,0 +1,2 @@ +- set_fact: + included_other: true diff --git a/test/integration/targets/include_import/playbook/group_vars/all.yml b/test/integration/targets/include_import/playbook/group_vars/all.yml new file mode 100644 index 00000000..9acd8c64 --- /dev/null +++ 
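The handler_addressing fixtures pin down that a handler may itself be an include_tasks or import_tasks: tasks notify the handler by its name, and the file the handler points at supplies the actual work. Condensed from the include variant above:

    # handlers/main.yml
    - name: do_include                   # tasks notify this name, not the file name
      include_tasks: tasks/handlers.yml

    # tasks/main.yml
    - command: "true"
      notify:
        - do_include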
b/test/integration/targets/include_import/playbook/group_vars/all.yml @@ -0,0 +1 @@ +group_var1: set in group_vars/all.yml diff --git a/test/integration/targets/include_import/playbook/playbook1.yml b/test/integration/targets/include_import/playbook/playbook1.yml new file mode 100644 index 00000000..55c66d80 --- /dev/null +++ b/test/integration/targets/include_import/playbook/playbook1.yml @@ -0,0 +1,9 @@ +- name: Playbook 1 + hosts: testhost2 + + tasks: + - name: Set fact in playbook 1 + set_fact: + canary_var1: playbook1 imported + tags: + - canary1 diff --git a/test/integration/targets/include_import/playbook/playbook2.yml b/test/integration/targets/include_import/playbook/playbook2.yml new file mode 100644 index 00000000..c986165e --- /dev/null +++ b/test/integration/targets/include_import/playbook/playbook2.yml @@ -0,0 +1,9 @@ +- name: Playbook 2 + hosts: testhost2 + + tasks: + - name: Set fact in playbook 2 + set_fact: + canary_var2: playbook2 imported + tags: + - canary2 diff --git a/test/integration/targets/include_import/playbook/playbook3.yml b/test/integration/targets/include_import/playbook/playbook3.yml new file mode 100644 index 00000000..b62b96c3 --- /dev/null +++ b/test/integration/targets/include_import/playbook/playbook3.yml @@ -0,0 +1,10 @@ +- name: Playbook 3 + hosts: testhost2 + + tasks: + - name: Set fact in playbook 3 + set_fact: + canary_var3: playbook3 imported + include_next_playbook: yes + tags: + - canary3 diff --git a/test/integration/targets/include_import/playbook/playbook4.yml b/test/integration/targets/include_import/playbook/playbook4.yml new file mode 100644 index 00000000..330612a9 --- /dev/null +++ b/test/integration/targets/include_import/playbook/playbook4.yml @@ -0,0 +1,9 @@ +- name: Playbook 4 + hosts: testhost2 + + tasks: + - name: Set fact in playbook 4 + set_fact: + canary_var4: playbook4 imported + tags: + - canary4 diff --git a/test/integration/targets/include_import/playbook/playbook_needing_vars.yml b/test/integration/targets/include_import/playbook/playbook_needing_vars.yml new file mode 100644 index 00000000..6454502b --- /dev/null +++ b/test/integration/targets/include_import/playbook/playbook_needing_vars.yml @@ -0,0 +1,6 @@ +--- +- hosts: testhost + gather_facts: no + tasks: + - import_role: + name: "{{ import_playbook_role_name }}" diff --git a/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml b/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml new file mode 100644 index 00000000..77554399 --- /dev/null +++ b/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml @@ -0,0 +1,2 @@ +- debug: + msg: in import_playbook_role diff --git a/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py b/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py new file mode 100644 index 00000000..0ebe690d --- /dev/null +++ b/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py @@ -0,0 +1,30 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule(argument_spec={}) + + module.exit_json(msg='Hello, World!') + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml b/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml new file mode 100644 index 00000000..4399d935 --- /dev/null +++ b/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: false + tasks: + - helloworld: diff --git a/test/integration/targets/include_import/playbook/test_import_playbook.yml b/test/integration/targets/include_import/playbook/test_import_playbook.yml new file mode 100644 index 00000000..a5894608 --- /dev/null +++ b/test/integration/targets/include_import/playbook/test_import_playbook.yml @@ -0,0 +1,26 @@ +# Test that additional parameters after import_playbook raise a warning & have their whitespace stripped +- import_playbook: playbook1.yml tags=test_import + +# Test and validate playbook import +- import_playbook: playbook1.yml +- import_playbook: validate1.yml + + +# Test and validate conditional import +- import_playbook: playbook2.yml + when: no + +- import_playbook: validate2.yml + +- import_playbook: playbook3.yml +- import_playbook: playbook4.yml + when: include_next_playbook + +- import_playbook: validate34.yml + +- import_playbook: playbook_needing_vars.yml + vars: + import_playbook_role_name: import_playbook_role + +# https://github.com/ansible/ansible/issues/59548 +- import_playbook: sub_playbook/sub_playbook.yml diff --git a/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml b/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml new file mode 100644 index 00000000..46136f6f --- /dev/null +++ b/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml @@ -0,0 +1,10 @@ +- import_playbook: playbook1.yml # Test tag in tasks in included play +- import_playbook: playbook2.yml # Test tag added to import_playbook + tags: + - canary22 + +- import_playbook: playbook3.yml # Test skipping tags added to import_playbook + tags: + - skipme + +- import_playbook: validate_tags.yml # Validate diff --git a/test/integration/targets/include_import/playbook/validate1.yml b/test/integration/targets/include_import/playbook/validate1.yml new file mode 100644 index 00000000..0018344d --- /dev/null +++ b/test/integration/targets/include_import/playbook/validate1.yml @@ -0,0 +1,10 @@ +- hosts: testhost2 + + tasks: + - name: Assert that variable was set in playbook1.yml + assert: + that: + - canary_var1 == 'playbook1 imported' + tags: + - validate + - validate1 diff --git a/test/integration/targets/include_import/playbook/validate2.yml b/test/integration/targets/include_import/playbook/validate2.yml new file mode 100644 index 00000000..f22bcb6e --- /dev/null +++ b/test/integration/targets/include_import/playbook/validate2.yml @@ -0,0 +1,10 @@ +- hosts: testhost2 + + tasks: + - name: Assert that
playbook2.yml was skipped + assert: + that: + - canary_var2 is not defined + tags: + - validate + - validate2 diff --git a/test/integration/targets/include_import/playbook/validate34.yml b/test/integration/targets/include_import/playbook/validate34.yml new file mode 100644 index 00000000..fd53a305 --- /dev/null +++ b/test/integration/targets/include_import/playbook/validate34.yml @@ -0,0 +1,11 @@ +- hosts: testhost2 + + tasks: + - name: Assert that playbook3.yml and playbook4.yml were imported + assert: + that: + - canary_var3 == 'playbook3 imported' + - canary_var4 == 'playbook4 imported' + tags: + - validate + - validate34 diff --git a/test/integration/targets/include_import/playbook/validate_tags.yml b/test/integration/targets/include_import/playbook/validate_tags.yml new file mode 100644 index 00000000..acdcb1f2 --- /dev/null +++ b/test/integration/targets/include_import/playbook/validate_tags.yml @@ -0,0 +1,11 @@ +- hosts: testhost2 + + tasks: + - name: Assert that only tasks with tags were run + assert: + that: + - canary_var1 == 'playbook1 imported' + - canary_var2 == 'playbook2 imported' + - canary_var3 is not defined + tags: + - validate diff --git a/test/integration/targets/include_import/public_exposure/no_bleeding.yml b/test/integration/targets/include_import/public_exposure/no_bleeding.yml new file mode 100644 index 00000000..b9db7132 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/no_bleeding.yml @@ -0,0 +1,25 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - name: Static imports should expose vars at parse time, not at execution time + assert: + that: + - static_defaults_var == 'static_defaults' + - static_vars_var == 'static_vars' + - import_role: + name: static + - assert: + that: + - static_tasks_var == 'static_tasks' + - static_defaults_var == 'static_defaults' + - static_vars_var == 'static_vars' + +- hosts: testhost + gather_facts: false + tasks: + - name: Ensure vars from import_roles do not bleed between plays + assert: + that: + - static_defaults_var is undefined + - static_vars_var is undefined diff --git a/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml b/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml new file mode 100644 index 00000000..6a1d9bff --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: false + roles: + - call_import diff --git a/test/integration/targets/include_import/public_exposure/playbook.yml b/test/integration/targets/include_import/public_exposure/playbook.yml new file mode 100644 index 00000000..11735e77 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/playbook.yml @@ -0,0 +1,56 @@ +--- +- hosts: testhost + gather_facts: false + roles: + - regular + tasks: + - debug: + msg: start tasks + + - name: Static imports should expose vars at parse time, not at execution time + assert: + that: + - static_defaults_var == 'static_defaults' + - static_vars_var == 'static_vars' + - import_role: + name: static + - assert: + that: + - static_tasks_var == 'static_tasks' + - static_defaults_var == 'static_defaults' + - static_vars_var == 'static_vars' + + - include_role: + name: dynamic_private + - assert: + that: + - private_tasks_var == 'private_tasks' + - private_defaults_var is undefined + - private_vars_var is undefined + + - name: Dynamic include should not expose vars until execution time + assert: + that: + - dynamic_tasks_var is
undefined + - dynamic_defaults_var is undefined + - dynamic_vars_var is undefined + - include_role: + name: dynamic + public: true + - assert: + that: + - dynamic_tasks_var == 'dynamic_tasks' + - dynamic_defaults_var == 'dynamic_defaults' + - dynamic_vars_var == 'dynamic_vars' + + - include_role: + name: from + public: true + tasks_from: from.yml + vars_from: from.yml + defaults_from: from.yml + - assert: + that: + - from_tasks_var == 'from_tasks' + - from_defaults_var == 'from_defaults' + - from_vars_var == 'from_vars' diff --git a/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml new file mode 100644 index 00000000..d6b28f09 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml @@ -0,0 +1,6 @@ +- import_role: + name: regular + +- assert: + that: + - regular_defaults_var is defined diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml new file mode 100644 index 00000000..099ac29b --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml @@ -0,0 +1 @@ +dynamic_defaults_var: dynamic_defaults diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml new file mode 100644 index 00000000..e9b9ad3d --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml @@ -0,0 +1,5 @@ +- debug: + msg: dynamic + +- set_fact: + dynamic_tasks_var: dynamic_tasks diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml new file mode 100644 index 00000000..b33c12df --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml @@ -0,0 +1 @@ +dynamic_vars_var: dynamic_vars diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml new file mode 100644 index 00000000..b19ef72c --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml @@ -0,0 +1 @@ +private_defaults_var: private_defaults diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml new file mode 100644 index 00000000..1c7f653d --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml @@ -0,0 +1,5 @@ +- debug: + msg: private + +- set_fact: + private_tasks_var: private_tasks diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml new file mode 100644 index 00000000..60f7ca81 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml @@ -0,0 +1 @@ +private_vars_var: private_vars diff --git 
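The from role used above exercises the three entry-point selectors: tasks_from, vars_from and defaults_from each name a file inside the matching role subdirectory, replacing main.yml for that single include. As written in public_exposure/playbook.yml:

    - include_role:
        name: from
        public: true             # expose the role's vars/defaults to the rest of the play
        tasks_from: from.yml     # roles/from/tasks/from.yml
        vars_from: from.yml      # roles/from/vars/from.yml
        defaults_from: from.yml  # roles/from/defaults/from.yml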
a/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml new file mode 100644 index 00000000..6729c4b4 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml @@ -0,0 +1 @@ +from_defaults_var: from_defaults diff --git a/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml new file mode 100644 index 00000000..932efc9f --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml @@ -0,0 +1,5 @@ +- debug: + msg: from + +- set_fact: + from_tasks_var: from_tasks diff --git a/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml new file mode 100644 index 00000000..98b2ad47 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml @@ -0,0 +1 @@ +from_vars_var: from_vars diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml new file mode 100644 index 00000000..21a6967c --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml @@ -0,0 +1 @@ +regular_defaults_var: regular_defaults diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml new file mode 100644 index 00000000..eafa141a --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml @@ -0,0 +1,5 @@ +- debug: + msg: regular + +- set_fact: + regular_tasks_var: regular_tasks diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml new file mode 100644 index 00000000..3d06546f --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml @@ -0,0 +1 @@ +regular_vars_var: regular_vars diff --git a/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml new file mode 100644 index 00000000..d88f5559 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml @@ -0,0 +1 @@ +static_defaults_var: static_defaults diff --git a/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml new file mode 100644 index 00000000..5a6488c1 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml @@ -0,0 +1,5 @@ +- debug: + msg: static + +- set_fact: + static_tasks_var: static_tasks diff --git a/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml new file mode 100644 index 00000000..982e34d0 --- /dev/null +++ b/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml @@ -0,0 +1 @@ +static_vars_var: static_vars diff --git 
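Together these roles pin down the exposure rules checked by public_exposure/playbook.yml: import_role publishes role defaults and vars at parse time; include_role keeps them private unless public: true, and even then they become visible only to tasks that run after the include. A minimal sketch of the runtime case, using the dynamic role above:

    - hosts: testhost
      gather_facts: false
      tasks:
        - assert:
            that:
              - dynamic_defaults_var is undefined   # not visible before the include runs
        - include_role:
            name: dynamic
            public: true
        - debug:
            msg: "{{ dynamic_defaults_var }}"       # visible from here on: 'dynamic_defaults'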
a/test/integration/targets/include_import/role/test_import_role.yml b/test/integration/targets/include_import/role/test_import_role.yml new file mode 100644 index 00000000..d45ff79b --- /dev/null +++ b/test/integration/targets/include_import/role/test_import_role.yml @@ -0,0 +1,139 @@ +- name: Test import_role + hosts: testhost + + vars: + run_role: yes + do_not_run_role: no + role_name: role1 + test_var: templating test in playbook + role_vars: + where_am_i_defined: in the playbook + entire_task: + include_role: + name: role1 + + tasks: + - name: Test basic role import + import_role: + name: role1 + + - name: Assert that basic include works + assert: + that: + - _role1_result.msg == 'In role1' + + - name: Test conditional role include + import_role: + name: role1 + tasks_from: canary1.yml + when: run_role + + - name: Assert that role ran + assert: + that: + - role1_canary1 == 'r1c1' + + - name: Test conditional role import that should be skipped + import_role: + name: role1 + tasks_from: canary2.yml + when: do_not_run_role + + - name: Assert that role did not run + assert: + that: + - role1_canary2 is not defined + + # FIXME We expect this to fail, but I'm not sure how best to test for + # syntax level failures. + # + # - name: Test role import with a loop + # import_role: + # name: "{{ item }}" + # register: loop_test + # with_items: + # - role1 + # - role3 + # - role2 + + - name: Test importing a task file from a role + import_role: + name: role1 + tasks_from: tasks.yml + + - name: Test importing vars file and tasks file from a role + import_role: + name: role3 + tasks_from: vartest.yml + vars_from: role3vars.yml + + - name: Assert that variables defined in previous task are available to play + assert: + that: + - role3_default == 'defined in role3/defaults/main.yml' + - role3_main == 'defined in role3/vars/main.yml' + - role3_var == 'defined in role3/vars/role3vars.yml' + ignore_errors: yes + + - name: Test using a play variable for role name + import_role: + name: "{{ role_name }}" + + # FIXME Trying to use a host_var here causes play execution to fail because + # the variable is undefined. + # + # - name: Test using a host variable for role name + # import_role: + # name: "{{ host_var_role_name }}" + + - name: Pass variable to role + import_role: + name: role1 + tasks_from: vartest.yml + vars: + where_am_i_defined: in the task + + ## FIXME Currently failing + ## ERROR! Vars in a IncludeRole must be specified as a dictionary, or a list of dictionaries + # - name: Pass all variables in a variable to role + # import_role: + # name: role1 + # tasks_from: vartest.yml + # vars: "{{ role_vars }}" + + - name: Pass templated variable to a role + import_role: + name: role1 + tasks_from: vartest.yml + vars: + where_am_i_defined: "{{ test_var }}" + + # FIXME This fails with the following error: + # The module {u'import_role': {u'name': u'role1'}} was not found in configured module paths. 
+ # + - name: Include an entire task + action: + module: "{{ entire_task }}" + tags: + - never + + - block: + - name: Include a role that will fail + import_role: + name: role1 + tasks_from: fail.yml + + rescue: + - name: Include a role inside rescue + import_role: + name: role2 + + always: + - name: Include role inside always + import_role: + name: role3 + + - name: Test delegate_to handler is delegated + import_role: + name: delegated_handler + delegate_to: localhost diff --git a/test/integration/targets/include_import/role/test_include_role.yml b/test/integration/targets/include_import/role/test_include_role.yml new file mode 100644 index 00000000..e120bd8c --- /dev/null +++ b/test/integration/targets/include_import/role/test_include_role.yml @@ -0,0 +1,166 @@ +- name: Test include_role + hosts: testhost + + vars: + run_role: yes + do_not_run_role: no + role_name: role1 + test_var: templating test in playbook + role_vars: + where_am_i_defined: in the playbook + entire_task: + include_role: + name: role1 + + tasks: + - name: Test basic role include + include_role: + name: role1 + + - name: Assert that basic include works + assert: + that: + - _role1_result.msg == 'In role1' + + - name: Test conditional role include + include_role: + name: role1 + tasks_from: canary1.yml + when: run_role + + - name: Assert that role ran + assert: + that: + - role1_canary1 == 'r1c1' + + - name: Test conditional role include that should be skipped + include_role: + name: role1 + tasks_from: canary2.yml + when: do_not_run_role + + - name: Assert that role did not run + assert: + that: + - role1_canary2 is not defined + + - name: Test role include with a loop + include_role: + name: "{{ item }}" + with_items: + - role1 + - role3 + - role2 + + - name: Assert that roles run with_items + assert: + that: + - _role1_result.msg == 'In role1' + - _role2_result.msg == 'In role2' + - _role3_result.msg == 'In role3' + + - name: Test including a task file from a role + include_role: + name: role1 + tasks_from: tasks.yml + + - name: Test including vars file and tasks file from a role + include_role: + name: role3 + tasks_from: vartest.yml + vars_from: role3vars.yml + + - name: Assert that variables defined in previous task are available to play + assert: + that: + - role3_default == 'defined in role3/defaults/main.yml' + - role3_main == 'defined in role3/vars/main.yml' + - role3_var == 'defined in role3/vars/role3vars.yml' + ignore_errors: yes + + - name: Test using a play variable for role name + include_role: + name: "{{ role_name }}" + + - name: Test using a host variable for role name + include_role: + name: "{{ host_var_role_name }}" + + - name: Pass variable to role + include_role: + name: role1 + tasks_from: vartest.yml + vars: + where_am_i_defined: in the task + + ## FIXME Currently failing with + ## ERROR! 
Vars in a IncludeRole must be specified as a dictionary, or a list of dictionaries + # - name: Pass all variables in a variable to role + # include_role: + # name: role1 + # tasks_from: vartest.yml + # vars: "{{ role_vars }}" + + - name: Pass templated variable to a role + include_role: + name: role1 + tasks_from: vartest.yml + vars: + where_am_i_defined: "{{ test_var }}" + + - name: Use a variable in tasks_from field + include_role: + name: role1 + tasks_from: "{{ tasks_file_name }}.yml" + vars: + tasks_file_name: canary3 + + - name: Assert that tasks file was included + assert: + that: + - role1_canary3 == 'r1c3' + + ## FIXME This fails with the following error: + ## The module {u'include_role': {u'name': u'role1'}} was not found in configured module paths. + # - name: Include an entire task + # action: + # module: "{{ entire_task }}" + + - block: + - name: Include a role that will fail + include_role: + name: role1 + tasks_from: fail.yml + + rescue: + - name: Include a role inside rescue + include_role: + name: role2 + + always: + - name: Include role inside always + include_role: + name: role3 + +- hosts: testhost,testhost2 + tasks: + - name: wipe role results + set_fact: + _role2_result: ~ + _role3_result: ~ + + - name: Test using a host variable for role name + include_role: + name: "{{ host_var_role_name }}" + + - name: assert that host variable for role name calls 2 diff roles + assert: + that: + - _role2_result is not none + when: inventory_hostname == 'testhost2' + + - name: assert that host variable for role name calls 2 diff roles + assert: + that: + - _role3_result is not none + when: inventory_hostname == 'testhost' diff --git a/test/integration/targets/include_import/role/test_include_role_vars_from.yml b/test/integration/targets/include_import/role/test_include_role_vars_from.yml new file mode 100644 index 00000000..f7bb4d76 --- /dev/null +++ b/test/integration/targets/include_import/role/test_include_role_vars_from.yml @@ -0,0 +1,10 @@ +- name: Test include_role vars_from + hosts: testhost + vars: + role_name: role1 + tasks: + - name: Test vars_from + include_role: + name: role1 + vars_from: + - vars_1.yml diff --git a/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml b/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml new file mode 100644 index 00000000..550ddc21 --- /dev/null +++ b/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml @@ -0,0 +1,4 @@ +- name: delegated assert handler + assert: + that: + - ansible_delegated_vars is defined diff --git a/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml b/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml new file mode 100644 index 00000000..9d2ef61c --- /dev/null +++ b/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml @@ -0,0 +1,3 @@ +- command: "true" + notify: + - delegated assert handler diff --git a/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml b/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml new file mode 100644 index 00000000..61d3ffe4 --- /dev/null +++ b/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml @@ -0,0 +1,2 @@ +--- +allow_duplicates: true diff --git a/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml b/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml new file mode 100644 index 
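The dup_allowed_role fixture above exists for a single setting: a role is normally deduplicated and runs at most once per play, and allow_duplicates: true in its meta/main.yml opts out of that. A sketch of how a play might drive it (an assumed driver; the actual playbook for this fixture lives elsewhere in the target):

    # roles/dup_allowed_role/meta/main.yml
    allow_duplicates: true

    # A play listing the role twice now runs it twice instead of once
    - hosts: testhost
      gather_facts: false
      roles:
        - dup_allowed_role
        - dup_allowed_role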
00000000..cad935e3 --- /dev/null +++ b/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- debug: + msg: "Tasks file inside role" diff --git a/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml b/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml new file mode 100644 index 00000000..9bb3db51 --- /dev/null +++ b/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml @@ -0,0 +1,4 @@ +- assert: + that: + - name == 'name_from_loop_var' + - name != 'loop_name_assert' diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml new file mode 100644 index 00000000..aba24bbe --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml @@ -0,0 +1,3 @@ +--- +testnesteddep2_defvar1: foobar +testnesteddep2_varvar1: foobar diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml new file mode 100644 index 00000000..31afcaa9 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: nested/nested/nested_dep_role2a diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml new file mode 100644 index 00000000..1f2ee7f0 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ./rund.yml diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml new file mode 100644 index 00000000..523e579d --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml @@ -0,0 +1,2 @@ +--- +- shell: echo from deprole2a diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml new file mode 100644 index 00000000..c89b6973 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml @@ -0,0 +1,2 @@ +--- +testnesteddep2_varvar1: muche diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml new file mode 100644 index 00000000..aba24bbe --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml @@ -0,0 +1,3 @@ +--- +testnesteddep2_defvar1: foobar +testnesteddep2_varvar1: foobar diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml new file mode 100644 index 00000000..6fc8ab0c --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: 
nested/nested/nested_dep_role2b diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml new file mode 100644 index 00000000..729582c4 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ./rune.yml diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml new file mode 100644 index 00000000..e77882b2 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml @@ -0,0 +1,2 @@ +--- +- shell: echo from deprole2 diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml new file mode 100644 index 00000000..c89b6973 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml @@ -0,0 +1,2 @@ +--- +testnesteddep2_varvar1: muche diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml new file mode 100644 index 00000000..aba24bbe --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml @@ -0,0 +1,3 @@ +--- +testnesteddep2_defvar1: foobar +testnesteddep2_varvar1: foobar diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml new file mode 100644 index 00000000..32cf5dda --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml new file mode 100644 index 00000000..5fbb04fe --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ./runf.yml diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml new file mode 100644 index 00000000..694005fd --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml @@ -0,0 +1,2 @@ +--- +- shell: echo from deprole2b diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml new file mode 100644 index 00000000..c89b6973 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml @@ -0,0 +1,2 @@ +--- +testnesteddep2_varvar1: muche diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml new file mode 100644 index 
00000000..536745ee --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml @@ -0,0 +1,3 @@ +--- +testnesteddep_defvar1: foobar +testnesteddep_varvar1: foobar diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml new file mode 100644 index 00000000..23d65c7e --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml new file mode 100644 index 00000000..d86604b4 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ./runc.yml diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml new file mode 100644 index 00000000..76682f54 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml @@ -0,0 +1,4 @@ +--- +- debug: + msg: from test_nested_dep_role +- include_role: {name: nested/nested/nested_dep_role2} diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml new file mode 100644 index 00000000..b80b5de3 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml @@ -0,0 +1,2 @@ +--- +testnesteddep_varvar1: muche diff --git a/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml b/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml new file mode 100644 index 00000000..9410b7d2 --- /dev/null +++ b/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: nested/nested_dep_role diff --git a/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml b/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml new file mode 100644 index 00000000..15a8e9fa --- /dev/null +++ b/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ./runa.yml diff --git a/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml b/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml new file mode 100644 index 00000000..643fdd2a --- /dev/null +++ b/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml @@ -0,0 +1,3 @@ +--- +- debug: + msg: from nested_include_task diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary1.yml b/test/integration/targets/include_import/roles/role1/tasks/canary1.yml new file mode 100644 index 00000000..9f202ba3 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/canary1.yml @@ -0,0 +1,2 @@ +- set_fact: + role1_canary1: r1c1 diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary2.yml b/test/integration/targets/include_import/roles/role1/tasks/canary2.yml new file mode 100644 index 00000000..80e18b8d --- /dev/null +++ 
b/test/integration/targets/include_import/roles/role1/tasks/canary2.yml @@ -0,0 +1,2 @@ +- set_fact: + role1_canary2: r1c2 diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary3.yml b/test/integration/targets/include_import/roles/role1/tasks/canary3.yml new file mode 100644 index 00000000..40014e32 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/canary3.yml @@ -0,0 +1,2 @@ +- set_fact: + role1_canary3: r1c3 diff --git a/test/integration/targets/include_import/roles/role1/tasks/fail.yml b/test/integration/targets/include_import/roles/role1/tasks/fail.yml new file mode 100644 index 00000000..b1b5f155 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/fail.yml @@ -0,0 +1,3 @@ +- name: EXPECTED FAILURE + fail: + msg: This command should always fail diff --git a/test/integration/targets/include_import/roles/role1/tasks/main.yml b/test/integration/targets/include_import/roles/role1/tasks/main.yml new file mode 100644 index 00000000..a8b641ee --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/main.yml @@ -0,0 +1,3 @@ +- debug: + msg: In role1 + register: _role1_result diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml new file mode 100644 index 00000000..e4a1e63e --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml @@ -0,0 +1 @@ +- import_tasks: r1t02.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml new file mode 100644 index 00000000..d3d37507 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml @@ -0,0 +1 @@ +- import_tasks: r1t03.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml new file mode 100644 index 00000000..1d3330ae --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml @@ -0,0 +1 @@ +- import_tasks: r1t04.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml new file mode 100644 index 00000000..f3eece23 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml @@ -0,0 +1 @@ +- import_tasks: r1t05.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml new file mode 100644 index 00000000..4c7371ee --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml @@ -0,0 +1 @@ +- import_tasks: r1t06.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml new file mode 100644 index 00000000..96d56609 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml @@ -0,0 +1 @@ +- import_tasks: r1t07.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml new file mode 100644 index 00000000..ee8d3252 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml @@ -0,0 +1 @@ +- import_tasks: r1t08.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml 
b/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml new file mode 100644 index 00000000..33b81096 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml @@ -0,0 +1 @@ +- import_tasks: r1t09.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml new file mode 100644 index 00000000..8973c291 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml @@ -0,0 +1 @@ +- import_tasks: r1t10.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml new file mode 100644 index 00000000..eafdca25 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml @@ -0,0 +1 @@ +- import_tasks: r1t11.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml new file mode 100644 index 00000000..9ab828f3 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml @@ -0,0 +1 @@ +- import_tasks: r1t12.yml diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml new file mode 100644 index 00000000..88284861 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml @@ -0,0 +1,2 @@ +- debug: + msg: r1t12 diff --git a/test/integration/targets/include_import/roles/role1/tasks/tasks.yml b/test/integration/targets/include_import/roles/role1/tasks/tasks.yml new file mode 100644 index 00000000..45430bc4 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/tasks.yml @@ -0,0 +1,2 @@ +- debug: + msg: Tasks file inside role1 diff --git a/test/integration/targets/include_import/roles/role1/tasks/vartest.yml b/test/integration/targets/include_import/roles/role1/tasks/vartest.yml new file mode 100644 index 00000000..5a49d8dd --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/tasks/vartest.yml @@ -0,0 +1,2 @@ +- debug: + var: where_am_i_defined diff --git a/test/integration/targets/include_import/roles/role1/vars/main.yml b/test/integration/targets/include_import/roles/role1/vars/main.yml new file mode 100644 index 00000000..57d31cf7 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/vars/main.yml @@ -0,0 +1 @@ +where_am_i_defined: role1 vars/main.yml diff --git a/test/integration/targets/include_import/roles/role1/vars/role1vars.yml b/test/integration/targets/include_import/roles/role1/vars/role1vars.yml new file mode 100644 index 00000000..57d31cf7 --- /dev/null +++ b/test/integration/targets/include_import/roles/role1/vars/role1vars.yml @@ -0,0 +1 @@ +where_am_i_defined: role1 vars/main.yml diff --git a/test/integration/targets/include_import/roles/role2/tasks/main.yml b/test/integration/targets/include_import/roles/role2/tasks/main.yml new file mode 100644 index 00000000..82934f67 --- /dev/null +++ b/test/integration/targets/include_import/roles/role2/tasks/main.yml @@ -0,0 +1,3 @@ +- debug: + msg: In role2 + register: _role2_result diff --git a/test/integration/targets/include_import/roles/role3/defaults/main.yml b/test/integration/targets/include_import/roles/role3/defaults/main.yml new file mode 100644 index 00000000..c3464c4d --- /dev/null +++ 
b/test/integration/targets/include_import/roles/role3/defaults/main.yml @@ -0,0 +1,2 @@ +where_am_i_defined: defaults in role3 +role3_default: defined in role3/defaults/main.yml diff --git a/test/integration/targets/include_import/roles/role3/handlers/main.yml b/test/integration/targets/include_import/roles/role3/handlers/main.yml new file mode 100644 index 00000000..c8baa270 --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/handlers/main.yml @@ -0,0 +1,3 @@ +- name: runme + debug: + msg: role3 handler diff --git a/test/integration/targets/include_import/roles/role3/tasks/main.yml b/test/integration/targets/include_import/roles/role3/tasks/main.yml new file mode 100644 index 00000000..bb70dad3 --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/tasks/main.yml @@ -0,0 +1,3 @@ +- debug: + msg: In role3 + register: _role3_result diff --git a/test/integration/targets/include_import/roles/role3/tasks/tasks.yml b/test/integration/targets/include_import/roles/role3/tasks/tasks.yml new file mode 100644 index 00000000..0e822695 --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/tasks/tasks.yml @@ -0,0 +1,2 @@ +- debug: + msg: Tasks file inside role3 diff --git a/test/integration/targets/include_import/roles/role3/tasks/vartest.yml b/test/integration/targets/include_import/roles/role3/tasks/vartest.yml new file mode 100644 index 00000000..cb21c53f --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/tasks/vartest.yml @@ -0,0 +1,2 @@ +- debug: + var: role3_var diff --git a/test/integration/targets/include_import/roles/role3/vars/main.yml b/test/integration/targets/include_import/roles/role3/vars/main.yml new file mode 100644 index 00000000..9adac6b8 --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/vars/main.yml @@ -0,0 +1 @@ +role3_main: defined in role3/vars/main.yml diff --git a/test/integration/targets/include_import/roles/role3/vars/role3vars.yml b/test/integration/targets/include_import/roles/role3/vars/role3vars.yml new file mode 100644 index 00000000..f324d56a --- /dev/null +++ b/test/integration/targets/include_import/roles/role3/vars/role3vars.yml @@ -0,0 +1,2 @@ +where_am_i_defined: role3vars.yml +role3_var: defined in role3/vars/role3vars.yml diff --git a/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml b/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml new file mode 100644 index 00000000..a2446bba --- /dev/null +++ b/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - role1 + - role2 diff --git a/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml b/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml new file mode 100644 index 00000000..060fe42f --- /dev/null +++ b/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml @@ -0,0 +1,2 @@ +- debug: + msg: In role_with_deps diff --git a/test/integration/targets/include_import/run_once/include_me.yml b/test/integration/targets/include_import/run_once/include_me.yml new file mode 100644 index 00000000..e92128a9 --- /dev/null +++ b/test/integration/targets/include_import/run_once/include_me.yml @@ -0,0 +1,2 @@ +- set_fact: + lola: wiseman diff --git a/test/integration/targets/include_import/run_once/playbook.yml b/test/integration/targets/include_import/run_once/playbook.yml new file mode 100644 index 00000000..cc1e265f --- /dev/null +++ 
b/test/integration/targets/include_import/run_once/playbook.yml @@ -0,0 +1,61 @@ +# This playbook exists to document how run_once behaves when +# applied to a dynamic include +# +# As with other uses of keywords on dynamic includes, it only affects the include. +# In this case it causes the include to only be processed for ansible_play_hosts[0] +# which has the side effect of only running the tasks on ansible_play_hosts[0] +# and would only delegate facts of the include itself, not the tasks contained within + +- hosts: localhost + gather_facts: false + tasks: + - add_host: + name: "{{ item }}" + ansible_connection: local + groups: + - all + loop: + - localhost0 + - localhost1 + + - add_host: + name: "{{ item }}" + groups: + - testing + ansible_connection: local + loop: + - localhost2 + - localhost3 + +- hosts: all:!testing + gather_facts: false + vars: + lola: untouched + tasks: + - include_tasks: + file: include_me.yml + apply: + run_once: true + run_once: true + + - assert: + that: + - lola == 'wiseman' + +- hosts: testing + gather_facts: false + vars: + lola: untouched + tasks: + - include_tasks: include_me.yml + run_once: true + + - assert: + that: + - lola == 'wiseman' + when: inventory_hostname == ansible_play_hosts[0] + + - assert: + that: + - lola == 'untouched' + when: inventory_hostname != ansible_play_hosts[0] diff --git a/test/integration/targets/include_import/runme.sh b/test/integration/targets/include_import/runme.sh new file mode 100755 index 00000000..68b12a1f --- /dev/null +++ b/test/integration/targets/include_import/runme.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_ROLES_PATH=./roles + +function gen_task_files() { + for i in $(seq -f '%03g' 1 39); do + echo -e "- name: Hello Message\n  debug:\n    msg: Task file ${i}" > "tasks/hello/tasks-file-${i}.yml" + done +} + +## Adhoc + +ansible -m include_role -a name=role1 localhost + +## Import (static) + +# Playbook +test "$(ANSIBLE_DEPRECATION_WARNINGS=1 ansible-playbook -i ../../inventory playbook/test_import_playbook.yml "$@" 2>&1 | grep -c '\[DEPRECATION WARNING\]: Additional parameters in import_playbook')" = 1 + +ANSIBLE_STRATEGY='linear' ansible-playbook playbook/test_import_playbook_tags.yml -i inventory "$@" --tags canary1,canary22,validate --skip-tags skipme + +# Tasks +ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_import_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_import_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_import_tasks_tags.yml -i inventory "$@" --tags tasks1,canary1,validate + +# Role +ANSIBLE_STRATEGY='linear' ansible-playbook role/test_import_role.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook role/test_import_role.yml -i inventory "$@" + + +## Include (dynamic) + +# Tasks +ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_include_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_tasks_tags.yml -i inventory "$@" --tags tasks1,canary1,validate + +# Role +ANSIBLE_STRATEGY='linear' ansible-playbook role/test_include_role.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook role/test_include_role.yml -i inventory "$@" + +# https://github.com/ansible/ansible/issues/68515 +ansible-playbook -v role/test_include_role_vars_from.yml 2>&1 | tee test_include_role_vars_from.out +test "$(grep -E -c 'Expected a string for vars_from but got' 
test_include_role_vars_from.out)" = 1 + +## Max Recursion Depth +# https://github.com/ansible/ansible/issues/23609 +ANSIBLE_STRATEGY='linear' ansible-playbook test_role_recursion.yml -i inventory "$@" +ANSIBLE_STRATEGY='linear' ansible-playbook test_role_recursion_fqcn.yml -i inventory "$@" + +## Nested tasks +# https://github.com/ansible/ansible/issues/34782 +ANSIBLE_STRATEGY='linear' ansible-playbook test_nested_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='linear' ansible-playbook test_nested_tasks_fqcn.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook test_nested_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook test_nested_tasks_fqcn.yml -i inventory "$@" + +## Tons of top level include_tasks +# https://github.com/ansible/ansible/issues/36053 +# Fixed by https://github.com/ansible/ansible/pull/36075 +gen_task_files +ANSIBLE_STRATEGY='linear' ansible-playbook test_copious_include_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='linear' ansible-playbook test_copious_include_tasks_fqcn.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook test_copious_include_tasks.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook test_copious_include_tasks_fqcn.yml -i inventory "$@" +rm -f tasks/hello/*.yml + +# Included tasks should inherit attrs from non-dynamic blocks in parent chain +# https://github.com/ansible/ansible/pull/38827 +ANSIBLE_STRATEGY='linear' ansible-playbook test_grandparent_inheritance.yml -i inventory "$@" +ANSIBLE_STRATEGY='linear' ansible-playbook test_grandparent_inheritance_fqcn.yml -i inventory "$@" + +# undefined_var +ANSIBLE_STRATEGY='linear' ansible-playbook undefined_var/playbook.yml -i inventory "$@" +ANSIBLE_STRATEGY='free' ansible-playbook undefined_var/playbook.yml -i inventory "$@" + +# include_ + apply (explicit inheritance) +ANSIBLE_STRATEGY='linear' ansible-playbook apply/include_apply.yml -i inventory "$@" --tags foo +set +e +OUT=$(ANSIBLE_STRATEGY='linear' ansible-playbook apply/import_apply.yml -i inventory "$@" --tags foo 2>&1 | grep 'ERROR! Invalid options for import_tasks: apply') +set -e +if [[ -z "$OUT" ]]; then + echo "apply on import_tasks did not cause error" + exit 1 +fi + +# Test that duplicate items in loop are not deduped +ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_include_dupe_loop.yml -i inventory "$@" | tee test_include_dupe_loop.out +test "$(grep -c '"item=foo"' test_include_dupe_loop.out)" = 3 +ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_dupe_loop.yml -i inventory "$@" | tee test_include_dupe_loop.out +test "$(grep -c '"item=foo"' test_include_dupe_loop.out)" = 3 + +ansible-playbook public_exposure/playbook.yml -i inventory "$@" +ansible-playbook public_exposure/no_bleeding.yml -i inventory "$@" +ansible-playbook public_exposure/no_overwrite_roles.yml -i inventory "$@" + +# https://github.com/ansible/ansible/pull/48068 +ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook run_once/playbook.yml "$@" + +# https://github.com/ansible/ansible/issues/48936 +ansible-playbook -v handler_addressing/playbook.yml 2>&1 | tee test_handler_addressing.out +test "$(grep -E -c 'include handler task|ERROR! The requested handler '"'"'do_import'"'"' was not found' test_handler_addressing.out)" = 2 + +# https://github.com/ansible/ansible/issues/49969 +ansible-playbook -v parent_templating/playbook.yml 2>&1 | tee test_parent_templating.out +test "$(grep -E -c 'Templating the path of the parent include_tasks failed.' 
test_parent_templating.out)" = 0 + +# https://github.com/ansible/ansible/issues/54618 +ansible-playbook test_loop_var_bleed.yaml "$@" + +# https://github.com/ansible/ansible/issues/56580 +ansible-playbook valid_include_keywords/playbook.yml "$@" + +# https://github.com/ansible/ansible/issues/64902 +ansible-playbook tasks/test_allow_single_role_dup.yml 2>&1 | tee test_allow_single_role_dup.out +test "$(grep -c 'ok=3' test_allow_single_role_dup.out)" = 1 + +# https://github.com/ansible/ansible/issues/66764 +ANSIBLE_HOST_PATTERN_MISMATCH=error ansible-playbook empty_group_warning/playbook.yml + +ansible-playbook test_include_loop_fqcn.yml "$@" diff --git a/test/integration/targets/include_import/tasks/debug_item.yml b/test/integration/targets/include_import/tasks/debug_item.yml new file mode 100644 index 00000000..025e132d --- /dev/null +++ b/test/integration/targets/include_import/tasks/debug_item.yml @@ -0,0 +1,2 @@ +- debug: + msg: "item={{ item }}" diff --git a/test/integration/targets/include_import/tasks/hello/.gitignore b/test/integration/targets/include_import/tasks/hello/.gitignore new file mode 100644 index 00000000..b4602e78 --- /dev/null +++ b/test/integration/targets/include_import/tasks/hello/.gitignore @@ -0,0 +1 @@ +tasks-file-* diff --git a/test/integration/targets/include_import/tasks/hello/keep b/test/integration/targets/include_import/tasks/hello/keep new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/include_import/tasks/nested/nested.yml b/test/integration/targets/include_import/tasks/nested/nested.yml new file mode 100644 index 00000000..0bfcdeef --- /dev/null +++ b/test/integration/targets/include_import/tasks/nested/nested.yml @@ -0,0 +1,2 @@ +--- +- include_tasks: ../../nestedtasks/nested/nested.yml diff --git a/test/integration/targets/include_import/tasks/tasks1.yml b/test/integration/targets/include_import/tasks/tasks1.yml new file mode 100644 index 00000000..e1d83d92 --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks1.yml @@ -0,0 +1,5 @@ +- name: Set variable inside tasks1.yml + set_fact: + set_in_tasks1: yes + tags: + - tasks1 diff --git a/test/integration/targets/include_import/tasks/tasks2.yml b/test/integration/targets/include_import/tasks/tasks2.yml new file mode 100644 index 00000000..1b4c86fc --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks2.yml @@ -0,0 +1,5 @@ +- name: Set variable inside tasks2.yml + set_fact: + set_in_tasks2: yes + tags: + - tasks2 diff --git a/test/integration/targets/include_import/tasks/tasks3.yml b/test/integration/targets/include_import/tasks/tasks3.yml new file mode 100644 index 00000000..6da37191 --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks3.yml @@ -0,0 +1,5 @@ +- name: Set variable inside tasks3.yml + set_fact: + set_in_tasks3: yes + tags: + - tasks3 diff --git a/test/integration/targets/include_import/tasks/tasks4.yml b/test/integration/targets/include_import/tasks/tasks4.yml new file mode 100644 index 00000000..fc2eb6cb --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks4.yml @@ -0,0 +1,5 @@ +- name: Set variable inside tasks4.yml + set_fact: + set_in_tasks4: yes + tags: + - tasks4 diff --git a/test/integration/targets/include_import/tasks/tasks5.yml b/test/integration/targets/include_import/tasks/tasks5.yml new file mode 100644 index 00000000..f2ee6b9e --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks5.yml @@ -0,0 +1,6 @@ +- name: Set variable inside tasks5.yml + set_fact: + 
set_in_tasks5: yes + tags: + - tasks5 + - canary1 diff --git a/test/integration/targets/include_import/tasks/tasks6.yml b/test/integration/targets/include_import/tasks/tasks6.yml new file mode 100644 index 00000000..fa03079d --- /dev/null +++ b/test/integration/targets/include_import/tasks/tasks6.yml @@ -0,0 +1,5 @@ +- name: Set variable inside tasks6.yml + set_fact: + set_in_tasks6: yes + tags: + - tasks6 diff --git a/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml b/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml new file mode 100644 index 00000000..3a6992fa --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml @@ -0,0 +1,8 @@ +--- +- name: test for allow_duplicates with single role + hosts: localhost + gather_facts: false + roles: + - dup_allowed_role + - dup_allowed_role + - dup_allowed_role diff --git a/test/integration/targets/include_import/tasks/test_import_tasks.yml b/test/integration/targets/include_import/tasks/test_import_tasks.yml new file mode 100644 index 00000000..8f07bb90 --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_import_tasks.yml @@ -0,0 +1,41 @@ +- name: Test import_tasks + hosts: testhost + + tasks: + - name: Test basic task import + import_tasks: tasks1.yml + + - name: Assert that fact was set in import + assert: + that: + - set_in_tasks1 + + - name: Test conditional task import + import_tasks: tasks2.yml + when: no + + - name: Assert that tasks were skipped + assert: + that: + - set_in_tasks2 is not defined + + - block: + - name: Import tasks inside a block + import_tasks: tasks3.yml + + - name: Assert that task3 was included + assert: + that: + - set_in_tasks3 + + always: + - name: Import task inside always + import_tasks: tasks4.yml + + - name: Validate that variables set in previously imported tasks are passed down. 
+ import_tasks: validate3.yml + + - name: Assert that tasks4 was included + assert: + that: + - set_in_tasks4 diff --git a/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml b/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml new file mode 100644 index 00000000..3b1d68fc --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml @@ -0,0 +1,23 @@ +- name: Test import_tasks using tags + hosts: testhost + + tasks: + - name: Import tasks1.yml + import_tasks: tasks1.yml + + - name: Import tasks4.yml using tag on import task + import_tasks: tasks4.yml + tags: + - canary1 + + - name: Import tasks2.yml + import_tasks: tasks2.yml + + - name: Assert that appropriate tasks were run + assert: + that: + - set_in_tasks1 + - set_in_tasks4 + - set_in_tasks2 is not defined + tags: + - validate diff --git a/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml b/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml new file mode 100644 index 00000000..b7b9301d --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml @@ -0,0 +1,8 @@ +- name: Test Include Duplicate Loop Items + hosts: testhost + tasks: + - include_tasks: debug_item.yml + loop: + - foo + - foo + - foo diff --git a/test/integration/targets/include_import/tasks/test_include_tasks.yml b/test/integration/targets/include_import/tasks/test_include_tasks.yml new file mode 100644 index 00000000..ebe2273e --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_include_tasks.yml @@ -0,0 +1,44 @@ +- name: Test include_tasks + hosts: testhost + + tasks: + - name: Test basic task include + include_tasks: tasks1.yml + + - name: Assert that fact was set in include + assert: + that: + - set_in_tasks1 + + - name: Test conditional task include + include_tasks: tasks2.yml + when: no + + - name: Assert that tasks were skipped + assert: + that: + - set_in_tasks2 is not defined + + - block: + - name: Include tasks inside a block + include_tasks: tasks3.yml + + - name: Assert that task3 was included + assert: + that: + - set_in_tasks3 + + always: + - name: Include task inside always + include_tasks: tasks4.yml + + - name: Validate that variables set in previously imported tasks are passed down + include_tasks: validate3.yml + + - name: Assert that tasks4 was included + assert: + that: + - set_in_tasks4 + + - name: include_tasks + action + action: include_tasks tasks1.yml diff --git a/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml b/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml new file mode 100644 index 00000000..3fe43809 --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml @@ -0,0 +1,25 @@ +- name: Test include_tasks using tags + hosts: testhost + + tasks: + # This should not be included + - name: Include tasks1.yml + include_tasks: tasks1.yml + + # This should be included but tasks inside should not run because they do not have + # the canary1 tag and tasks2 is not in the list of tags for the ansible-playbook command + - name: Include tasks2.yml + include_tasks: tasks2.yml + tags: + - canary1 + + # This should be included and tasks inside should be run + - name: Include tasks5.yml using tag on include task + include_tasks: tasks5.yml + tags: + - canary1 + + - name: Include validate_tags.yml + include_tasks: validate_tags.yml + tags: + - validate diff --git 
a/test/integration/targets/include_import/tasks/test_recursion.yml b/test/integration/targets/include_import/tasks/test_recursion.yml new file mode 100644 index 00000000..96754ec8 --- /dev/null +++ b/test/integration/targets/include_import/tasks/test_recursion.yml @@ -0,0 +1,6 @@ +- hosts: testhost + + tasks: + - include_role: + name: role + tasks_from: r1t1.yml diff --git a/test/integration/targets/include_import/tasks/validate3.yml b/test/integration/targets/include_import/tasks/validate3.yml new file mode 100644 index 00000000..e3166aa3 --- /dev/null +++ b/test/integration/targets/include_import/tasks/validate3.yml @@ -0,0 +1,4 @@ +- name: Assert that variable set in previously included task is defined + assert: + that: + - set_in_tasks3 diff --git a/test/integration/targets/include_import/tasks/validate_tags.yml b/test/integration/targets/include_import/tasks/validate_tags.yml new file mode 100644 index 00000000..e2f3377b --- /dev/null +++ b/test/integration/targets/include_import/tasks/validate_tags.yml @@ -0,0 +1,8 @@ +- name: Assert that appropriate tasks were run + assert: + that: + - set_in_tasks1 is undefined + - set_in_tasks2 is undefined + - set_in_tasks5 + tags: + - validate diff --git a/test/integration/targets/include_import/test_copious_include_tasks.yml b/test/integration/targets/include_import/test_copious_include_tasks.yml new file mode 100644 index 00000000..4564c76e --- /dev/null +++ b/test/integration/targets/include_import/test_copious_include_tasks.yml @@ -0,0 +1,44 @@ +- name: Test many include_tasks + hosts: testhost + gather_facts: no + + tasks: + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-001.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-002.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-003.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-004.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-005.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-006.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-007.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-008.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-009.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-010.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-011.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-012.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-013.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-014.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-015.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-016.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-017.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-018.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-019.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-020.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-021.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-022.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-023.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-024.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-025.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-026.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-027.yml" + - include_tasks: "{{ playbook_dir 
}}/tasks/hello/tasks-file-028.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-029.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-030.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-031.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-032.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-033.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-034.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-035.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-036.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-037.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-038.yml" + - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-039.yml" diff --git a/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml b/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml new file mode 100644 index 00000000..32fa9abc --- /dev/null +++ b/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml @@ -0,0 +1,44 @@ +- name: Test many ansible.builtin.include_tasks + hosts: testhost + gather_facts: no + + tasks: + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-001.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-002.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-003.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-004.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-005.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-006.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-007.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-008.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-009.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-010.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-011.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-012.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-013.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-014.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-015.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-016.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-017.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-018.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-019.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-020.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-021.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-022.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-023.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-024.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-025.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-026.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir 
}}/tasks/hello/tasks-file-027.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-028.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-029.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-030.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-031.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-032.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-033.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-034.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-035.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-036.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-037.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-038.yml" + - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-039.yml" diff --git a/test/integration/targets/include_import/test_grandparent_inheritance.yml b/test/integration/targets/include_import/test_grandparent_inheritance.yml new file mode 100644 index 00000000..45a3d836 --- /dev/null +++ b/test/integration/targets/include_import/test_grandparent_inheritance.yml @@ -0,0 +1,29 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - debug: + var: inventory_hostname + + - name: Test included tasks inherit from block + check_mode: true + block: + - include_tasks: grandchild/block_include_tasks.yml + + - debug: + var: block_include_result + + - assert: + that: + - block_include_result is skipped + + - name: Test included tasks inherit deeply from import + import_tasks: grandchild/import.yml + check_mode: true + + - debug: + var: import_include_include_result + + - assert: + that: + - import_include_include_result is skipped diff --git a/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml b/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml new file mode 100644 index 00000000..37a0ad0d --- /dev/null +++ b/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml @@ -0,0 +1,29 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - debug: + var: inventory_hostname + + - name: Test included tasks inherit from block + check_mode: true + block: + - ansible.builtin.include_tasks: grandchild/block_include_tasks.yml + + - debug: + var: block_include_result + + - assert: + that: + - block_include_result is skipped + + - name: Test included tasks inherit deeply from import + ansible.builtin.import_tasks: grandchild/import.yml + check_mode: true + + - debug: + var: import_include_include_result + + - assert: + that: + - import_include_include_result is skipped diff --git a/test/integration/targets/include_import/test_include_loop_fqcn.yml b/test/integration/targets/include_import/test_include_loop_fqcn.yml new file mode 100644 index 00000000..62d91f22 --- /dev/null +++ b/test/integration/targets/include_import/test_include_loop_fqcn.yml @@ -0,0 +1,17 @@ +- hosts: localhost + gather_facts: false + tasks: + - name: skipped include undefined loop + ansible.builtin.include_tasks: doesnt_matter.yml + loop: '{{ lkjsdflkjsdlfkjsdlfkjsdf }}' + when: false + register: skipped_include + + - debug: + var: skipped_include + + - assert: + that: + - skipped_include.results is undefined + - skipped_include.skip_reason is defined + - 
skipped_include is skipped diff --git a/test/integration/targets/include_import/test_loop_var_bleed.yaml b/test/integration/targets/include_import/test_loop_var_bleed.yaml new file mode 100644 index 00000000..a5146f30 --- /dev/null +++ b/test/integration/targets/include_import/test_loop_var_bleed.yaml @@ -0,0 +1,9 @@ +- hosts: localhost + gather_facts: false + tasks: + - include_role: + name: loop_name_assert + loop: + - name_from_loop_var + loop_control: + loop_var: name diff --git a/test/integration/targets/include_import/test_nested_tasks.yml b/test/integration/targets/include_import/test_nested_tasks.yml new file mode 100644 index 00000000..7451ec4e --- /dev/null +++ b/test/integration/targets/include_import/test_nested_tasks.yml @@ -0,0 +1,6 @@ +- name: >- + verify that multiple levels of nested statements and + include+meta don't mess with included-file mechanisms + hosts: testhost + tasks: + - include_tasks: ./tasks/nested/nested.yml diff --git a/test/integration/targets/include_import/test_nested_tasks_fqcn.yml b/test/integration/targets/include_import/test_nested_tasks_fqcn.yml new file mode 100644 index 00000000..14e72eed --- /dev/null +++ b/test/integration/targets/include_import/test_nested_tasks_fqcn.yml @@ -0,0 +1,6 @@ +- name: >- + verify that multiple levels of nested statements and + include+meta don't mess with included-file mechanisms + hosts: testhost + tasks: + - ansible.builtin.include_tasks: ./tasks/nested/nested.yml diff --git a/test/integration/targets/include_import/test_role_recursion.yml b/test/integration/targets/include_import/test_role_recursion.yml new file mode 100644 index 00000000..ad2489a0 --- /dev/null +++ b/test/integration/targets/include_import/test_role_recursion.yml @@ -0,0 +1,7 @@ +- name: Test max recursion depth + hosts: testhost + + tasks: + - import_role: + name: role1 + tasks_from: r1t01.yml diff --git a/test/integration/targets/include_import/test_role_recursion_fqcn.yml b/test/integration/targets/include_import/test_role_recursion_fqcn.yml new file mode 100644 index 00000000..13d8d2cb --- /dev/null +++ b/test/integration/targets/include_import/test_role_recursion_fqcn.yml @@ -0,0 +1,7 @@ +- name: Test max recursion depth + hosts: testhost + + tasks: + - ansible.builtin.import_role: + name: role1 + tasks_from: r1t01.yml diff --git a/test/integration/targets/include_import/undefined_var/include_tasks.yml b/test/integration/targets/include_import/undefined_var/include_tasks.yml new file mode 100644 index 00000000..56f06c97 --- /dev/null +++ b/test/integration/targets/include_import/undefined_var/include_tasks.yml @@ -0,0 +1,5 @@ +--- + +- debug: + msg: "This message comes from an 'include_tasks'-task! 
:-)" + register: "_include_tasks_task_result" diff --git a/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml b/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml new file mode 100644 index 00000000..7f24a435 --- /dev/null +++ b/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml @@ -0,0 +1,5 @@ +- vars: + _undefined: 'yes' + block: + - set_fact: + _include_defined_result: 'good' diff --git a/test/integration/targets/include_import/undefined_var/playbook.yml b/test/integration/targets/include_import/undefined_var/playbook.yml new file mode 100644 index 00000000..0584fa8a --- /dev/null +++ b/test/integration/targets/include_import/undefined_var/playbook.yml @@ -0,0 +1,36 @@ +--- +- hosts: testhost + gather_facts: false + tasks: + - include_tasks: "include_tasks.yml" + ignore_errors: True + register: "_include_tasks_result" + when: + - "_undefined == 'yes'" + + - assert: + that: + - "_include_tasks_result is failed" + - "_include_tasks_task_result is not defined" + msg: "'include_tasks' did not evaluate it's attached condition and failed" + + - include_role: + name: "no_log" + ignore_errors: True + register: "_include_role_result" + when: + - "_undefined == 'yes'" + + - assert: + that: + - "_include_role_result is failed" + msg: "'include_role' did not evaluate it's attached condition and failed" + + - include: include_that_defines_var.yml + static: yes + when: + - "_undefined == 'yes'" + + - assert: + that: + - _include_defined_result == 'good' diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me.yml b/test/integration/targets/include_import/valid_include_keywords/include_me.yml new file mode 100644 index 00000000..ab5c6a9c --- /dev/null +++ b/test/integration/targets/include_import/valid_include_keywords/include_me.yml @@ -0,0 +1,6 @@ +- debug: + msg: include_me +- assert: + that: + - loopy == 1 + - baz == 'qux' diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml b/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml new file mode 100644 index 00000000..47b424ad --- /dev/null +++ b/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml @@ -0,0 +1,2 @@ +- debug: + msg: listen diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml b/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml new file mode 100644 index 00000000..4501e380 --- /dev/null +++ b/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml @@ -0,0 +1,2 @@ +- debug: + msg: notify diff --git a/test/integration/targets/include_import/valid_include_keywords/playbook.yml b/test/integration/targets/include_import/valid_include_keywords/playbook.yml new file mode 100644 index 00000000..c70ec81f --- /dev/null +++ b/test/integration/targets/include_import/valid_include_keywords/playbook.yml @@ -0,0 +1,40 @@ +- hosts: localhost + gather_facts: false + handlers: + - include_tasks: + file: include_me_listen.yml + listen: + - include_me_listen + + - name: Include Me Notify + include_tasks: include_me_notify.yml + + tasks: + - name: Include me + include_tasks: include_me.yml + args: + apply: + tags: + - bar + debugger: ~ + ignore_errors: false + loop: + - 1 + loop_control: + loop_var: loopy + no_log: false + register: this_isnt_useful + run_once: true + tags: + - foo + vars: + baz: qux + when: true + + - 
command: "true" + notify: + - include_me_listen + + - command: "true" + notify: + - Include Me Notify diff --git a/test/integration/targets/include_parent_role_vars/aliases b/test/integration/targets/include_parent_role_vars/aliases new file mode 100644 index 00000000..23abb8d3 --- /dev/null +++ b/test/integration/targets/include_parent_role_vars/aliases @@ -0,0 +1,2 @@ +# Continuation of special_vars integration tests to test special variables set on role inclusion. +hidden \ No newline at end of file diff --git a/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml b/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml new file mode 100644 index 00000000..79b7b1cb --- /dev/null +++ b/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml @@ -0,0 +1,37 @@ +# Copyright 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: ensure our parent role tree to contain only our direct parent item + assert: + that: + - "ansible_parent_role_names == ['special_vars']" + +- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names + assert: + that: + - "ansible_parent_role_names|length == ansible_parent_role_paths|length" + +- name: attempt to import ourselves + import_role: + name: "include_parent_role_vars" + tasks_from: "included_by_ourselves.yml" + +- name: ensure our parent role tree to contain only our direct parent item after importing + assert: + that: + - "ansible_parent_role_names == ['special_vars']" + +- name: attempt to include ourselves + include_role: + name: "include_parent_role_vars" + tasks_from: "included_by_ourselves.yml" + +- name: ensure our parent role tree to contain only our direct parent item after including + assert: + that: + - "ansible_parent_role_names == ['special_vars']" + +- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names + assert: + that: + - "ansible_parent_role_names|length == ansible_parent_role_paths|length" diff --git a/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml b/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml new file mode 100644 index 00000000..3ea93004 --- /dev/null +++ b/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml @@ -0,0 +1,14 @@ +# Copyright 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: check if the inclusion tree shows ourself twice as well as our initial parent + assert: + that: + - "ansible_parent_role_names|length == 2" + - "ansible_parent_role_names[0] == 'include_parent_role_vars'" # Since we included ourselves, we're the top level + - "ansible_parent_role_names[1] == 'special_vars'" + +- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names + assert: + that: + - "ansible_parent_role_names|length == ansible_parent_role_paths|length" diff --git a/test/integration/targets/include_parent_role_vars/tasks/main.yml b/test/integration/targets/include_parent_role_vars/tasks/main.yml new file mode 100644 index 00000000..56a485bc --- /dev/null +++ b/test/integration/targets/include_parent_role_vars/tasks/main.yml @@ -0,0 +1,21 @@ +# Copyright 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +- name: ensure our parent role tree to 
contains only our direct parent item + assert: + that: + - "ansible_parent_role_names == ['special_vars']" + +- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names + assert: + that: + - "ansible_parent_role_names|length == ansible_parent_role_paths|length" + +# task importing should not affect ansible_parent_role_names +- name: test task-importing after we've been included by another role + import_tasks: "included_by_other_role.yml" + +# task inclusion should not affect ansible_parent_role_names +- name: test task-inclusion after we've been included by another role + include_tasks: "included_by_other_role.yml" diff --git a/test/integration/targets/include_vars-ad-hoc/aliases b/test/integration/targets/include_vars-ad-hoc/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/include_vars-ad-hoc/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/include_vars-ad-hoc/dir/inc.yml b/test/integration/targets/include_vars-ad-hoc/dir/inc.yml new file mode 100644 index 00000000..c1d24c84 --- /dev/null +++ b/test/integration/targets/include_vars-ad-hoc/dir/inc.yml @@ -0,0 +1 @@ +porter: cable diff --git a/test/integration/targets/include_vars-ad-hoc/runme.sh b/test/integration/targets/include_vars-ad-hoc/runme.sh new file mode 100755 index 00000000..51b68d21 --- /dev/null +++ b/test/integration/targets/include_vars-ad-hoc/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ansible testhost -i ../../inventory -m include_vars -a 'dir/inc.yml' "$@" +ansible testhost -i ../../inventory -m include_vars -a 'dir=dir' "$@" diff --git a/test/integration/targets/include_vars/aliases b/test/integration/targets/include_vars/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/include_vars/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/include_vars/defaults/main.yml b/test/integration/targets/include_vars/defaults/main.yml new file mode 100644 index 00000000..901fb220 --- /dev/null +++ b/test/integration/targets/include_vars/defaults/main.yml @@ -0,0 +1,3 @@ +--- +testing: 1 +base_dir: defaults diff --git a/test/integration/targets/include_vars/tasks/main.yml b/test/integration/targets/include_vars/tasks/main.yml new file mode 100644 index 00000000..799d7b26 --- /dev/null +++ b/test/integration/targets/include_vars/tasks/main.yml @@ -0,0 +1,164 @@ +--- +- name: verify that the default value is indeed 1 + assert: + that: + - "testing == 1" + - "base_dir == 'defaults'" + +- name: include the vars/environments/development/all.yml + include_vars: + file: environments/development/all.yml + register: included_one_file + +- name: verify that the correct file has been loaded and default value is indeed 789 + assert: + that: + - "testing == 789" + - "base_dir == 'environments/development'" + - "{{ included_one_file.ansible_included_var_files | length }} == 1" + - "'vars/environments/development/all.yml' in included_one_file.ansible_included_var_files[0]" + +- name: include the vars/environments/development/all.yml and save results in all + include_vars: + file: environments/development/all.yml + name: all + +- name: verify that the values are stored in the all variable + assert: + that: + - "all['testing'] == 789" + - "all['base_dir'] == 'environments/development'" + +- name: include the all directory in vars + include_vars: + dir: all + depth: 1 + +- name: verify that the default value is indeed 123 + assert: + 
that: + - "testing == 123" + - "base_dir == 'all'" + +- name: include var files with extension only + include_vars: + dir: webapp + ignore_unknown_extensions: True + extensions: ['', 'yaml', 'yml', 'json'] + register: include_without_file_extension + +- name: verify that only files with valid extensions are loaded + assert: + that: + - webapp_version is defined + - "'file_without_extension' in '{{ include_without_file_extension.ansible_included_var_files | join(' ') }}'" + +- name: include every directory in vars + include_vars: + dir: vars + extensions: ['', 'yaml', 'yml', 'json'] + register: include_every_dir + +- name: verify that the correct files have been loaded and overwrite based on alphabetical order + assert: + that: + - "testing == 456" + - "base_dir == 'services'" + - "webapp_containers == 10" + - "{{ include_every_dir.ansible_included_var_files | length }} == 7" + - "'vars/all/all.yml' in include_every_dir.ansible_included_var_files[0]" + - "'vars/environments/development/all.yml' in include_every_dir.ansible_included_var_files[1]" + - "'vars/environments/development/services/webapp.yml' in include_every_dir.ansible_included_var_files[2]" + - "'vars/services/webapp.yml' in include_every_dir.ansible_included_var_files[5]" + - "'vars/webapp/file_without_extension' in include_every_dir.ansible_included_var_files[6]" + +- name: include every directory in vars except files matching webapp.yml + include_vars: + dir: vars + ignore_files: + - webapp.yml + - file_without_extension + register: include_without_webapp + +- name: verify that the webapp.yml file was not included + assert: + that: + - "testing == 789" + - "base_dir == 'environments/development'" + - "{{ include_without_webapp.ansible_included_var_files | length }} == 4" + - "'webapp.yml' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'" + - "'file_without_extension' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'" + +- name: include only files matching webapp.yml + include_vars: + dir: environments + files_matching: webapp.yml + register: include_match_webapp + +- name: verify that only files matching webapp.yml and in the environments directory get loaded. + assert: + that: + - "testing == 101112" + - "base_dir == 'development/services'" + - "webapp_containers == 20" + - "{{ include_match_webapp.ansible_included_var_files | length }} == 1" + - "'vars/environments/development/services/webapp.yml' in include_match_webapp.ansible_included_var_files[0]" + - "'all.yml' not in '{{ include_match_webapp.ansible_included_var_files | join(' ') }}'" + +- name: include only files matching webapp.yml and store results in webapp + include_vars: + dir: environments + files_matching: webapp.yml + name: webapp + +- name: verify that only files matching webapp.yml and in the environments directory get loaded into stored variable webapp. 
+ assert: + that: + - "webapp['testing'] == 101112" + - "webapp['base_dir'] == 'development/services'" + - "webapp['webapp_containers'] == 20" + +- name: include var files without extension + include_vars: + dir: webapp + ignore_unknown_extensions: False + register: include_with_unknown_file_extension + ignore_errors: True + +- name: verify that including files without valid extensions fails with an error + assert: + that: + - "'a valid extension' in include_with_unknown_file_extension.message" + +- name: include var with raw params + include_vars: > + services/service_vars.yml + +- name: Verify that a file given via raw params is included without a trailing newline character + assert: + that: + - "service_name == 'my_custom_service'" + +- name: Check NoneType for raw params and file + include_vars: + file: "{{ lookup('first_found', possible_files, errors='ignore') }}" + vars: + possible_files: + - "does_not_exist.yml" + ignore_errors: True + register: include_with_non_existent_file + +- name: Verify that file and raw_params provide correct error message to user + assert: + that: + - "'Could not find file' in include_with_non_existent_file.message" + +- name: include var (FQCN) with raw params + ansible.builtin.include_vars: > + services/service_vars_fqcn.yml + +- name: Verify that FQCN of include_vars works + assert: + that: + - "'my_custom_service' == service_name_fqcn" + - "'my_custom_service' == service_name_tmpl_fqcn" diff --git a/test/integration/targets/include_vars/vars/all/all.yml b/test/integration/targets/include_vars/vars/all/all.yml new file mode 100644 index 00000000..14c3e92b --- /dev/null +++ b/test/integration/targets/include_vars/vars/all/all.yml @@ -0,0 +1,3 @@ +--- +testing: 123 +base_dir: all diff --git a/test/integration/targets/include_vars/vars/environments/development/all.yml b/test/integration/targets/include_vars/vars/environments/development/all.yml new file mode 100644 index 00000000..9f370de5 --- /dev/null +++ b/test/integration/targets/include_vars/vars/environments/development/all.yml @@ -0,0 +1,3 @@ +--- +testing: 789 +base_dir: 'environments/development' diff --git a/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml b/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml new file mode 100644 index 00000000..a0a809c9 --- /dev/null +++ b/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml @@ -0,0 +1,4 @@ +--- +testing: 101112 +base_dir: 'development/services' +webapp_containers: 20 diff --git a/test/integration/targets/include_vars/vars/services/service_vars.yml b/test/integration/targets/include_vars/vars/services/service_vars.yml new file mode 100644 index 00000000..96b05d6c --- /dev/null +++ b/test/integration/targets/include_vars/vars/services/service_vars.yml @@ -0,0 +1,2 @@ +--- +service_name: 'my_custom_service' \ No newline at end of file diff --git a/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml b/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml new file mode 100644 index 00000000..2c04fee5 --- /dev/null +++ b/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml @@ -0,0 +1,3 @@ +--- +service_name_fqcn: 'my_custom_service' +service_name_tmpl_fqcn: '{{ service_name_fqcn }}' \ No newline at end of file diff --git a/test/integration/targets/include_vars/vars/services/webapp.yml b/test/integration/targets/include_vars/vars/services/webapp.yml new file mode 100644 index 00000000..f0dcc8b5 --- /dev/null +++
b/test/integration/targets/include_vars/vars/services/webapp.yml @@ -0,0 +1,4 @@ +--- +testing: 456 +base_dir: services +webapp_containers: 10 diff --git a/test/integration/targets/include_vars/vars/webapp/file_without_extension b/test/integration/targets/include_vars/vars/webapp/file_without_extension new file mode 100644 index 00000000..9cfb60fb --- /dev/null +++ b/test/integration/targets/include_vars/vars/webapp/file_without_extension @@ -0,0 +1,2 @@ +--- +webapp_version: "1" diff --git a/test/integration/targets/include_when_parent_is_dynamic/aliases b/test/integration/targets/include_when_parent_is_dynamic/aliases new file mode 100644 index 00000000..41c99f51 --- /dev/null +++ b/test/integration/targets/include_when_parent_is_dynamic/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/python2.6 # include is controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/include_when_parent_is_dynamic/playbook.yml b/test/integration/targets/include_when_parent_is_dynamic/playbook.yml new file mode 100644 index 00000000..afdbc54c --- /dev/null +++ b/test/integration/targets/include_when_parent_is_dynamic/playbook.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + tasks: + - include_tasks: tasks.yml diff --git a/test/integration/targets/include_when_parent_is_dynamic/runme.sh b/test/integration/targets/include_when_parent_is_dynamic/runme.sh new file mode 100755 index 00000000..b136965f --- /dev/null +++ b/test/integration/targets/include_when_parent_is_dynamic/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eu + +ansible-playbook playbook.yml "$@" > output.log 2>&1 || true + +if grep "task should always execute" output.log >/dev/null; then + echo "Test passed (playbook failed with expected output, output not shown)." + exit 0 +fi + +cat output.log +exit 1 diff --git a/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml b/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml new file mode 100644 index 00000000..101a18ab --- /dev/null +++ b/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml @@ -0,0 +1 @@ +intentional syntax error which should NOT be encountered diff --git a/test/integration/targets/include_when_parent_is_dynamic/tasks.yml b/test/integration/targets/include_when_parent_is_dynamic/tasks.yml new file mode 100644 index 00000000..6831245c --- /dev/null +++ b/test/integration/targets/include_when_parent_is_dynamic/tasks.yml @@ -0,0 +1,12 @@ +# intentionally stop execution of the play before reaching the include below +# if the include is dynamic as expected it will not trigger a syntax error +# however, if the include is static a syntax error will occur +- name: EXPECTED FAILURE + fail: + msg: + This task should always execute. + The playbook would have failed due to a syntax error in 'syntax_error.yml' when attempting a static include of that file. 
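+ +# Rough equivalence, assuming the 2.10 semantics of the bare 'include' keyword: +# since this file was reached via the dynamic include_tasks, the include at the +# bottom behaves like "- include_tasks: syntax_error.yml", i.e. it would only be +# parsed at runtime, and the fail task above stops the play before that happens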
+ +# perform an include task which should be static if all of the task's parents are static, otherwise it should be dynamic +# this file was loaded using include_tasks, which is dynamic, so this include should also be dynamic +- include: syntax_error.yml diff --git a/test/integration/targets/include_when_parent_is_static/aliases b/test/integration/targets/include_when_parent_is_static/aliases new file mode 100644 index 00000000..41c99f51 --- /dev/null +++ b/test/integration/targets/include_when_parent_is_static/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/python2.6 # include is controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/include_when_parent_is_static/playbook.yml b/test/integration/targets/include_when_parent_is_static/playbook.yml new file mode 100644 index 00000000..6189873e --- /dev/null +++ b/test/integration/targets/include_when_parent_is_static/playbook.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + tasks: + - import_tasks: tasks.yml diff --git a/test/integration/targets/include_when_parent_is_static/runme.sh b/test/integration/targets/include_when_parent_is_static/runme.sh new file mode 100755 index 00000000..33728bdf --- /dev/null +++ b/test/integration/targets/include_when_parent_is_static/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eu + +ansible-playbook playbook.yml "$@" > output.log 2>&1 || true + +if grep "intentional syntax error" output.log >/dev/null; then + echo "Test passed (playbook failed with expected output, output not shown)." + exit 0 +fi + +cat output.log +exit 1 diff --git a/test/integration/targets/include_when_parent_is_static/syntax_error.yml b/test/integration/targets/include_when_parent_is_static/syntax_error.yml new file mode 100644 index 00000000..e1a629ce --- /dev/null +++ b/test/integration/targets/include_when_parent_is_static/syntax_error.yml @@ -0,0 +1 @@ +intentional syntax error which SHOULD be encountered diff --git a/test/integration/targets/include_when_parent_is_static/tasks.yml b/test/integration/targets/include_when_parent_is_static/tasks.yml new file mode 100644 index 00000000..a234a3dd --- /dev/null +++ b/test/integration/targets/include_when_parent_is_static/tasks.yml @@ -0,0 +1,12 @@ +# intentionally stop execution of the play before reaching the include below +# if the include is static as expected it will trigger a syntax error +# however, if the include is dynamic a syntax error will not occur +- name: EXPECTED SUCCESS + fail: + msg: + This task should never execute. + The playbook should have failed due to a syntax error in 'syntax_error.yml' when attempting a static include of that file. 
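+ +# Rough equivalence, assuming the same parent-inheritance rule: since this file +# was reached via the static import_tasks, the include at the bottom behaves like +# "- import_tasks: syntax_error.yml", i.e. it is parsed at playbook-parse time, +# so the syntax error fires before any task above can run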
+ +# perform an include task which should be static if all of the task's parents are static, otherwise it should be dynamic +# this file was loaded using import_tasks, which is static, so this include should also be static +- include: syntax_error.yml diff --git a/test/integration/targets/includes/aliases b/test/integration/targets/includes/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/includes/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/includes/roles/test_includes/handlers/main.yml b/test/integration/targets/includes/roles/test_includes/handlers/main.yml new file mode 100644 index 00000000..7d3e625f --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/handlers/main.yml @@ -0,0 +1 @@ +- include: more_handlers.yml diff --git a/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml b/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml new file mode 100644 index 00000000..c85d53cc --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml @@ -0,0 +1,12 @@ +- name: included_handler + set_fact: + ca: 4001 + cb: 4002 + cc: 4003 + +- name: verify_handler + assert: + that: + - "ca == 4001" + - "cb == 4002" + - "cc == 4003" diff --git a/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml b/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml new file mode 100644 index 00000000..62416705 --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml @@ -0,0 +1,9 @@ +# 'canary2' used instead of 'canary', otherwise a "recursive loop detected in +# template string" occurs when both includes use static=yes +- include: 'leaf_sublevel.yml canary2={{ canary }}' + static: yes + when: 'nested_include_static|bool' # the value of 'static' cannot be a variable, hence the 'when' + +- include: 'leaf_sublevel.yml canary2={{ canary }}' + static: no + when: 'not nested_include_static|bool' diff --git a/test/integration/targets/includes/roles/test_includes/tasks/empty.yml b/test/integration/targets/includes/roles/test_includes/tasks/empty.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml b/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml new file mode 100644 index 00000000..6f4c0480 --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml @@ -0,0 +1,9 @@ +- set_fact: + ca: "{{ a }}" +- debug: var=ca +- set_fact: + cb: "{{ b }}" +- debug: var=cb +- set_fact: + cc: "{{ c }}" +- debug: var=cc diff --git a/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml b/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml new file mode 100644 index 00000000..06632017 --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml @@ -0,0 +1,2 @@ +- set_fact: + canary_fact: '{{ canary2 }}' diff --git a/test/integration/targets/includes/roles/test_includes/tasks/main.yml b/test/integration/targets/includes/roles/test_includes/tasks/main.yml new file mode 100644 index 00000000..6fcac9eb --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/tasks/main.yml @@ -0,0 +1,106 @@ +# test code for task includes +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +#
Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +- include: included_task1.yml a=1 b=2 c=3 + +- name: verify non-variable include params + assert: + that: + - "ca == '1'" + - "cb == '2'" + - "cc == '3'" + +- set_fact: + a: 101 + b: 102 + c: 103 + +- include: included_task1.yml a={{a}} b={{b}} c=103 + +- name: verify variable include params + assert: + that: + - "ca == 101" + - "cb == 102" + - "cc == 103" + +# Test that strings are not turned into numbers +- set_fact: + a: "101" + b: "102" + c: "103" + +- include: included_task1.yml a={{a}} b={{b}} c=103 + +- name: verify string include params are not converted to numbers + assert: + that: + - "ca == '101'" + - "cb == '102'" + - "cc == '103'" + +# now try long form includes + +- include: included_task1.yml + vars: + a: 201 + b: 202 + c: 203 + +- debug: var=a +- debug: var=b +- debug: var=c + +- name: verify long-form include params + assert: + that: + - "ca == 201" + - "cb == 202" + - "cc == 203" + +- name: test handlers with includes + shell: echo 1 + notify: + # both these via a handler include + - included_handler + - verify_handler + +- include: branch_toplevel.yml canary=value1 nested_include_static=no + static: no +- assert: + that: + - 'canary_fact == "value1"' + +- include: branch_toplevel.yml canary=value2 nested_include_static=yes + static: no +- assert: + that: + - 'canary_fact == "value2"' + +- include: branch_toplevel.yml canary=value3 nested_include_static=no + static: yes +- assert: + that: + - 'canary_fact == "value3"' + +- include: branch_toplevel.yml canary=value4 nested_include_static=yes + static: yes +- assert: + that: + - 'canary_fact == "value4"' diff --git a/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml b/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml new file mode 100644 index 00000000..862b051c --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml @@ -0,0 +1,4 @@ +- set_fact: + ca: 33000 + cb: 33001 + cc: 33002 diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml new file mode 100644 index 00000000..d9c32f4f --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml @@ -0,0 +1,2 @@ +- set_fact: + inner: "reached" diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml new file mode 100644 index 00000000..5b4ce040 --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml @@ -0,0 +1,2 @@ +- set_fact: + inner_fqcn: "reached" diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml new file mode 100644 index 00000000..5ae7882f --- /dev/null +++
b/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml @@ -0,0 +1,9 @@ +- name: this needs to be here + debug: + msg: "hello" +- include: inner.yml + with_items: + - '1' +- ansible.builtin.include: inner_fqcn.yml + with_items: + - '1' diff --git a/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml new file mode 100644 index 00000000..fa4ec93e --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml @@ -0,0 +1,2 @@ +- set_fact: + inner_host_pinned: "reached" diff --git a/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml new file mode 100644 index 00000000..7bc19faa --- /dev/null +++ b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml @@ -0,0 +1,6 @@ +- name: this needs to be here + debug: + msg: "hello" +- include: inner.yml + with_items: + - '1' diff --git a/test/integration/targets/includes/runme.sh b/test/integration/targets/includes/runme.sh new file mode 100755 index 00000000..dff40029 --- /dev/null +++ b/test/integration/targets/includes/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_includes.yml -i ../../inventory "$@" diff --git a/test/integration/targets/includes/test_include_free.yml b/test/integration/targets/includes/test_include_free.yml new file mode 100644 index 00000000..dedad734 --- /dev/null +++ b/test/integration/targets/includes/test_include_free.yml @@ -0,0 +1,10 @@ +- hosts: testhost + gather_facts: no + strategy: free + roles: + - test_includes_free + tasks: + - assert: + that: + - "inner == 'reached'" + - "inner_fqcn == 'reached'" diff --git a/test/integration/targets/includes/test_include_host_pinned.yml b/test/integration/targets/includes/test_include_host_pinned.yml new file mode 100644 index 00000000..6ff92c66 --- /dev/null +++ b/test/integration/targets/includes/test_include_host_pinned.yml @@ -0,0 +1,9 @@ +- hosts: testhost + gather_facts: no + strategy: host_pinned + roles: + - test_includes_host_pinned + tasks: + - assert: + that: + - "inner_host_pinned == 'reached'" diff --git a/test/integration/targets/includes/test_includes.yml b/test/integration/targets/includes/test_includes.yml new file mode 100644 index 00000000..0bcebd4f --- /dev/null +++ b/test/integration/targets/includes/test_includes.yml @@ -0,0 +1,7 @@ +- include: test_includes2.yml parameter1=asdf parameter2=jkl + +- include: test_includes3.yml + +- include: test_include_free.yml + +- include: test_include_host_pinned.yml diff --git a/test/integration/targets/includes/test_includes2.yml b/test/integration/targets/includes/test_includes2.yml new file mode 100644 index 00000000..a32e8513 --- /dev/null +++ b/test/integration/targets/includes/test_includes2.yml @@ -0,0 +1,22 @@ +- name: verify playbook includes can take parameters + hosts: testhost + tasks: + - assert: + that: + - "parameter1 == 'asdf'" + - "parameter2 == 'jkl'" + +- name: verify task include logic + hosts: testhost + gather_facts: True + roles: + - role: test_includes + tags: test_includes + tasks: + - include: roles/test_includes/tasks/not_a_role_task.yml + - include: roles/test_includes/tasks/empty.yml + - assert: + that: + - "ca == 33000" + - "cb == 33001" + - "cc == 33002" diff --git a/test/integration/targets/includes/test_includes3.yml 
b/test/integration/targets/includes/test_includes3.yml new file mode 100644 index 00000000..0b4c6312 --- /dev/null +++ b/test/integration/targets/includes/test_includes3.yml @@ -0,0 +1,6 @@ +- hosts: testhost + tasks: + - include: test_includes4.yml + with_items: ["a"] + loop_control: + loop_var: r diff --git a/test/integration/targets/includes/test_includes4.yml b/test/integration/targets/includes/test_includes4.yml new file mode 100644 index 00000000..bee906bd --- /dev/null +++ b/test/integration/targets/includes/test_includes4.yml @@ -0,0 +1,2 @@ +- set_fact: + p: 1 diff --git a/test/integration/targets/includes_race/aliases b/test/integration/targets/includes_race/aliases new file mode 100644 index 00000000..fff62d9f --- /dev/null +++ b/test/integration/targets/includes_race/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +skip/aix diff --git a/test/integration/targets/includes_race/inventory b/test/integration/targets/includes_race/inventory new file mode 100644 index 00000000..87879294 --- /dev/null +++ b/test/integration/targets/includes_race/inventory @@ -0,0 +1,30 @@ +host001 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host002 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host003 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host004 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host005 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host006 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host007 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host008 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host009 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host010 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host011 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host012 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host013 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host014 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host015 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host016 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host017 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host018 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host019 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host020 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host021 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host022 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host023 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host024 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host025 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host026 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host027 ansible_connection=local 
ansible_python_interpreter="{{ ansible_playbook_python }}" +host028 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host029 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +host030 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml b/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml new file mode 100644 index 00000000..cee459a2 --- /dev/null +++ b/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for random_sleep +- name: Generate sleep time + set_fact: + sleep_time: "{{ 3 | random }}" + +- name: Do random sleep + shell: sleep "{{ sleep_time }}" diff --git a/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml new file mode 100644 index 00000000..36b08dcb --- /dev/null +++ b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml @@ -0,0 +1,4 @@ +--- +- name: Set fact1 + set_fact: + fact1: yay diff --git a/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml new file mode 100644 index 00000000..865f130d --- /dev/null +++ b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml @@ -0,0 +1,4 @@ +--- +- name: Set fact2 + set_fact: + fact2: yay diff --git a/test/integration/targets/includes_race/runme.sh b/test/integration/targets/includes_race/runme.sh new file mode 100755 index 00000000..2261d271 --- /dev/null +++ b/test/integration/targets/includes_race/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_includes_race.yml -i inventory -v "$@" diff --git a/test/integration/targets/includes_race/test_includes_race.yml b/test/integration/targets/includes_race/test_includes_race.yml new file mode 100644 index 00000000..20f7dddd --- /dev/null +++ b/test/integration/targets/includes_race/test_includes_race.yml @@ -0,0 +1,19 @@ +- hosts: all + strategy: free + gather_facts: false + tasks: + - include_role: + name: random_sleep + - block: + - name: set a fact (1) + include_role: + name: set_a_fact + tasks_from: fact1.yml + - name: set a fact (2) + include_role: + name: set_a_fact + tasks_from: fact2.yml + - name: include didn't run + fail: + msg: "set_a_fact didn't run fact1 {{ fact1 | default('not defined')}} fact2: {{ fact2 | default('not defined') }}" + when: (fact1 is not defined or fact2 is not defined) diff --git a/test/integration/targets/infra/aliases b/test/integration/targets/infra/aliases new file mode 100644 index 00000000..887d7029 --- /dev/null +++ b/test/integration/targets/infra/aliases @@ -0,0 +1,3 @@ +shippable/posix/group3 +needs/file/hacking/test-module.py +needs/file/lib/ansible/modules/ping.py diff --git a/test/integration/targets/infra/inventory.local b/test/integration/targets/infra/inventory.local new file mode 100644 index 00000000..2baa1f88 --- /dev/null +++ b/test/integration/targets/infra/inventory.local @@ -0,0 +1,2 @@ +testhost ansible_connection=local + diff --git a/test/integration/targets/infra/library/test.py b/test/integration/targets/infra/library/test.py new file mode 100644 index 00000000..93860575 --- /dev/null +++ b/test/integration/targets/infra/library/test.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from 
ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + ) + result = { + 'selinux_special_fs': module._selinux_special_fs, + 'tmpdir': module._tmpdir, + 'keep_remote_files': module._keep_remote_files, + 'version': module.ansible_version, + } + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/infra/runme.sh b/test/integration/targets/infra/runme.sh new file mode 100755 index 00000000..c4d84572 --- /dev/null +++ b/test/integration/targets/infra/runme.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -ux + +# ensure fail/assert work locally and can stop execution with non-zero exit code +PB_OUT=$(ansible-playbook -i inventory.local test_test_infra.yml) +APB_RC=$? +echo "$PB_OUT" +echo "rc was $APB_RC (must be non-zero)" +[ ${APB_RC} -ne 0 ] +echo "ensure playbook output shows assert/fail works (True)" +echo "$PB_OUT" | grep -F "fail works (True)" || exit 1 +echo "$PB_OUT" | grep -F "assert works (True)" || exit 1 + +# ensure we work using all specified test args, overridden inventory, etc +PB_OUT=$(ansible-playbook -i ../../inventory test_test_infra.yml "$@") +APB_RC=$? +echo "$PB_OUT" +echo "rc was $APB_RC (must be non-zero)" +[ ${APB_RC} -ne 0 ] +echo "ensure playbook output shows assert/fail works (True)" +echo "$PB_OUT" | grep -F "fail works (True)" || exit 1 +echo "$PB_OUT" | grep -F "assert works (True)" || exit 1 + +set -e + +PING_MODULE_PATH="../../../../lib/ansible/modules/ping.py" + +# ensure test-module.py script works without passing Python interpreter path +../../../../hacking/test-module.py -m "$PING_MODULE_PATH" + +# ensure test-module.py script works when explicitly passing the Python interpreter path +../../../../hacking/test-module.py -m "$PING_MODULE_PATH" -I ansible_python_interpreter="$(which python)" + +# ensure module.ansible_version is defined when using test-module.py +../../../../hacking/test-module.py -m library/test.py -I ansible_python_interpreter="$(which python)" <<< '{"ANSIBLE_MODULE_ARGS": {}}' + +# ensure exercising module code locally works +python -m ansible.modules.file <<< '{"ANSIBLE_MODULE_ARGS": {"path": "/path/to/file", "state": "absent"}}' diff --git a/test/integration/targets/infra/test_test_infra.yml b/test/integration/targets/infra/test_test_infra.yml new file mode 100644 index 00000000..706f9b8f --- /dev/null +++ b/test/integration/targets/infra/test_test_infra.yml @@ -0,0 +1,25 @@ +- hosts: testhost + gather_facts: no + tags: + - always + tasks: + - name: ensure fail action produces a failing result + fail: + ignore_errors: yes + register: fail_out + + - debug: + msg: fail works ({{ fail_out.failed }}) + + - name: ensure assert produces a failing result + assert: + that: false + ignore_errors: yes + register: assert_out + + - debug: + msg: assert works ({{ assert_out.failed }}) + + - name: EXPECTED FAILURE ensure fail action stops execution + fail: + msg: fail actually failed (this is expected) diff --git a/test/integration/targets/interpreter_discovery_python/aliases b/test/integration/targets/interpreter_discovery_python/aliases new file mode 100644 index 00000000..740ed1a5 --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +non_local # workaround to allow override of ansible_python_interpreter; disables coverage on this integration target diff --git a/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py
b/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py new file mode 100644 index 00000000..73179211 --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import sys +from ansible.module_utils.basic import AnsibleModule + + +def main(): + result = dict(changed=False) + + module = AnsibleModule(argument_spec=dict( + facts=dict(type=dict, default={}) + )) + + result['ansible_facts'] = module.params['facts'] + result['running_python_interpreter'] = sys.executable + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/interpreter_discovery_python/tasks/main.yml b/test/integration/targets/interpreter_discovery_python/tasks/main.yml new file mode 100644 index 00000000..be15186f --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python/tasks/main.yml @@ -0,0 +1,177 @@ +- name: ensure we can override ansible_python_interpreter + vars: + ansible_python_interpreter: overriddenpython + assert: + that: + - ansible_python_interpreter == 'overriddenpython' + fail_msg: "'ansible_python_interpreter' appears to be set at a high precedence to {{ ansible_python_interpreter }}, + which breaks this test." + +- name: snag some facts to validate for later + set_fact: + distro: '{{ ansible_distribution | default("unknown") | lower }}' + distro_version: '{{ ansible_distribution_version | default("unknown") }}' + os_family: '{{ ansible_os_family | default("unknown") }}' + +- name: test that python discovery is working and that fact persistence makes it only run once + block: + - name: clear facts to force interpreter discovery to run + meta: clear_facts + + - name: trigger discovery with auto + vars: + ansible_python_interpreter: auto + ping: + register: auto_out + + - name: get the interpreter being used on the target to execute modules + vars: + # keep this set so we can verify we didn't repeat discovery + ansible_python_interpreter: auto + test_echo_module: + register: echoout + + - name: clear facts to force interpreter discovery to run again + meta: clear_facts + + - name: get the interpreter being used on the target to execute modules with ansible_facts + vars: + # keep this set so we can verify we didn't repeat discovery + ansible_python_interpreter: auto + test_echo_module: + facts: + sandwich: ham + register: echoout_with_facts + + - when: distro == 'macosx' + block: + - name: Get the sys.executable for the macos discovered interpreter, as it may be different than the actual path + raw: '{{ auto_out.ansible_facts.discovered_interpreter_python }} -c "import sys; print(sys.executable)"' + register: discovered_sys_executable + + - set_fact: + normalized_discovered_interpreter: '{{ discovered_sys_executable.stdout_lines[0] }}' + + - set_fact: + normalized_discovered_interpreter: '{{ auto_out.ansible_facts.discovered_interpreter_python }}' + when: distro != 'macosx' + + - assert: + that: + - auto_out.ansible_facts.discovered_interpreter_python is defined + - echoout.running_python_interpreter == normalized_discovered_interpreter + # verify that discovery didn't run again (if it did, we'd have the fact in the result) + - echoout.ansible_facts is not defined or 
echoout.ansible_facts.discovered_interpreter_python is not defined + - echoout_with_facts.ansible_facts is defined + - echoout_with_facts.running_python_interpreter == normalized_discovered_interpreter + +- name: test that auto_legacy gives a dep warning when /usr/bin/python present but != auto result + block: + - name: clear facts to force interpreter discovery to run + meta: clear_facts + + - name: trigger discovery with auto_legacy + vars: + ansible_python_interpreter: auto_legacy + ping: + register: legacy + + - name: check for dep warning (only on platforms where auto result is not /usr/bin/python and legacy is) + assert: + that: + - legacy.deprecations | default([]) | length > 0 + # only check for a dep warning if legacy returned /usr/bin/python and auto didn't + when: legacy.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and + auto_out.ansible_facts.discovered_interpreter_python != '/usr/bin/python' + + +- name: test that auto_silent never warns and got the same answer as auto + block: + - name: clear facts to force interpreter discovery to run + meta: clear_facts + + - name: initial task to trigger discovery + vars: + ansible_python_interpreter: auto_silent + ping: + register: auto_silent_out + + - assert: + that: + - auto_silent_out.warnings is not defined + - auto_silent_out.ansible_facts.discovered_interpreter_python == auto_out.ansible_facts.discovered_interpreter_python + + +- name: test that auto_legacy_silent never warns and got the same answer as auto_legacy + block: + - name: clear facts to force interpreter discovery to run + meta: clear_facts + + - name: trigger discovery with auto_legacy_silent + vars: + ansible_python_interpreter: auto_legacy_silent + ping: + register: legacy_silent + + - assert: + that: + - legacy_silent.warnings is not defined + - legacy_silent.ansible_facts.discovered_interpreter_python == legacy.ansible_facts.discovered_interpreter_python + +- name: ensure modules can't set discovered_interpreter_X or ansible_X_interpreter + block: + - test_echo_module: + facts: + ansible_discovered_interpreter_bogus: from module + discovered_interpreter_bogus: from_module + ansible_bogus_interpreter: from_module + test_fact: from_module + register: echoout + + - assert: + that: + - test_fact == 'from_module' + - discovered_interpreter_bogus | default('nope') == 'nope' + - ansible_bogus_interpreter | default('nope') == 'nope' + # this one will exist in facts, but with its prefix removed + - ansible_facts['ansible_bogus_interpreter'] | default('nope') == 'nope' + - ansible_facts['discovered_interpreter_bogus'] | default('nope') == 'nope' + + - name: debian assertions + assert: + that: + - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3' + when: distro == 'debian' and distro_version is version('10', '>=') + + - name: fedora assertions + assert: + that: + - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3' + when: distro == 'fedora' and distro_version is version('23', '>=') + + - name: rhel assertions + assert: + that: + # rhel 6/7 + - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and distro_version is version('8','<')) or distro_version is version('8','>=') + # rhel 8+ + - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/libexec/platform-python' and distro_version is version('8','>=')) or distro_version is version('8','<') + when: distro == 'redhat' + + - name: ubuntu assertions + assert: + that: + # ubuntu < 16 + - 
(auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and distro_version is version('16.04','<')) or distro_version is version('16.04','>=') + # ubuntu >= 16 + - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3' and distro_version is version('16.04','>=')) or distro_version is version('16.04','<') + when: distro == 'ubuntu' + + - name: mac assertions + assert: + that: + - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' + when: os_family == 'darwin' + + always: + - meta: clear_facts diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases b/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases new file mode 100644 index 00000000..dc9ac468 --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +non_local # this test requires interpreter discovery, which means code coverage must be disabled diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml b/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml new file mode 100644 index 00000000..535269d1 --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml @@ -0,0 +1,10 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Test python interpreter discovery with delegate_to without delegate_facts + ping: + delegate_to: testhost + - name: Test python interpreter discovery with delegate_to with delegate_facts + ping: + delegate_to: testhost + delegate_facts: yes diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory b/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory new file mode 100644 index 00000000..350f3e89 --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory @@ -0,0 +1,2 @@ +[local] +testhost ansible_connection=local ansible_python_interpreter=auto # interpreter discovery required diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh b/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh new file mode 100755 index 00000000..ca2caa1c --- /dev/null +++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook delegate_facts.yml -i inventory "$@" diff --git a/test/integration/targets/inventory/aliases b/test/integration/targets/inventory/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/inventory/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/inventory/inv_with_int.yml b/test/integration/targets/inventory/inv_with_int.yml new file mode 100644 index 00000000..5b2f21da --- /dev/null +++ b/test/integration/targets/inventory/inv_with_int.yml @@ -0,0 +1,6 @@ +all: + hosts: + testing123: + x: + a: 1 + 0: 2 diff --git a/test/integration/targets/inventory/playbook.yml b/test/integration/targets/inventory/playbook.yml new file mode 100644 index 00000000..5e073614 --- /dev/null +++ b/test/integration/targets/inventory/playbook.yml @@ -0,0 +1,4 @@ +- hosts: all + gather_facts: false + tasks: + - ping: diff --git a/test/integration/targets/inventory/runme.sh b/test/integration/targets/inventory/runme.sh new file mode 100755 index 00000000..87bef447 --- /dev/null +++ 
b/test/integration/targets/inventory/runme.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +set -eux + +empty_limit_file="$(mktemp)" +touch "${empty_limit_file}" + +tmpdir="$(mktemp -d)" + +cleanup() { + if [[ -f "${empty_limit_file}" ]]; then + rm -rf "${empty_limit_file}" + fi + rm -rf "$tmpdir" +} + +trap 'cleanup' EXIT + +# https://github.com/ansible/ansible/issues/52152 +# Ensure that non-matching limit causes failure with rc 1 +if ansible-playbook -i ../../inventory --limit foo playbook.yml; then + echo "Non-matching limit should cause failure" + exit 1 +fi + +# Ensure that non-existing limit file causes failure with rc 1 +if ansible-playbook -i ../../inventory --limit @foo playbook.yml; then + echo "Non-existing limit file should cause failure" + exit 1 +fi + +if ! ansible-playbook -i ../../inventory --limit @"$tmpdir" playbook.yml 2>&1 | grep 'must be a file'; then + echo "Using a directory as a limit file should throw proper AnsibleError" + exit 1 +fi + +# Ensure that empty limit file does not cause IndexError #59695 +ansible-playbook -i ../../inventory --limit @"${empty_limit_file}" playbook.yml + +ansible-playbook -i ../../inventory "$@" strategy.yml +ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=always ansible-playbook -i ../../inventory "$@" strategy.yml +ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook -i ../../inventory "$@" strategy.yml + +# Do not fail when all inventories fail to parse. +# Do not fail when any inventory fails to parse. +ANSIBLE_INVENTORY_UNPARSED_FAILED=False ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=False ansible -m ping localhost -i /idontexist "$@" + +# Fail when all inventories fail to parse. +# Do not fail when just one inventory fails to parse. +if ANSIBLE_INVENTORY_UNPARSED_FAILED=True ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=False ansible -m ping localhost -i /idontexist; then + echo "All inventories failed/did not exist, should cause failure" + echo "ran with: ANSIBLE_INVENTORY_UNPARSED_FAILED=True ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=False" + exit 1 +fi + +# Same as above, but ensure there is no failure when at least one inventory parses: +# we *only* fail when all inventories fail to parse. +ANSIBLE_INVENTORY_UNPARSED_FAILED=True ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=False ansible -m ping localhost -i /idontexist -i ../../inventory "$@" + +# Fail when any inventory fails to parse. +if ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=True ansible -m ping localhost -i /idontexist -i ../../inventory; then + echo "One inventory failed/did not exist, should cause failure" + echo "ran with: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=True" + exit 1 +fi + +# Ensure we don't throw when an empty directory is used as inventory +ansible-playbook -i "$tmpdir" playbook.yml + +# Ensure we can use a directory of inventories +cp ../../inventory "$tmpdir" +ansible-playbook -i "$tmpdir" playbook.yml + +# ... 
even if it contains another empty directory +mkdir "$tmpdir/empty" +ansible-playbook -i "$tmpdir" playbook.yml + +if ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=True ansible -m ping localhost -i "$tmpdir"; then + echo "Empty directory should cause failure when ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=True" + exit 1 +fi + +ansible-inventory -i inv_with_int.yml --list "$@" diff --git a/test/integration/targets/inventory/strategy.yml b/test/integration/targets/inventory/strategy.yml new file mode 100644 index 00000000..5c1cbd2b --- /dev/null +++ b/test/integration/targets/inventory/strategy.yml @@ -0,0 +1,12 @@ +- name: Check that 'invalid' group works, problem exposed in #58980 + hosts: localhost + tasks: + - name: add a host to a group, that has - to trigger substitution + add_host: + name: localhost + groups: Not-Working + + - name: group hosts by distribution, with dash to trigger substitution + group_by: + key: "{{ ansible_distribution }}-{{ ansible_distribution_version }}" + changed_when: false diff --git a/test/integration/targets/inventory_cache/aliases b/test/integration/targets/inventory_cache/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/inventory_cache/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/inventory_cache/cache/.keep b/test/integration/targets/inventory_cache/cache/.keep new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/inventory_cache/cache_host.yml b/test/integration/targets/inventory_cache/cache_host.yml new file mode 100644 index 00000000..3630641b --- /dev/null +++ b/test/integration/targets/inventory_cache/cache_host.yml @@ -0,0 +1,4 @@ +plugin: cache_host +cache: true +cache_plugin: jsonfile +cache_connection: ./cache diff --git a/test/integration/targets/inventory_cache/plugins/inventory/cache_host.py b/test/integration/targets/inventory_cache/plugins/inventory/cache_host.py new file mode 100644 index 00000000..628aba15 --- /dev/null +++ b/test/integration/targets/inventory_cache/plugins/inventory/cache_host.py @@ -0,0 +1,56 @@ +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + inventory: cache_host + short_description: add a host to inventory and cache it + description: add a host to inventory and cache it + extends_documentation_fragment: + - inventory_cache + options: + plugin: + required: true + description: name of the plugin (cache_host) +''' + +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable +import random + + +class InventoryModule(BaseInventoryPlugin, Cacheable): + + NAME = 'cache_host' + + def verify_file(self, path): + if not path.endswith(('cache_host.yml', 'cache_host.yaml',)): + return False + return super(InventoryModule, self).verify_file(path) + + def parse(self, inventory, loader, path, cache=None): + super(InventoryModule, self).parse(inventory, loader, path) + self._read_config_data(path) + + cache_key = self.get_cache_key(path) + # user has enabled cache and the cache is not being flushed + read_cache = self.get_option('cache') and cache + # user has enabled cache and the cache is being flushed + update_cache = self.get_option('cache') and not cache + + host = None + if read_cache: + try: + host = self._cache[cache_key] + except KeyError: + # cache expired + update_cache = True + + if host is None: + host = 
'testhost{0}'.format(random.randint(0, 50)) + + self.inventory.add_host(host, 'all') + + if update_cache: + self._cache[cache_key] = host diff --git a/test/integration/targets/inventory_cache/runme.sh b/test/integration/targets/inventory_cache/runme.sh new file mode 100755 index 00000000..098439eb --- /dev/null +++ b/test/integration/targets/inventory_cache/runme.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_INVENTORY_PLUGINS=./plugins/inventory + +cleanup() { + for f in ./cache/ansible_inventory*; do + if [ -f "$f" ]; then rm -rf "$f"; fi + done +} + +trap 'cleanup' EXIT + +# Test no warning when writing to the cache for the first time +test "$(ansible-inventory -i cache_host.yml --graph 2>&1 | tee out.txt | grep -c '\[WARNING\]')" = 0 +writehost="$(grep "testhost[0-9]\{1,2\}" out.txt)" + +# Test reading from the cache +test "$(ansible-inventory -i cache_host.yml --graph 2>&1 | tee out.txt | grep -c '\[WARNING\]')" = 0 +readhost="$(grep 'testhost[0-9]\{1,2\}' out.txt)" + +test "$readhost" = "$writehost" diff --git a/test/integration/targets/inventory_ini/aliases b/test/integration/targets/inventory_ini/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/inventory_ini/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/inventory_ini/inventory.ini b/test/integration/targets/inventory_ini/inventory.ini new file mode 100644 index 00000000..a0c99ade --- /dev/null +++ b/test/integration/targets/inventory_ini/inventory.ini @@ -0,0 +1,5 @@ +[local] +testhost ansible_connection=local ansible_become=no ansible_become_user=ansibletest1 + +[all:vars] +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/inventory_ini/runme.sh b/test/integration/targets/inventory_ini/runme.sh new file mode 100755 index 00000000..81bf1475 --- /dev/null +++ b/test/integration/targets/inventory_ini/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook -v -i inventory.ini test_ansible_become.yml diff --git a/test/integration/targets/inventory_ini/test_ansible_become.yml b/test/integration/targets/inventory_ini/test_ansible_become.yml new file mode 100644 index 00000000..55bbe7da --- /dev/null +++ b/test/integration/targets/inventory_ini/test_ansible_become.yml @@ -0,0 +1,11 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Test proper bool evaluation of ansible_become (issue #70476) + shell: whoami + register: output + + - name: Assert we are NOT the become user specified + assert: + that: + - "output.stdout != 'ansibletest1'" diff --git a/test/integration/targets/inventory_script/aliases b/test/integration/targets/inventory_script/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/inventory_script/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/inventory_script/inventory.json b/test/integration/targets/inventory_script/inventory.json new file mode 100644 index 00000000..5046a9a8 --- /dev/null +++ b/test/integration/targets/inventory_script/inventory.json @@ -0,0 +1,1045 @@ +{ + "None": { + "hosts": [ + "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348", + "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908", + "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb", + "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad" + ] + }, + "_meta": { + "hostvars": { + "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348": { + "alarmactionsenabled": null, + "ansible_host": 
"None", + "ansible_ssh_host": "None", + "ansible_uuid": "239fb366-6d93-430e-939a-0b6ab272d98f", + "availablefield": [], + "capability": { + "bootoptionssupported": false, + "bootretryoptionssupported": false, + "changetrackingsupported": false, + "consolepreferencessupported": false, + "cpufeaturemasksupported": false, + "disablesnapshotssupported": false, + "diskonlysnapshotonsuspendedvmsupported": null, + "disksharessupported": false, + "dynamicproperty": [], + "dynamictype": null, + "featurerequirementsupported": false, + "guestautolocksupported": false, + "hostbasedreplicationsupported": false, + "locksnapshotssupported": false, + "memoryreservationlocksupported": false, + "memorysnapshotssupported": false, + "multiplecorespersocketsupported": false, + "multiplesnapshotssupported": false, + "nestedhvsupported": false, + "npivwwnonnonrdmvmsupported": false, + "pervmevcsupported": null, + "poweredoffsnapshotssupported": false, + "poweredonmonitortypechangesupported": false, + "quiescedsnapshotssupported": false, + "recordreplaysupported": false, + "reverttosnapshotsupported": false, + "s1acpimanagementsupported": false, + "securebootsupported": null, + "sesparsedisksupported": false, + "settingdisplaytopologysupported": false, + "settingscreenresolutionsupported": false, + "settingvideoramsizesupported": false, + "snapshotconfigsupported": false, + "snapshotoperationssupported": false, + "swapplacementsupported": false, + "toolsautoupdatesupported": false, + "toolssynctimesupported": false, + "virtualexecusageignored": null, + "virtualmmuusageignored": null, + "virtualmmuusagesupported": false, + "vmnpivwwndisablesupported": false, + "vmnpivwwnsupported": false, + "vmnpivwwnupdatesupported": false, + "vpmcsupported": false + }, + "config": { + "alternateguestname": "", + "annotation": null, + "bootoptions": null, + "changetrackingenabled": null, + "changeversion": "", + "consolepreferences": null, + "contentlibiteminfo": null, + "cpuaffinity": null, + "cpuallocation": {}, + "cpufeaturemask": [], + "cpuhotaddenabled": null, + "cpuhotremoveenabled": null, + "createdate": null, + "datastoreurl": [], + "defaultpowerops": {}, + "dynamicproperty": [], + "dynamictype": null, + "extraconfig": [], + "files": {}, + "firmware": null, + "flags": {}, + "forkconfiginfo": null, + "ftinfo": null, + "guestautolockenabled": null, + "guestfullname": "otherGuest", + "guestid": "otherGuest", + "guestintegrityinfo": null, + "guestmonitoringmodeinfo": null, + "hardware": {}, + "hotplugmemoryincrementsize": null, + "hotplugmemorylimit": null, + "initialoverhead": null, + "instanceuuid": "bfff331f-7f07-572d-951e-edd3701dc061", + "keyid": null, + "latencysensitivity": null, + "locationid": null, + "managedby": null, + "maxmksconnections": null, + "memoryaffinity": null, + "memoryallocation": {}, + "memoryhotaddenabled": null, + "memoryreservationlockedtomax": null, + "messagebustunnelenabled": null, + "migrateencryption": null, + "modified": {}, + "name": "DC0_C0_RP0_VM0", + "nestedhvenabled": null, + "networkshaper": null, + "npivdesirednodewwns": null, + "npivdesiredportwwns": null, + "npivnodeworldwidename": [], + "npivonnonrdmdisks": null, + "npivportworldwidename": [], + "npivtemporarydisabled": null, + "npivworldwidenametype": null, + "repconfig": null, + "scheduledhardwareupgradeinfo": null, + "sgxinfo": null, + "swapplacement": null, + "swapstorageobjectid": null, + "template": false, + "tools": {}, + "uuid": "cd0681bf-2f18-5c00-9b9b-8197c0095348", + "vappconfig": null, + "vassertsenabled": null, + 
"vcpuconfig": [], + "version": "vmx-13", + "vflashcachereservation": null, + "vmstorageobjectid": null, + "vmxconfigchecksum": null, + "vpmcenabled": null + }, + "configissue": [], + "configstatus": "green", + "customvalue": [], + "datastore": [ + { + "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5", + "name": "LocalDS_0" + } + ], + "effectiverole": [ + -1 + ], + "guest": { + "appheartbeatstatus": null, + "appstate": null, + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "generationinfo": [], + "guestfamily": null, + "guestfullname": null, + "guestid": null, + "guestkernelcrashed": null, + "guestoperationsready": null, + "gueststate": "", + "gueststatechangesupported": null, + "hostname": null, + "hwversion": null, + "interactiveguestoperationsready": null, + "ipaddress": null, + "ipstack": [], + "net": [], + "screen": null, + "toolsinstalltype": null, + "toolsrunningstatus": "guestToolsNotRunning", + "toolsstatus": "toolsNotInstalled", + "toolsversion": "0", + "toolsversionstatus": null, + "toolsversionstatus2": null + }, + "guestheartbeatstatus": null, + "layout": { + "configfile": [], + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "logfile": [], + "snapshot": [], + "swapfile": null + }, + "layoutex": { + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "file": [], + "snapshot": [], + "timestamp": {} + }, + "name": "DC0_C0_RP0_VM0", + "network": [], + "overallstatus": "green", + "parentvapp": null, + "permission": [], + "recenttask": [], + "resourcepool": { + "_moId": "resgroup-26", + "name": "Resources" + }, + "rootsnapshot": [], + "runtime": { + "boottime": null, + "cleanpoweroff": null, + "connectionstate": "connected", + "consolidationneeded": false, + "cryptostate": null, + "dasvmprotection": null, + "device": [], + "dynamicproperty": [], + "dynamictype": null, + "faulttolerancestate": null, + "featuremask": [], + "featurerequirement": [], + "host": { + "_moId": "host-47", + "name": "DC0_C0_H2" + }, + "instantclonefrozen": null, + "maxcpuusage": null, + "maxmemoryusage": null, + "memoryoverhead": null, + "minrequiredevcmodekey": null, + "needsecondaryreason": null, + "nummksconnections": 0, + "offlinefeaturerequirement": [], + "onlinestandby": false, + "paused": null, + "powerstate": "poweredOn", + "question": null, + "quiescedforkparent": null, + "recordreplaystate": null, + "snapshotinbackground": null, + "suspendinterval": null, + "suspendtime": null, + "toolsinstallermounted": false, + "vflashcacheallocation": null + }, + "snapshot": null, + "storage": { + "dynamicproperty": [], + "dynamictype": null, + "perdatastoreusage": [], + "timestamp": {} + }, + "summary": { + "config": {}, + "customvalue": [], + "dynamicproperty": [], + "dynamictype": null, + "guest": {}, + "overallstatus": "green", + "quickstats": {}, + "runtime": {}, + "storage": {}, + "vm": {} + }, + "tag": [], + "triggeredalarmstate": [], + "value": [] + }, + "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908": { + "alarmactionsenabled": null, + "ansible_host": "None", + "ansible_ssh_host": "None", + "ansible_uuid": "64b6ca93-f35f-4749-abeb-fc1fabae6c79", + "availablefield": [], + "capability": { + "bootoptionssupported": false, + "bootretryoptionssupported": false, + "changetrackingsupported": false, + "consolepreferencessupported": false, + "cpufeaturemasksupported": false, + "disablesnapshotssupported": false, + "diskonlysnapshotonsuspendedvmsupported": null, + "disksharessupported": false, + "dynamicproperty": [], + "dynamictype": null, + 
"featurerequirementsupported": false, + "guestautolocksupported": false, + "hostbasedreplicationsupported": false, + "locksnapshotssupported": false, + "memoryreservationlocksupported": false, + "memorysnapshotssupported": false, + "multiplecorespersocketsupported": false, + "multiplesnapshotssupported": false, + "nestedhvsupported": false, + "npivwwnonnonrdmvmsupported": false, + "pervmevcsupported": null, + "poweredoffsnapshotssupported": false, + "poweredonmonitortypechangesupported": false, + "quiescedsnapshotssupported": false, + "recordreplaysupported": false, + "reverttosnapshotsupported": false, + "s1acpimanagementsupported": false, + "securebootsupported": null, + "sesparsedisksupported": false, + "settingdisplaytopologysupported": false, + "settingscreenresolutionsupported": false, + "settingvideoramsizesupported": false, + "snapshotconfigsupported": false, + "snapshotoperationssupported": false, + "swapplacementsupported": false, + "toolsautoupdatesupported": false, + "toolssynctimesupported": false, + "virtualexecusageignored": null, + "virtualmmuusageignored": null, + "virtualmmuusagesupported": false, + "vmnpivwwndisablesupported": false, + "vmnpivwwnsupported": false, + "vmnpivwwnupdatesupported": false, + "vpmcsupported": false + }, + "config": { + "alternateguestname": "", + "annotation": null, + "bootoptions": null, + "changetrackingenabled": null, + "changeversion": "", + "consolepreferences": null, + "contentlibiteminfo": null, + "cpuaffinity": null, + "cpuallocation": {}, + "cpufeaturemask": [], + "cpuhotaddenabled": null, + "cpuhotremoveenabled": null, + "createdate": null, + "datastoreurl": [], + "defaultpowerops": {}, + "dynamicproperty": [], + "dynamictype": null, + "extraconfig": [], + "files": {}, + "firmware": null, + "flags": {}, + "forkconfiginfo": null, + "ftinfo": null, + "guestautolockenabled": null, + "guestfullname": "otherGuest", + "guestid": "otherGuest", + "guestintegrityinfo": null, + "guestmonitoringmodeinfo": null, + "hardware": {}, + "hotplugmemoryincrementsize": null, + "hotplugmemorylimit": null, + "initialoverhead": null, + "instanceuuid": "6132d223-1566-5921-bc3b-df91ece09a4d", + "keyid": null, + "latencysensitivity": null, + "locationid": null, + "managedby": null, + "maxmksconnections": null, + "memoryaffinity": null, + "memoryallocation": {}, + "memoryhotaddenabled": null, + "memoryreservationlockedtomax": null, + "messagebustunnelenabled": null, + "migrateencryption": null, + "modified": {}, + "name": "DC0_C0_RP0_VM1", + "nestedhvenabled": null, + "networkshaper": null, + "npivdesirednodewwns": null, + "npivdesiredportwwns": null, + "npivnodeworldwidename": [], + "npivonnonrdmdisks": null, + "npivportworldwidename": [], + "npivtemporarydisabled": null, + "npivworldwidenametype": null, + "repconfig": null, + "scheduledhardwareupgradeinfo": null, + "sgxinfo": null, + "swapplacement": null, + "swapstorageobjectid": null, + "template": false, + "tools": {}, + "uuid": "f7c371d6-2003-5a48-9859-3bc9a8b08908", + "vappconfig": null, + "vassertsenabled": null, + "vcpuconfig": [], + "version": "vmx-13", + "vflashcachereservation": null, + "vmstorageobjectid": null, + "vmxconfigchecksum": null, + "vpmcenabled": null + }, + "configissue": [], + "configstatus": "green", + "customvalue": [], + "datastore": [ + { + "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5", + "name": "LocalDS_0" + } + ], + "effectiverole": [ + -1 + ], + "guest": { + "appheartbeatstatus": null, + "appstate": null, + "disk": [], + "dynamicproperty": [], + "dynamictype": null, 
+ "generationinfo": [], + "guestfamily": null, + "guestfullname": null, + "guestid": null, + "guestkernelcrashed": null, + "guestoperationsready": null, + "gueststate": "", + "gueststatechangesupported": null, + "hostname": null, + "hwversion": null, + "interactiveguestoperationsready": null, + "ipaddress": null, + "ipstack": [], + "net": [], + "screen": null, + "toolsinstalltype": null, + "toolsrunningstatus": "guestToolsNotRunning", + "toolsstatus": "toolsNotInstalled", + "toolsversion": "0", + "toolsversionstatus": null, + "toolsversionstatus2": null + }, + "guestheartbeatstatus": null, + "layout": { + "configfile": [], + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "logfile": [], + "snapshot": [], + "swapfile": null + }, + "layoutex": { + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "file": [], + "snapshot": [], + "timestamp": {} + }, + "name": "DC0_C0_RP0_VM1", + "network": [], + "overallstatus": "green", + "parentvapp": null, + "permission": [], + "recenttask": [], + "resourcepool": { + "_moId": "resgroup-26", + "name": "Resources" + }, + "rootsnapshot": [], + "runtime": { + "boottime": null, + "cleanpoweroff": null, + "connectionstate": "connected", + "consolidationneeded": false, + "cryptostate": null, + "dasvmprotection": null, + "device": [], + "dynamicproperty": [], + "dynamictype": null, + "faulttolerancestate": null, + "featuremask": [], + "featurerequirement": [], + "host": { + "_moId": "host-33", + "name": "DC0_C0_H0" + }, + "instantclonefrozen": null, + "maxcpuusage": null, + "maxmemoryusage": null, + "memoryoverhead": null, + "minrequiredevcmodekey": null, + "needsecondaryreason": null, + "nummksconnections": 0, + "offlinefeaturerequirement": [], + "onlinestandby": false, + "paused": null, + "powerstate": "poweredOn", + "question": null, + "quiescedforkparent": null, + "recordreplaystate": null, + "snapshotinbackground": null, + "suspendinterval": null, + "suspendtime": null, + "toolsinstallermounted": false, + "vflashcacheallocation": null + }, + "snapshot": null, + "storage": { + "dynamicproperty": [], + "dynamictype": null, + "perdatastoreusage": [], + "timestamp": {} + }, + "summary": { + "config": {}, + "customvalue": [], + "dynamicproperty": [], + "dynamictype": null, + "guest": {}, + "overallstatus": "green", + "quickstats": {}, + "runtime": {}, + "storage": {}, + "vm": {} + }, + "tag": [], + "triggeredalarmstate": [], + "value": [] + }, + "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb": { + "alarmactionsenabled": null, + "ansible_host": "None", + "ansible_ssh_host": "None", + "ansible_uuid": "6616671b-16b0-494c-8201-737ca506790b", + "availablefield": [], + "capability": { + "bootoptionssupported": false, + "bootretryoptionssupported": false, + "changetrackingsupported": false, + "consolepreferencessupported": false, + "cpufeaturemasksupported": false, + "disablesnapshotssupported": false, + "diskonlysnapshotonsuspendedvmsupported": null, + "disksharessupported": false, + "dynamicproperty": [], + "dynamictype": null, + "featurerequirementsupported": false, + "guestautolocksupported": false, + "hostbasedreplicationsupported": false, + "locksnapshotssupported": false, + "memoryreservationlocksupported": false, + "memorysnapshotssupported": false, + "multiplecorespersocketsupported": false, + "multiplesnapshotssupported": false, + "nestedhvsupported": false, + "npivwwnonnonrdmvmsupported": false, + "pervmevcsupported": null, + "poweredoffsnapshotssupported": false, + "poweredonmonitortypechangesupported": false, + 
"quiescedsnapshotssupported": false, + "recordreplaysupported": false, + "reverttosnapshotsupported": false, + "s1acpimanagementsupported": false, + "securebootsupported": null, + "sesparsedisksupported": false, + "settingdisplaytopologysupported": false, + "settingscreenresolutionsupported": false, + "settingvideoramsizesupported": false, + "snapshotconfigsupported": false, + "snapshotoperationssupported": false, + "swapplacementsupported": false, + "toolsautoupdatesupported": false, + "toolssynctimesupported": false, + "virtualexecusageignored": null, + "virtualmmuusageignored": null, + "virtualmmuusagesupported": false, + "vmnpivwwndisablesupported": false, + "vmnpivwwnsupported": false, + "vmnpivwwnupdatesupported": false, + "vpmcsupported": false + }, + "config": { + "alternateguestname": "", + "annotation": null, + "bootoptions": null, + "changetrackingenabled": null, + "changeversion": "", + "consolepreferences": null, + "contentlibiteminfo": null, + "cpuaffinity": null, + "cpuallocation": {}, + "cpufeaturemask": [], + "cpuhotaddenabled": null, + "cpuhotremoveenabled": null, + "createdate": null, + "datastoreurl": [], + "defaultpowerops": {}, + "dynamicproperty": [], + "dynamictype": null, + "extraconfig": [], + "files": {}, + "firmware": null, + "flags": {}, + "forkconfiginfo": null, + "ftinfo": null, + "guestautolockenabled": null, + "guestfullname": "otherGuest", + "guestid": "otherGuest", + "guestintegrityinfo": null, + "guestmonitoringmodeinfo": null, + "hardware": {}, + "hotplugmemoryincrementsize": null, + "hotplugmemorylimit": null, + "initialoverhead": null, + "instanceuuid": "b4689bed-97f0-5bcd-8a4c-07477cc8f06f", + "keyid": null, + "latencysensitivity": null, + "locationid": null, + "managedby": null, + "maxmksconnections": null, + "memoryaffinity": null, + "memoryallocation": {}, + "memoryhotaddenabled": null, + "memoryreservationlockedtomax": null, + "messagebustunnelenabled": null, + "migrateencryption": null, + "modified": {}, + "name": "DC0_H0_VM0", + "nestedhvenabled": null, + "networkshaper": null, + "npivdesirednodewwns": null, + "npivdesiredportwwns": null, + "npivnodeworldwidename": [], + "npivonnonrdmdisks": null, + "npivportworldwidename": [], + "npivtemporarydisabled": null, + "npivworldwidenametype": null, + "repconfig": null, + "scheduledhardwareupgradeinfo": null, + "sgxinfo": null, + "swapplacement": null, + "swapstorageobjectid": null, + "template": false, + "tools": {}, + "uuid": "265104de-1472-547c-b873-6dc7883fb6cb", + "vappconfig": null, + "vassertsenabled": null, + "vcpuconfig": [], + "version": "vmx-13", + "vflashcachereservation": null, + "vmstorageobjectid": null, + "vmxconfigchecksum": null, + "vpmcenabled": null + }, + "configissue": [], + "configstatus": "green", + "customvalue": [], + "datastore": [ + { + "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5", + "name": "LocalDS_0" + } + ], + "effectiverole": [ + -1 + ], + "guest": { + "appheartbeatstatus": null, + "appstate": null, + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "generationinfo": [], + "guestfamily": null, + "guestfullname": null, + "guestid": null, + "guestkernelcrashed": null, + "guestoperationsready": null, + "gueststate": "", + "gueststatechangesupported": null, + "hostname": null, + "hwversion": null, + "interactiveguestoperationsready": null, + "ipaddress": null, + "ipstack": [], + "net": [], + "screen": null, + "toolsinstalltype": null, + "toolsrunningstatus": "guestToolsNotRunning", + "toolsstatus": "toolsNotInstalled", + "toolsversion": "0", + 
"toolsversionstatus": null, + "toolsversionstatus2": null + }, + "guestheartbeatstatus": null, + "layout": { + "configfile": [], + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "logfile": [], + "snapshot": [], + "swapfile": null + }, + "layoutex": { + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "file": [], + "snapshot": [], + "timestamp": {} + }, + "name": "DC0_H0_VM0", + "network": [], + "overallstatus": "green", + "parentvapp": null, + "permission": [], + "recenttask": [], + "resourcepool": { + "_moId": "resgroup-22", + "name": "Resources" + }, + "rootsnapshot": [], + "runtime": { + "boottime": null, + "cleanpoweroff": null, + "connectionstate": "connected", + "consolidationneeded": false, + "cryptostate": null, + "dasvmprotection": null, + "device": [], + "dynamicproperty": [], + "dynamictype": null, + "faulttolerancestate": null, + "featuremask": [], + "featurerequirement": [], + "host": { + "_moId": "host-21", + "name": "DC0_H0" + }, + "instantclonefrozen": null, + "maxcpuusage": null, + "maxmemoryusage": null, + "memoryoverhead": null, + "minrequiredevcmodekey": null, + "needsecondaryreason": null, + "nummksconnections": 0, + "offlinefeaturerequirement": [], + "onlinestandby": false, + "paused": null, + "powerstate": "poweredOn", + "question": null, + "quiescedforkparent": null, + "recordreplaystate": null, + "snapshotinbackground": null, + "suspendinterval": null, + "suspendtime": null, + "toolsinstallermounted": false, + "vflashcacheallocation": null + }, + "snapshot": null, + "storage": { + "dynamicproperty": [], + "dynamictype": null, + "perdatastoreusage": [], + "timestamp": {} + }, + "summary": { + "config": {}, + "customvalue": [], + "dynamicproperty": [], + "dynamictype": null, + "guest": {}, + "overallstatus": "green", + "quickstats": {}, + "runtime": {}, + "storage": {}, + "vm": {} + }, + "tag": [], + "triggeredalarmstate": [], + "value": [] + }, + "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad": { + "alarmactionsenabled": null, + "ansible_host": "None", + "ansible_ssh_host": "None", + "ansible_uuid": "50401ff9-720a-4166-b9e6-d7cd0d9a4dc9", + "availablefield": [], + "capability": { + "bootoptionssupported": false, + "bootretryoptionssupported": false, + "changetrackingsupported": false, + "consolepreferencessupported": false, + "cpufeaturemasksupported": false, + "disablesnapshotssupported": false, + "diskonlysnapshotonsuspendedvmsupported": null, + "disksharessupported": false, + "dynamicproperty": [], + "dynamictype": null, + "featurerequirementsupported": false, + "guestautolocksupported": false, + "hostbasedreplicationsupported": false, + "locksnapshotssupported": false, + "memoryreservationlocksupported": false, + "memorysnapshotssupported": false, + "multiplecorespersocketsupported": false, + "multiplesnapshotssupported": false, + "nestedhvsupported": false, + "npivwwnonnonrdmvmsupported": false, + "pervmevcsupported": null, + "poweredoffsnapshotssupported": false, + "poweredonmonitortypechangesupported": false, + "quiescedsnapshotssupported": false, + "recordreplaysupported": false, + "reverttosnapshotsupported": false, + "s1acpimanagementsupported": false, + "securebootsupported": null, + "sesparsedisksupported": false, + "settingdisplaytopologysupported": false, + "settingscreenresolutionsupported": false, + "settingvideoramsizesupported": false, + "snapshotconfigsupported": false, + "snapshotoperationssupported": false, + "swapplacementsupported": false, + "toolsautoupdatesupported": false, + "toolssynctimesupported": false, + 
"virtualexecusageignored": null, + "virtualmmuusageignored": null, + "virtualmmuusagesupported": false, + "vmnpivwwndisablesupported": false, + "vmnpivwwnsupported": false, + "vmnpivwwnupdatesupported": false, + "vpmcsupported": false + }, + "config": { + "alternateguestname": "", + "annotation": null, + "bootoptions": null, + "changetrackingenabled": null, + "changeversion": "", + "consolepreferences": null, + "contentlibiteminfo": null, + "cpuaffinity": null, + "cpuallocation": {}, + "cpufeaturemask": [], + "cpuhotaddenabled": null, + "cpuhotremoveenabled": null, + "createdate": null, + "datastoreurl": [], + "defaultpowerops": {}, + "dynamicproperty": [], + "dynamictype": null, + "extraconfig": [], + "files": {}, + "firmware": null, + "flags": {}, + "forkconfiginfo": null, + "ftinfo": null, + "guestautolockenabled": null, + "guestfullname": "otherGuest", + "guestid": "otherGuest", + "guestintegrityinfo": null, + "guestmonitoringmodeinfo": null, + "hardware": {}, + "hotplugmemoryincrementsize": null, + "hotplugmemorylimit": null, + "initialoverhead": null, + "instanceuuid": "12f8928d-f144-5c57-89db-dd2d0902c9fa", + "keyid": null, + "latencysensitivity": null, + "locationid": null, + "managedby": null, + "maxmksconnections": null, + "memoryaffinity": null, + "memoryallocation": {}, + "memoryhotaddenabled": null, + "memoryreservationlockedtomax": null, + "messagebustunnelenabled": null, + "migrateencryption": null, + "modified": {}, + "name": "DC0_H0_VM1", + "nestedhvenabled": null, + "networkshaper": null, + "npivdesirednodewwns": null, + "npivdesiredportwwns": null, + "npivnodeworldwidename": [], + "npivonnonrdmdisks": null, + "npivportworldwidename": [], + "npivtemporarydisabled": null, + "npivworldwidenametype": null, + "repconfig": null, + "scheduledhardwareupgradeinfo": null, + "sgxinfo": null, + "swapplacement": null, + "swapstorageobjectid": null, + "template": false, + "tools": {}, + "uuid": "39365506-5a0a-5fd0-be10-9586ad53aaad", + "vappconfig": null, + "vassertsenabled": null, + "vcpuconfig": [], + "version": "vmx-13", + "vflashcachereservation": null, + "vmstorageobjectid": null, + "vmxconfigchecksum": null, + "vpmcenabled": null + }, + "configissue": [], + "configstatus": "green", + "customvalue": [], + "datastore": [ + { + "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5", + "name": "LocalDS_0" + } + ], + "effectiverole": [ + -1 + ], + "guest": { + "appheartbeatstatus": null, + "appstate": null, + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "generationinfo": [], + "guestfamily": null, + "guestfullname": null, + "guestid": null, + "guestkernelcrashed": null, + "guestoperationsready": null, + "gueststate": "", + "gueststatechangesupported": null, + "hostname": null, + "hwversion": null, + "interactiveguestoperationsready": null, + "ipaddress": null, + "ipstack": [], + "net": [], + "screen": null, + "toolsinstalltype": null, + "toolsrunningstatus": "guestToolsNotRunning", + "toolsstatus": "toolsNotInstalled", + "toolsversion": "0", + "toolsversionstatus": null, + "toolsversionstatus2": null + }, + "guestheartbeatstatus": null, + "layout": { + "configfile": [], + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "logfile": [], + "snapshot": [], + "swapfile": null + }, + "layoutex": { + "disk": [], + "dynamicproperty": [], + "dynamictype": null, + "file": [], + "snapshot": [], + "timestamp": {} + }, + "name": "DC0_H0_VM1", + "network": [], + "overallstatus": "green", + "parentvapp": null, + "permission": [], + "recenttask": [], + "resourcepool": 
{ + "_moId": "resgroup-22", + "name": "Resources" + }, + "rootsnapshot": [], + "runtime": { + "boottime": null, + "cleanpoweroff": null, + "connectionstate": "connected", + "consolidationneeded": false, + "cryptostate": null, + "dasvmprotection": null, + "device": [], + "dynamicproperty": [], + "dynamictype": null, + "faulttolerancestate": null, + "featuremask": [], + "featurerequirement": [], + "host": { + "_moId": "host-21", + "name": "DC0_H0" + }, + "instantclonefrozen": null, + "maxcpuusage": null, + "maxmemoryusage": null, + "memoryoverhead": null, + "minrequiredevcmodekey": null, + "needsecondaryreason": null, + "nummksconnections": 0, + "offlinefeaturerequirement": [], + "onlinestandby": false, + "paused": null, + "powerstate": "poweredOn", + "question": null, + "quiescedforkparent": null, + "recordreplaystate": null, + "snapshotinbackground": null, + "suspendinterval": null, + "suspendtime": null, + "toolsinstallermounted": false, + "vflashcacheallocation": null + }, + "snapshot": null, + "storage": { + "dynamicproperty": [], + "dynamictype": null, + "perdatastoreusage": [], + "timestamp": {} + }, + "summary": { + "config": {}, + "customvalue": [], + "dynamicproperty": [], + "dynamictype": null, + "guest": {}, + "overallstatus": "green", + "quickstats": {}, + "runtime": {}, + "storage": {}, + "vm": {} + }, + "tag": [], + "triggeredalarmstate": [], + "value": [] + } + } + }, + "all": { + "children": [ + "None", + "guests", + "ungrouped" + ] + }, + "guests": { + "hosts": [ + "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348", + "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908", + "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb", + "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad" + ] + } +} diff --git a/test/integration/targets/inventory_script/inventory.sh b/test/integration/targets/inventory_script/inventory.sh new file mode 100755 index 00000000..b3f1d035 --- /dev/null +++ b/test/integration/targets/inventory_script/inventory.sh @@ -0,0 +1,7 @@ +#!/bin/sh +# This script mimics the output from what the contrib/inventory/vmware_inventory.py +# dynamic inventory script produced. 
+# This ensures we are still covering the same code that the original tests gave us +# and subsequently ensures that ansible-inventory produces output consistent with +# that of a dynamic inventory script +cat inventory.json diff --git a/test/integration/targets/inventory_script/runme.sh b/test/integration/targets/inventory_script/runme.sh new file mode 100755 index 00000000..bb4fcea9 --- /dev/null +++ b/test/integration/targets/inventory_script/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +diff -uw <(ansible-inventory -i inventory.sh --list --export) inventory.json diff --git a/test/integration/targets/inventory_yaml/aliases b/test/integration/targets/inventory_yaml/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/inventory_yaml/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/inventory_yaml/empty.json b/test/integration/targets/inventory_yaml/empty.json new file mode 100644 index 00000000..e1ae0684 --- /dev/null +++ b/test/integration/targets/inventory_yaml/empty.json @@ -0,0 +1,10 @@ +{ + "_meta": { + "hostvars": {} + }, + "all": { + "children": [ + "ungrouped" + ] + } +} diff --git a/test/integration/targets/inventory_yaml/runme.sh b/test/integration/targets/inventory_yaml/runme.sh new file mode 100755 index 00000000..b82f499d --- /dev/null +++ b/test/integration/targets/inventory_yaml/runme.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# handle empty/commented out group keys correctly https://github.com/ansible/ansible/issues/47254 +ANSIBLE_VERBOSITY=0 diff -w <(ansible-inventory -i ./test.yml --list) success.json diff --git a/test/integration/targets/inventory_yaml/success.json b/test/integration/targets/inventory_yaml/success.json new file mode 100644 index 00000000..a8b15f96 --- /dev/null +++ b/test/integration/targets/inventory_yaml/success.json @@ -0,0 +1,61 @@ +{ + "_meta": { + "hostvars": { + "alice": { + "status": "single" + }, + "bobby": { + "in_trouble": true, + "popular": false + }, + "cindy": { + "in_trouble": true, + "popular": true + }, + "greg": { + "in_trouble": true, + "popular": true + }, + "jan": { + "in_trouble": true, + "popular": false + }, + "marcia": { + "in_trouble": true, + "popular": true + }, + "peter": { + "in_trouble": true, + "popular": false + } + } + }, + "all": { + "children": [ + "cousins", + "kids", + "the-maid", + "ungrouped" + ] + }, + "cousins": { + "children": [ + "redheads" + ] + }, + "kids": { + "hosts": [ + "bobby", + "cindy", + "greg", + "jan", + "marcia", + "peter" + ] + }, + "the-maid": { + "hosts": [ + "alice" + ] + } +} diff --git a/test/integration/targets/inventory_yaml/test.yml b/test/integration/targets/inventory_yaml/test.yml new file mode 100644 index 00000000..9755396a --- /dev/null +++ b/test/integration/targets/inventory_yaml/test.yml @@ -0,0 +1,27 @@ +all: + children: + kids: + hosts: + marcia: + popular: True + jan: + popular: False + cindy: + popular: True + greg: + popular: True + peter: + popular: False + bobby: + popular: False + vars: + in_trouble: True + cousins: + children: + redheads: + hosts: + #oliver: # this used to cause an error and deliver incomplete inventory + the-maid: + hosts: + alice: + status: single diff --git a/test/integration/targets/jinja2_native_types/aliases b/test/integration/targets/jinja2_native_types/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git 
a/test/integration/targets/jinja2_native_types/nested_undefined.yml b/test/integration/targets/jinja2_native_types/nested_undefined.yml new file mode 100644 index 00000000..c808ffb7 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/nested_undefined.yml @@ -0,0 +1,24 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - name: Test nested undefined var fails, single node + debug: + msg: "{{ [{ 'key': nested_and_undefined }] }}" + register: result + ignore_errors: yes + + - assert: + that: + - "\"'nested_and_undefined' is undefined\" in result.msg" + + - name: Test nested undefined var fails, multiple nodes + debug: + msg: "{{ [{ 'key': nested_and_undefined}] }} second_node" + register: result + ignore_errors: yes + + - assert: + that: + - "\"'nested_and_undefined' is undefined\" in result.msg" + when: lookup('pipe', ansible_python_interpreter ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.10', '>=') diff --git a/test/integration/targets/jinja2_native_types/runme.sh b/test/integration/targets/jinja2_native_types/runme.sh new file mode 100755 index 00000000..f648f875 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/runme.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_JINJA2_NATIVE=1 +ansible-playbook runtests.yml -v "$@" +ansible-playbook --vault-password-file test_vault_pass test_vault.yml -v "$@" +ansible-playbook test_hostvars.yml -v "$@" +ansible-playbook nested_undefined.yml -v "$@" +unset ANSIBLE_JINJA2_NATIVE diff --git a/test/integration/targets/jinja2_native_types/runtests.yml b/test/integration/targets/jinja2_native_types/runtests.yml new file mode 100644 index 00000000..efcdb7a5 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/runtests.yml @@ -0,0 +1,50 @@ +- name: Test jinja2 native types + hosts: localhost + gather_facts: no + vars: + i_one: 1 + i_two: 2 + i_three: 3 + s_one: "1" + s_two: "2" + s_three: "3" + dict_one: + foo: bar + baz: bang + dict_two: + bar: foo + foobar: barfoo + list_one: + - one + - two + list_two: + - three + - four + list_ints: + - 4 + - 2 + list_one_int: + - 1 + b_true: True + b_false: False + s_true: "True" + s_false: "False" + yaml_none: ~ + tasks: + - name: check jinja version + command: "{{ ansible_python_interpreter }} -c 'import jinja2; print(jinja2.__version__)'" + register: jinja2_version + + - name: make sure jinja is the right version + set_fact: + is_native: "{{ jinja2_version.stdout is version('2.10', '>=') }}" + + - block: + - import_tasks: test_casting.yml + - import_tasks: test_concatentation.yml + - import_tasks: test_bool.yml + - import_tasks: test_dunder.yml + - import_tasks: test_types.yml + - import_tasks: test_none.yml + - import_tasks: test_template.yml + when: is_native diff --git a/test/integration/targets/jinja2_native_types/test_bool.yml b/test/integration/targets/jinja2_native_types/test_bool.yml new file mode 100644 index 00000000..f3b5e8c0 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_bool.yml @@ -0,0 +1,53 @@ +- name: test bool True + set_fact: + bool_var_true: "{{ b_true }}" + +- assert: + that: + - 'bool_var_true is sameas true' + - 'bool_var_true|type_debug == "bool"' + +- name: test bool False + set_fact: + bool_var_false: "{{ b_false }}" + +- assert: + that: + - 'bool_var_false is sameas false' + - 'bool_var_false|type_debug == "bool"' + +- name: test bool expr True + set_fact: + bool_var_expr_true: "{{ 1 == 1 }}" + +- assert: + that: + - 'bool_var_expr_true is sameas true' + - 
'bool_var_expr_true|type_debug == "bool"' + +- name: test bool expr False + set_fact: + bool_var_expr_false: "{{ 2 + 2 == 5 }}" + +- assert: + that: + - 'bool_var_expr_false is sameas false' + - 'bool_var_expr_false|type_debug == "bool"' + +- name: test bool expr with None, True + set_fact: + bool_var_none_expr_true: "{{ None == None }}" + +- assert: + that: + - 'bool_var_none_expr_true is sameas true' + - 'bool_var_none_expr_true|type_debug == "bool"' + +- name: test bool expr with None, False + set_fact: + bool_var_none_expr_false: "{{ '' == None }}" + +- assert: + that: + - 'bool_var_none_expr_false is sameas false' + - 'bool_var_none_expr_false|type_debug == "bool"' diff --git a/test/integration/targets/jinja2_native_types/test_casting.yml b/test/integration/targets/jinja2_native_types/test_casting.yml new file mode 100644 index 00000000..8627a056 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_casting.yml @@ -0,0 +1,31 @@ +- name: cast things to other things + set_fact: + int_to_str: "'{{ i_two }}'" + int_to_str2: "{{ i_two | string }}" + str_to_int: "{{ s_two|int }}" + dict_to_str: "'{{ dict_one }}'" + list_to_str: "'{{ list_one }}'" + int_to_bool: "{{ i_one|bool }}" + str_true_to_bool: "{{ s_true|bool }}" + str_false_to_bool: "{{ s_false|bool }}" + list_to_json_str: "{{ list_one | to_json }}" + list_to_yaml_str: "{{ list_one | to_yaml }}" + +- assert: + that: + - 'int_to_str == "2"' + - 'int_to_str|type_debug in ["str", "unicode"]' + - 'int_to_str2 == "2"' + - 'int_to_str2|type_debug in ["NativeJinjaText"]' + - 'str_to_int == 2' + - 'str_to_int|type_debug == "int"' + - 'dict_to_str|type_debug in ["str", "unicode"]' + - 'list_to_str|type_debug in ["str", "unicode"]' + - 'int_to_bool is sameas true' + - 'int_to_bool|type_debug == "bool"' + - 'str_true_to_bool is sameas true' + - 'str_true_to_bool|type_debug == "bool"' + - 'str_false_to_bool is sameas false' + - 'str_false_to_bool|type_debug == "bool"' + - 'list_to_json_str|type_debug in ["NativeJinjaText"]' + - 'list_to_yaml_str|type_debug in ["NativeJinjaText"]' diff --git a/test/integration/targets/jinja2_native_types/test_concatentation.yml b/test/integration/targets/jinja2_native_types/test_concatentation.yml new file mode 100644 index 00000000..8a8077b6 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_concatentation.yml @@ -0,0 +1,88 @@ +- name: add two ints + set_fact: + integer_sum: "{{ i_one + i_two }}" + +- assert: + that: + - 'integer_sum == 3' + - 'integer_sum|type_debug == "int"' + +- name: add casted string and int + set_fact: + integer_sum2: "{{ s_one|int + i_two }}" + +- assert: + that: + - 'integer_sum2 == 3' + - 'integer_sum2|type_debug == "int"' + +- name: concatenate int and string + set_fact: + string_sum: "'{{ [i_one, s_two]|join('') }}'" + +- assert: + that: + - 'string_sum == "12"' + - 'string_sum|type_debug in ["str", "unicode"]' + +- name: add two lists + set_fact: + list_sum: "{{ list_one + list_two }}" + +- assert: + that: + - 'list_sum == ["one", "two", "three", "four"]' + - 'list_sum|type_debug == "list"' + +- name: add two lists, multi expression + set_fact: + list_sum_multi: "{{ list_one }} + {{ list_two }}" + +- assert: + that: + - 'list_sum_multi|type_debug in ["str", "unicode"]' + +- name: add two dicts + set_fact: + dict_sum: "{{ dict_one + dict_two }}" + ignore_errors: yes + +- assert: + that: + - 'dict_sum is undefined' + +- name: loop through list with strings + set_fact: + list_for_strings: "{% for x in list_one %}{{ x }}{% endfor %}" + +- assert: + 
that: + - 'list_for_strings == "onetwo"' + - 'list_for_strings|type_debug in ["str", "unicode"]' + +- name: loop through list with int + set_fact: + list_for_int: "{% for x in list_one_int %}{{ x }}{% endfor %}" + +- assert: + that: + - 'list_for_int == 1' + - 'list_for_int|type_debug == "int"' + +- name: loop through list with ints + set_fact: + list_for_ints: "{% for x in list_ints %}{{ x }}{% endfor %}" + +- assert: + that: + - 'list_for_ints == 42' + - 'list_for_ints|type_debug == "int"' + +- name: loop through list to create a new list + set_fact: + list_from_list: "[{% for x in list_ints %}{{ x }},{% endfor %}]" + +- assert: + that: + - 'list_from_list == [4, 2]' + - 'list_from_list|type_debug == "list"' diff --git a/test/integration/targets/jinja2_native_types/test_dunder.yml b/test/integration/targets/jinja2_native_types/test_dunder.yml new file mode 100644 index 00000000..df5ea927 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_dunder.yml @@ -0,0 +1,23 @@ +- name: test variable dunder + set_fact: + var_dunder: "{{ b_true.__class__ }}" + +- assert: + that: + - 'var_dunder|type_debug == "type"' + +- name: test constant dunder + set_fact: + const_dunder: "{{ true.__class__ }}" + +- assert: + that: + - 'const_dunder|type_debug == "type"' + +- name: test constant dunder to string + set_fact: + const_dunder: "{{ true.__class__|string }}" + +- assert: + that: + - 'const_dunder|type_debug in ["str", "unicode", "NativeJinjaText"]' diff --git a/test/integration/targets/jinja2_native_types/test_hostvars.yml b/test/integration/targets/jinja2_native_types/test_hostvars.yml new file mode 100644 index 00000000..ef0047b8 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_hostvars.yml @@ -0,0 +1,10 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Print vars + debug: + var: vars + + - name: Print hostvars + debug: + var: hostvars diff --git a/test/integration/targets/jinja2_native_types/test_none.yml b/test/integration/targets/jinja2_native_types/test_none.yml new file mode 100644 index 00000000..1d26154c --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_none.yml @@ -0,0 +1,11 @@ +- name: test none + set_fact: + none_var: "{{ yaml_none }}" + none_var_direct: "{{ None }}" + +- assert: + that: + - 'none_var is sameas none' + - 'none_var|type_debug == "NoneType"' + - 'none_var_direct is sameas none' + - 'none_var_direct|type_debug == "NoneType"' diff --git a/test/integration/targets/jinja2_native_types/test_template.yml b/test/integration/targets/jinja2_native_types/test_template.yml new file mode 100644 index 00000000..0896ac14 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_template.yml @@ -0,0 +1,27 @@ +- block: + - name: Template file with newlines + template: + src: test_template_newlines.j2 + dest: test_template_newlines.res + + - name: Dump template file + stat: + path: test_template_newlines.j2 + get_checksum: yes + register: template_stat + + - name: Dump result file + stat: + path: test_template_newlines.res + get_checksum: yes + register: result_stat + + - name: Check that the number of newlines from the original template is preserved + assert: + that: + - template_stat.stat.checksum == result_stat.stat.checksum + always: + - name: Clean up + file: + path: test_template_newlines.res + state: absent diff --git a/test/integration/targets/jinja2_native_types/test_template_newlines.j2 b/test/integration/targets/jinja2_native_types/test_template_newlines.j2 new file mode 100644 index 00000000..ca887efa --- 
/dev/null +++ b/test/integration/targets/jinja2_native_types/test_template_newlines.j2 @@ -0,0 +1,4 @@ +First line. + + + diff --git a/test/integration/targets/jinja2_native_types/test_types.yml b/test/integration/targets/jinja2_native_types/test_types.yml new file mode 100644 index 00000000..f5659d4e --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_types.yml @@ -0,0 +1,20 @@ +- assert: + that: + - 'i_one|type_debug == "int"' + - 's_one|type_debug == "AnsibleUnicode"' + - 'dict_one|type_debug == "dict"' + - 'dict_one is mapping' + - 'list_one|type_debug == "list"' + - 'b_true|type_debug == "bool"' + - 's_true|type_debug == "AnsibleUnicode"' + +- set_fact: + a_list: "{{[i_one, s_two]}}" + +- assert: + that: + - 'a_list|type_debug == "list"' + - 'a_list[0] == 1' + - 'a_list[0]|type_debug == "int"' + - 'a_list[1] == "2"' + - 'a_list[1]|type_debug == "AnsibleUnicode"' diff --git a/test/integration/targets/jinja2_native_types/test_vault.yml b/test/integration/targets/jinja2_native_types/test_vault.yml new file mode 100644 index 00000000..2daa3c5b --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_vault.yml @@ -0,0 +1,16 @@ +- hosts: localhost + gather_facts: no + vars: + # ansible-vault encrypt_string root + # vault_password_file = test_vault_pass + vaulted_root_string: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 39333565666430306232343266346635373235626564396332323838613063646132653436303239 + 3133363232306334393863343563366131373565616338380a666339383162333838653631663131 + 36633637303862353435643930393664386365323164643831363332666435303436373365393162 + 6535383134323539380a613663366631626534313837313565666665336164353362373431666366 + 3464 + tasks: + - name: make sure group root exists + group: + name: "{{ vaulted_root_string }}" diff --git a/test/integration/targets/jinja2_native_types/test_vault_pass b/test/integration/targets/jinja2_native_types/test_vault_pass new file mode 100644 index 00000000..9daeafb9 --- /dev/null +++ b/test/integration/targets/jinja2_native_types/test_vault_pass @@ -0,0 +1 @@ +test diff --git a/test/integration/targets/json_cleanup/aliases b/test/integration/targets/json_cleanup/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/json_cleanup/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/json_cleanup/library/bad_json b/test/integration/targets/json_cleanup/library/bad_json new file mode 100644 index 00000000..1df8c725 --- /dev/null +++ b/test/integration/targets/json_cleanup/library/bad_json @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -eu + +echo 'this stuff should be ignored' + +echo '[ looks like a json list]' + +echo '{"changed": false, "failed": false, "msg": "good json response"}' + +echo 'moar garbage' diff --git a/test/integration/targets/json_cleanup/module_output_cleaning.yml b/test/integration/targets/json_cleanup/module_output_cleaning.yml new file mode 100644 index 00000000..165352aa --- /dev/null +++ b/test/integration/targets/json_cleanup/module_output_cleaning.yml @@ -0,0 +1,26 @@ +- name: ensure we clean module output well + hosts: localhost + gather_facts: false + tasks: + - name: call module that spews extra stuff + bad_json: + register: clean_json + ignore_errors: true + + - name: all expected is there + assert: + that: + - clean_json is success + - clean_json is not changed + - "clean_json['msg'] == 'good json response'" + + - name: all non wanted is not there + assert: + that: + - item not in clean_json.values() + loop: + - 
this stuff should be ignored + - [ looks like a json list] + - '[ looks like a json list]' + - ' looks like a json list' + - moar garbage diff --git a/test/integration/targets/json_cleanup/runme.sh b/test/integration/targets/json_cleanup/runme.sh new file mode 100755 index 00000000..2de3bd0e --- /dev/null +++ b/test/integration/targets/json_cleanup/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook module_output_cleaning.yml "$@" diff --git a/test/integration/targets/known_hosts/aliases b/test/integration/targets/known_hosts/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/known_hosts/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/known_hosts/defaults/main.yml b/test/integration/targets/known_hosts/defaults/main.yml new file mode 100644 index 00000000..eb0a4ba3 --- /dev/null +++ b/test/integration/targets/known_hosts/defaults/main.yml @@ -0,0 +1,3 @@ +--- +example_org_rsa_key: > + example.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAglyZmHHWskQ9wkh8LYbIqzvg99/oloneH7BaZ02ripJUy/2Zynv4tgUfm9fdXvAb1XXCEuTRnts9FBer87+voU0FPRgx3CfY9Sgr0FspUjnm4lqs53FIab1psddAaS7/F7lrnjl6VqBtPwMRQZG7qlml5uogGJwYJHxX0PGtsdoTJsM= diff --git a/test/integration/targets/known_hosts/files/existing_known_hosts b/test/integration/targets/known_hosts/files/existing_known_hosts new file mode 100644 index 00000000..2564f409 --- /dev/null +++ b/test/integration/targets/known_hosts/files/existing_known_hosts @@ -0,0 +1,5 @@ +example.com ssh-dss AAAAB3NzaC1kc3MAAACBALT8YHxZ59d8yX4oQNPbpdK9AMPRQGKFY9X13S2fp4UMPijiB3ETxU1bAyVTjTbsoag065naFt13aIVl+u0MDPfMuYgVJFEorAZkDlBixvT25zpKyQhI4CtHhZ9Y9YWug4xLqSaFUYEPO31Bie7k8xRfDwsHtzTRPp/0zRURwARHAAAAFQDLx2DZMm3cR8cZtbq4zdSvkXLh0wAAAIAalkQYziu2b5dDRQMiFpDLpPdbymyVhDMmRKnXwAB1+dhGyJLGvfe0xO+ibqGXMp1aZ1iC3a/vHTpYKDVqKIIpFg5r0fxAcAZkJR0aRC8RDxW/IclbIliETD71osIT8I47OFc7vAVCWP8JbV3ZYzR+i98WUkmZ4/ZUzsDl2gi7WAAAAIAsdTGwAo4Fs784TdP2tIHCqxAIz2k4tWmZyeRmXkH5K/P1o9XSh3RNxvFKK7BY6dQK+h9jLunMBs0SCzhMoTcXaJq331kmLJltjq5peo0PnLGnQz5pas0PD7p7gb+soklmHoVp7J2oMC/U4N1Rxr6g9sv8Rpsf1PTPDT3sEbze6A== root@freezer +|1|d71/U7CbOH3Su+d2zxlbmiNfXtI=|g2YSPAVoK7bmg16FCOOPKTZe2BM= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== +|1|L0TqxOhAVh6mLZ2lbHdTv3owun0=|vn0La5pbHNxin3XzQQdvaOulvVU= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCNLCAA/SjVF3jkmlAlkgh+GtZdgxtusHaK66fcA7XSgCpQOdri1dGmND6pQDGwsxiKMy4Ou1GB2DR4N0G9T5E8= +|1|WPo7yAOdlQKLSuRatNJCmDoga0k=|D/QybGglKokWuEQUe9Okpy5uSh0= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCNLCAA/SjVF3jkmlAlkgh+GtZdgxtusHaK66fcA7XSgCpQOdri1dGmND6pQDGwsxiKMy4Ou1GB2DR4N0G9T5E8= +# example.net ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM6OSqweGdPdQ/metQaf738AdN3P+itYp1AypOTgXkyj root@localhost diff --git a/test/integration/targets/known_hosts/meta/main.yml b/test/integration/targets/known_hosts/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/known_hosts/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/known_hosts/tasks/main.yml b/test/integration/targets/known_hosts/tasks/main.yml new file mode 
100644 index 00000000..4ea91c35 --- /dev/null +++ b/test/integration/targets/known_hosts/tasks/main.yml @@ -0,0 +1,377 @@ +# test code for the known_hosts module +# (c) 2017, Marius Gedminas + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: copy an existing file in place + copy: + src: existing_known_hosts + dest: "{{ output_dir }}/known_hosts" + +# test addition + +- name: add a new host in check mode + check_mode: yes + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + register: diff + +- name: assert that the diff looks as expected (the key was added at the end) + assert: + that: + - 'diff is changed' + - 'diff.diff.before_header == diff.diff.after_header == output_dir|expanduser + "/known_hosts"' + - 'diff.diff.after.splitlines()[:-1] == diff.diff.before.splitlines()' + - 'diff.diff.after.splitlines()[-1] == example_org_rsa_key.strip()' + +- name: add a new host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts + +- name: assert that the key was added and ordering preserved + assert: + that: + - 'result is changed' + - 'known_hosts.stdout_lines[0].startswith("example.com")' + - 'known_hosts.stdout_lines[4].startswith("# example.net")' + - 'known_hosts.stdout_lines[-1].strip() == example_org_rsa_key.strip()' + +# test idempotence of addition + +- name: add the same host in check mode + check_mode: yes + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + register: check + +- name: assert that no changes were expected + assert: + that: + - 'check is not changed' + - 'check.diff.before == check.diff.after' + +- name: add the same host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v2 + +- name: assert that no changes happened + assert: + that: + - 'result is not changed' + - 'result.diff.before == result.diff.after' + - 'known_hosts.stdout == known_hosts_v2.stdout' + +# test removal + +- name: remove the host in check mode + check_mode: yes + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: diff + +- name: assert that the diff looks as expected (the key was removed) + assert: + that: + - 'diff.diff.before_header == diff.diff.after_header == output_dir|expanduser + "/known_hosts"' + - 'diff.diff.before.splitlines()[-1] == example_org_rsa_key.strip()' + - 'diff.diff.after.splitlines() == diff.diff.before.splitlines()[:-1]' + +- name: remove the host + known_hosts: + name: example.org + key: "{{ 
example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v3 + +- name: assert that the key was removed and ordering preserved + assert: + that: + - 'result is changed' + - '"example.org" not in known_hosts_v3.stdout' + - 'known_hosts_v3.stdout_lines[0].startswith("example.com")' + - 'known_hosts_v3.stdout_lines[-1].startswith("# example.net")' + +# test idempotence of removal + +- name: remove the same host in check mode + check_mode: yes + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: check + +- name: assert that no changes were expected + assert: + that: + - 'check is not changed' + - 'check.diff.before == check.diff.after' + +- name: remove the same host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v4 + +- name: assert that no changes happened + assert: + that: + - 'result is not changed' + - 'result.diff.before == result.diff.after' + - 'known_hosts_v3.stdout == known_hosts_v4.stdout' + +# test addition as hashed_host + +- name: add a new hashed host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + hash_host: yes + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v5 + +- name: assert that the key was added and ordering preserved + assert: + that: + - 'result is changed' + - 'known_hosts_v5.stdout_lines[0].startswith("example.com")' + - 'known_hosts_v5.stdout_lines[4].startswith("# example.net")' + - 'known_hosts_v5.stdout_lines[-1].strip().startswith("|1|")' + - 'known_hosts_v5.stdout_lines[-1].strip().endswith(example_org_rsa_key.strip().split()[-1])' + +# test idempotence of hashed addition + +- name: add the same host hashed + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + hash_host: yes + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v6 + +- name: assert that no changes happened + assert: + that: + - 'result is not changed' + - 'result.diff.before == result.diff.after' + - 'known_hosts_v5.stdout == known_hosts_v6.stdout' + +# test hashed removal + +- name: remove the hashed host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v7 + +- name: assert that the key was removed and ordering preserved + assert: + that: + - 'result is changed' + - 'example_org_rsa_key.strip().split()[-1] not in known_hosts_v7.stdout' + - 'known_hosts_v7.stdout_lines[0].startswith("example.com")' + - 'known_hosts_v7.stdout_lines[-1].startswith("# example.net")' + +# test idempotence of removal + +- name: remove the same hashed host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: absent + path: "{{output_dir}}/known_hosts" + register: result + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v8 + +- name: assert that no changes happened + 
assert: + that: + - 'result is not changed' + - 'result.diff.before == result.diff.after' + - 'known_hosts_v7.stdout == known_hosts_v8.stdout' + +# test roundtrip plaintext => hashed => plaintext +# The assertions are rather relaxed, because most of this has been tested previously + +- name: add a new host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v8 + +- name: assert the plaintext host is there + assert: + that: + - 'known_hosts_v8.stdout_lines[-1].strip() == example_org_rsa_key.strip()' + +- name: update the host to hashed mode + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + hash_host: true + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v9 + +- name: assert the hashed host is there + assert: + that: + - 'known_hosts_v9.stdout_lines[-1].strip().startswith("|1|")' + - 'known_hosts_v9.stdout_lines[-1].strip().endswith(example_org_rsa_key.strip().split()[-1])' + +- name: downgrade the host to plaintext mode + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v10 + +- name: assert the plaintext host is there + assert: + that: + - 'known_hosts_v10.stdout_lines[5].strip() == example_org_rsa_key.strip()' + +# ... and remove the host again for the next test + +- name: copy an existing file in place + copy: + src: existing_known_hosts + dest: "{{ output_dir }}/known_hosts" + +# Test key changes + +- name: add a hashed host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key }}" + state: present + path: "{{output_dir}}/known_hosts" + hash_host: true + +- name: change the key of a hashed host + known_hosts: + name: example.org + key: "{{ example_org_rsa_key.strip()[:-7] + 'RANDOM=' }}" + state: present + path: "{{output_dir}}/known_hosts" + hash_host: true + +- name: get the file content + command: "cat {{output_dir}}/known_hosts" + register: known_hosts_v11 + +- name: assert the change took place and the key got modified + assert: + that: + - 'known_hosts_v11.stdout_lines[-1].strip().endswith("RANDOM=")' + +# test errors + +- name: Try using a comma separated list of hosts + known_hosts: + name: example.org,acme.com + key: "{{ example_org_rsa_key }}" + path: "{{output_dir}}/known_hosts" + ignore_errors: yes + register: result + +- name: Assert that the error message was displayed + assert: + that: + - result is failed + - result.msg == 'Comma separated list of names is not supported. Please pass a single name to lookup in the known_hosts file.' 
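Aside on the "|1|" assertions in the hashed-host tests above: OpenSSH's HashKnownHosts scheme stores each hostname field as |1|base64(salt)|base64(HMAC-SHA1(salt, hostname)). Because the salt is random, the tests can only check the "|1|" prefix and the trailing key material, never a full expected line. A minimal Python sketch of how such a hashed field is derived, illustrative only and not part of this patch:

import base64
import hashlib
import hmac
import os

def hash_known_host(hostname):
    # OpenSSH uses a 20-byte random salt, the same length as a SHA-1 digest
    salt = os.urandom(20)
    mac = hmac.new(salt, hostname.encode('utf-8'), hashlib.sha1).digest()
    return '|1|%s|%s' % (base64.b64encode(salt).decode('ascii'),
                         base64.b64encode(mac).decode('ascii'))

# hash_known_host('example.org') returns e.g.
# '|1|d71/U7CbOH3Su+d2zxlbmiNfXtI=|g2YSPAVoK7bmg16FCOOPKTZe2BM=',
# the same shape as the hashed lines in files/existing_known_hosts.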
+ +- name: Try using a name that does not match the key + known_hosts: + name: example.com + key: "{{ example_org_rsa_key }}" + path: "{{output_dir}}/known_hosts" + ignore_errors: yes + register: result + +- name: Assert that name checking failed with error message + assert: + that: + - result is failed + - result.msg == 'Host parameter does not match hashed host field in supplied key' diff --git a/test/integration/targets/limit_inventory/aliases b/test/integration/targets/limit_inventory/aliases new file mode 100644 index 00000000..3005e4b2 --- /dev/null +++ b/test/integration/targets/limit_inventory/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/test/integration/targets/limit_inventory/hosts.yml b/test/integration/targets/limit_inventory/hosts.yml new file mode 100644 index 00000000..2e1b1927 --- /dev/null +++ b/test/integration/targets/limit_inventory/hosts.yml @@ -0,0 +1,5 @@ +all: + hosts: + host1: + host2: + host3: diff --git a/test/integration/targets/limit_inventory/runme.sh b/test/integration/targets/limit_inventory/runme.sh new file mode 100755 index 00000000..6a142b3b --- /dev/null +++ b/test/integration/targets/limit_inventory/runme.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -eux + +trap 'echo "Host pattern limit test failed"' ERR + +# https://github.com/ansible/ansible/issues/61964 + +# These tests should return all hosts +ansible -i hosts.yml all --limit ,, --list-hosts | tee out ; grep -q 'hosts (3)' out +ansible -i hosts.yml ,, --list-hosts | tee out ; grep -q 'hosts (3)' out +ansible -i hosts.yml , --list-hosts | tee out ; grep -q 'hosts (3)' out +ansible -i hosts.yml all --limit , --list-hosts | tee out ; grep -q 'hosts (3)' out +ansible -i hosts.yml all --limit '' --list-hosts | tee out ; grep -q 'hosts (3)' out + + +# Only one host +ansible -i hosts.yml all --limit ,,host1 --list-hosts | tee out ; grep -q 'hosts (1)' out +ansible -i hosts.yml ,,host1 --list-hosts | tee out ; grep -q 'hosts (1)' out + +ansible -i hosts.yml all --limit host1,, --list-hosts | tee out ; grep -q 'hosts (1)' out +ansible -i hosts.yml host1,, --list-hosts | tee out ; grep -q 'hosts (1)' out + + +# Only two hosts +ansible -i hosts.yml all --limit host1,,host3 --list-hosts | tee out ; grep -q 'hosts (2)' out +ansible -i hosts.yml host1,,host3 --list-hosts | tee out ; grep -q 'hosts (2)' out + +ansible -i hosts.yml all --limit 'host1, , ,host3' --list-hosts | tee out ; grep -q 'hosts (2)' out +ansible -i hosts.yml 'host1, , ,host3' --list-hosts | tee out ; grep -q 'hosts (2)' out + diff --git a/test/integration/targets/lineinfile/aliases b/test/integration/targets/lineinfile/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/lineinfile/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/lineinfile/files/firstmatch.txt b/test/integration/targets/lineinfile/files/firstmatch.txt new file mode 100644 index 00000000..347132c6 --- /dev/null +++ b/test/integration/targets/lineinfile/files/firstmatch.txt @@ -0,0 +1,5 @@ +line1 +line1 +line1 +line2 +line3 diff --git a/test/integration/targets/lineinfile/files/test.conf b/test/integration/targets/lineinfile/files/test.conf new file mode 100644 index 00000000..15404cd6 --- /dev/null +++ b/test/integration/targets/lineinfile/files/test.conf @@ -0,0 +1,5 @@ +[section_one] + +[section_two] + +[section_three] diff --git a/test/integration/targets/lineinfile/files/test.txt b/test/integration/targets/lineinfile/files/test.txt new file mode 100644 index 
00000000..8187db9f --- /dev/null +++ b/test/integration/targets/lineinfile/files/test.txt @@ -0,0 +1,5 @@ +This is line 1 +This is line 2 +REF this is a line for backrefs REF +This is line 4 +This is line 5 diff --git a/test/integration/targets/lineinfile/files/test_58923.txt b/test/integration/targets/lineinfile/files/test_58923.txt new file mode 100644 index 00000000..34579fde --- /dev/null +++ b/test/integration/targets/lineinfile/files/test_58923.txt @@ -0,0 +1,4 @@ +#!/bin/sh + +case "`uname`" in + Darwin*) if [ -z "$JAVA_HOME" ] ; then diff --git a/test/integration/targets/lineinfile/files/testempty.txt b/test/integration/targets/lineinfile/files/testempty.txt new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/lineinfile/files/testmultiple.txt b/test/integration/targets/lineinfile/files/testmultiple.txt new file mode 100644 index 00000000..fb57082b --- /dev/null +++ b/test/integration/targets/lineinfile/files/testmultiple.txt @@ -0,0 +1,7 @@ +This is line 1 + +This is line 2 + +This is line 3 + +This is line 4 diff --git a/test/integration/targets/lineinfile/files/testnoeof.txt b/test/integration/targets/lineinfile/files/testnoeof.txt new file mode 100644 index 00000000..152780b9 --- /dev/null +++ b/test/integration/targets/lineinfile/files/testnoeof.txt @@ -0,0 +1,2 @@ +This is line 1 +This is line 2 \ No newline at end of file diff --git a/test/integration/targets/lineinfile/meta/main.yml b/test/integration/targets/lineinfile/meta/main.yml new file mode 100644 index 00000000..98e60f78 --- /dev/null +++ b/test/integration/targets/lineinfile/meta/main.yml @@ -0,0 +1,20 @@ +# test code for the lineinfile module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +dependencies: + - prepare_tests diff --git a/test/integration/targets/lineinfile/tasks/main.yml b/test/integration/targets/lineinfile/tasks/main.yml new file mode 100644 index 00000000..840051cf --- /dev/null +++ b/test/integration/targets/lineinfile/tasks/main.yml @@ -0,0 +1,1157 @@ +# test code for the lineinfile module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +- name: deploy the test file for lineinfile + copy: + src: test.txt + dest: "{{ output_dir }}/test.txt" + register: result + +- name: assert that the test file was deployed + assert: + that: + - result is changed + - "result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" + - "result.state == 'file'" + +- name: insert a line at the beginning of the file, and back it up + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New line at the beginning" + insertbefore: "BOF" + backup: yes + register: result1 + +- name: insert a line at the beginning of the file again + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New line at the beginning" + insertbefore: "BOF" + register: result2 + +- name: assert that the line was inserted at the head of the file + assert: + that: + - result1 is changed + - result2 is not changed + - result1.msg == 'line added' + - result1.backup != '' + +- name: stat the backup file + stat: + path: "{{ result1.backup }}" + register: result + +- name: assert the backup file matches the previous hash + assert: + that: + - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" + +- name: stat the test after the insert at the head + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test hash is what we expect for the file with the insert at the head + assert: + that: + - "result.stat.checksum == '7eade4042b23b800958fe807b5bfc29f8541ec09'" + +- name: insert a line at the end of the file + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New line at the end" + insertafter: "EOF" + register: result + +- name: assert that the line was inserted at the end of the file + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: stat the test after the insert at the end + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == 'fb57af7dc10a1006061b000f1f04c38e4bef50a9'" + +- name: insert a line after the first line + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New line after line 1" + insertafter: "^This is line 1$" + register: result + +- name: assert that the line was inserted after the first line + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: stat the test after insert after the first line + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after the insert after the first line + assert: + that: + - "result.stat.checksum == '5348da605b1bc93dbadf3a16474cdf22ef975bec'" + +- name: insert a line before the last line + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New line before line 5" + insertbefore: "^This is line 5$" + register: result + +- name: assert that the line was inserted before the last line + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: stat the test after the insert before the last line + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after the insert before the last line + assert: + that: + - "result.stat.checksum == '2e9e460ff68929e4453eb765761fd99814f6e286'" + +- name: Replace a line with backrefs + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "This is line 3" + backrefs: yes + regexp: "^(REF) .* \\1$" + register: backrefs_result1 + +- name: 
Replace a line with backrefs again + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "This is line 3" + backrefs: yes + regexp: "^(REF) .* \\1$" + register: backrefs_result2 +- command: cat {{ output_dir }}/test.txt + +- name: assert that the line with backrefs was changed + assert: + that: + - backrefs_result1 is changed + - backrefs_result2 is not changed + - "backrefs_result1.msg == 'line replaced'" + +- name: stat the test after the backref line was replaced + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after backref line was replaced + assert: + that: + - "result.stat.checksum == '72f60239a735ae06e769d823f5c2b4232c634d9c'" + +- name: remove the middle line + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: absent + regexp: "^This is line 3$" + register: result + +- name: assert that the line was removed + assert: + that: + - result is changed + - "result.msg == '1 line(s) removed'" + +- name: stat the test after the middle line was removed + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after the middle line was removed + assert: + that: + - "result.stat.checksum == 'd4eeb07bdebab2d1cdb3ec4a3635afa2618ad4ea'" + +- name: run a validation script that succeeds + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: absent + regexp: "^This is line 5$" + validate: "true %s" + register: result + +- name: assert that the file validated after removing a line + assert: + that: + - result is changed + - "result.msg == '1 line(s) removed'" + +- name: stat the test after the validation succeeded + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after the validation succeeded + assert: + that: + - "result.stat.checksum == 'ab56c210ea82839a54487464800fed4878cb2608'" + +- name: run a validation script that fails + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: absent + regexp: "^This is line 1$" + validate: "/bin/false %s" + register: result + ignore_errors: yes + +- name: assert that the validate failed + assert: + that: + - "result.failed == true" + +- name: stat the test after the validation failed + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches the previous after the validation failed + assert: + that: + - "result.stat.checksum == 'ab56c210ea82839a54487464800fed4878cb2608'" + +- name: use create=yes + lineinfile: + dest: "{{ output_dir }}/new_test.txt" + create: yes + insertbefore: BOF + state: present + line: "This is a new file" + register: result + +- name: assert that the new file was created + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: validate that the newly created file exists + stat: + path: "{{ output_dir }}/new_test.txt" + register: result + ignore_errors: yes + +- name: assert the newly created test checksum matches + assert: + that: + - "result.stat.checksum == '038f10f9e31202451b093163e81e06fbac0c6f3a'" + +- name: Create a file without a path + lineinfile: + dest: file.txt + create: yes + line: Test line + register: create_no_path_test + +- name: Stat the file + stat: + path: file.txt + register: create_no_path_file + +- name: Ensure file was created + assert: + that: + - create_no_path_test is changed + - create_no_path_file.stat.exists + +# Test EOF in cases where file has no newline at EOF +- name: testnoeof deploy the file for lineinfile + copy: + src: testnoeof.txt + dest: "{{ 
output_dir }}/testnoeof.txt" + register: result + +- name: testnoeof insert a line at the end of the file + lineinfile: + dest: "{{ output_dir }}/testnoeof.txt" + state: present + line: "New line at the end" + insertafter: "EOF" + register: result + +- name: testnoeof assert that the line was inserted at the end of the file + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: insert multiple lines at the end of the file + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "This is a line\nwith \\n character" + insertafter: "EOF" + register: result + +- name: assert that the multiple lines were inserted + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: testnoeof stat the no newline EOF test after the insert at the end + stat: + path: "{{ output_dir }}/testnoeof.txt" + register: result + +- name: testnoeof assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == 'f9af7008e3cb67575ce653d094c79cabebf6e523'" + +# Test EOF with empty file to make sure no unnecessary newline is added +- name: testempty deploy the testempty file for lineinfile + copy: + src: testempty.txt + dest: "{{ output_dir }}/testempty.txt" + register: result + +- name: testempty insert a line at the end of the file + lineinfile: + dest: "{{ output_dir }}/testempty.txt" + state: present + line: "New line at the end" + insertafter: "EOF" + register: result + +- name: testempty assert that the line was inserted at the end of the file + assert: + that: + - result is changed + - "result.msg == 'line added'" + +- name: testempty stat the test after the insert at the end + stat: + path: "{{ output_dir }}/testempty.txt" + register: result + +- name: testempty assert test checksum matches after the insert at the end + assert: + that: + - "result.stat.checksum == 'f440dc65ea9cec3fd496c1479ddf937e1b949412'" + +- stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after inserting multiple lines + assert: + that: + - "result.stat.checksum == 'fde683229429a4f05d670e6c10afc875e1d5c489'" + +- name: replace a line with backrefs included in the line + lineinfile: + dest: "{{ output_dir }}/test.txt" + state: present + line: "New \\1 created with the backref" + backrefs: yes + regexp: "^This is (line 4)$" + register: result + +- name: assert that the line with backrefs was changed + assert: + that: + - result is changed + - "result.msg == 'line replaced'" + +- name: stat the test after the backref line was replaced + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: assert test checksum matches after backref line was replaced + assert: + that: + - "result.stat.checksum == '981ad35c4b30b03bc3a1beedce0d1e72c491898e'" + +################################################################### +# issue 8535 + +- name: create a new file for testing quoting issues + file: + dest: "{{ output_dir }}/test_quoting.txt" + state: touch + register: result + +- name: assert the new file was created + assert: + that: + - result is changed + +- name: use with_items to add code-like strings to the quoting txt file + lineinfile: + dest: "{{ output_dir }}/test_quoting.txt" + line: "{{ item }}" + insertbefore: BOF + with_items: + - "'foo'" + - "dotenv.load();" + - "var dotenv = require('dotenv');" + register: result + +- name: assert the quote test file was modified correctly + assert: + that: + - result.results|length == 3 + - result.results[0] is changed + -
result.results[0].item == "'foo'" + - result.results[1] is changed + - result.results[1].item == "dotenv.load();" + - result.results[2] is changed + - result.results[2].item == "var dotenv = require('dotenv');" + +- name: stat the quote test file + stat: + path: "{{ output_dir }}/test_quoting.txt" + register: result + +- name: assert test checksum matches after the code-like strings were added + assert: + that: + - "result.stat.checksum == '7dc3cb033c3971e73af0eaed6623d4e71e5743f1'" + +- name: insert a line into the quoted file with a single quote + lineinfile: + dest: "{{ output_dir }}/test_quoting.txt" + line: "import g'" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result is changed + +- name: stat the quote test file + stat: + path: "{{ output_dir }}/test_quoting.txt" + register: result + +- name: assert test checksum matches after the single quote line was added + assert: + that: + - "result.stat.checksum == '73b271c2cc1cef5663713bc0f00444b4bf9f4543'" + +- name: insert a line into the quoted file with many double quotation strings + lineinfile: + dest: "{{ output_dir }}/test_quoting.txt" + line: "\"quote\" and \"unquote\"" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result is changed + +- name: stat the quote test file + stat: + path: "{{ output_dir }}/test_quoting.txt" + register: result + +- name: assert test checksum matches after the double quoted line was added + assert: + that: + - "result.stat.checksum == 'b10ab2a3c3b6492680c8d0b1d6f35aa6b8f9e731'" + +################################################################### +# Issue 28721 + +- name: Deploy the testmultiple file + copy: + src: testmultiple.txt + dest: "{{ output_dir }}/testmultiple.txt" + register: result + +- name: Assert that the testmultiple file was deployed + assert: + that: + - result is changed + - result.checksum == '3e0090a34fb641f3c01e9011546ff586260ea0ea' + - result.state == 'file' + +# Test insertafter +- name: Write the same line to a file inserted after different lines + lineinfile: + path: "{{ output_dir }}/testmultiple.txt" + insertafter: "{{ item.regex }}" + line: "{{ item.replace }}" + register: _multitest_1 + with_items: "{{ test_regexp }}" + +- name: Assert that the line is added once only + assert: + that: + - _multitest_1.results.0 is changed + - _multitest_1.results.1 is not changed + - _multitest_1.results.2 is not changed + - _multitest_1.results.3 is not changed + +- name: Do the same thing again to check for changes + lineinfile: + path: "{{ output_dir }}/testmultiple.txt" + insertafter: "{{ item.regex }}" + line: "{{ item.replace }}" + register: _multitest_2 + with_items: "{{ test_regexp }}" + +- name: Assert that the line is not added anymore + assert: + that: + - _multitest_2.results.0 is not changed + - _multitest_2.results.1 is not changed + - _multitest_2.results.2 is not changed + - _multitest_2.results.3 is not changed + +- name: Stat the insertafter file + stat: + path: "{{ output_dir }}/testmultiple.txt" + register: result + +- name: Assert that the insertafter file matches expected checksum + assert: + that: + - result.stat.checksum == 'c6733b6c53ddd0e11e6ba39daa556ef8f4840761' + +# Test insertbefore + +- name: Deploy the testmultiple file + copy: + src: testmultiple.txt + dest: "{{ output_dir }}/testmultiple.txt" + register: result + +- name: Assert that the testmultiple file was deployed + assert: + that: + - result is changed + - result.checksum == '3e0090a34fb641f3c01e9011546ff586260ea0ea' + -
result.state == 'file' + +- name: Write the same line to a file inserted before different lines + lineinfile: + path: "{{ output_dir }}/testmultiple.txt" + insertbefore: "{{ item.regex }}" + line: "{{ item.replace }}" + register: _multitest_3 + with_items: "{{ test_regexp }}" + +- name: Assert that the line is added once only + assert: + that: + - _multitest_3.results.0 is changed + - _multitest_3.results.1 is not changed + - _multitest_3.results.2 is not changed + - _multitest_3.results.3 is not changed + +- name: Do the same thing again to check for changes + lineinfile: + path: "{{ output_dir }}/testmultiple.txt" + insertbefore: "{{ item.regex }}" + line: "{{ item.replace }}" + register: _multitest_4 + with_items: "{{ test_regexp }}" + +- name: Assert that the line is not added anymore + assert: + that: + - _multitest_4.results.0 is not changed + - _multitest_4.results.1 is not changed + - _multitest_4.results.2 is not changed + - _multitest_4.results.3 is not changed + +- name: Stat the insertbefore file + stat: + path: "{{ output_dir }}/testmultiple.txt" + register: result + +- name: Assert that the insertbefore file matches expected checksum + assert: + that: + - result.stat.checksum == '5d298651fbc377b45257da10308a9dc2fe1f8be5' + +################################################################### +# Issue 36156 +# Test insertbefore and insertafter with regexp + +- name: Deploy the test.conf file + copy: + src: test.conf + dest: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the test.conf file was deployed + assert: + that: + - result is changed + - result.checksum == '6037f13e419b132eb3fd20a89e60c6c87a6add38' + - result.state == 'file' + +# Test insertafter +- name: Insert lines after with regexp + lineinfile: + path: "{{ output_dir }}/test.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.after }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_5 + +- name: Do the same thing again and check for changes + lineinfile: + path: "{{ output_dir }}/test.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.after }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_6 + +- name: Assert that the file was changed the first time but not the second time + assert: + that: + - item.0 is changed + - item.1 is not changed + with_together: + - "{{ _multitest_5.results }}" + - "{{ _multitest_6.results }}" + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file contents match what is expected + assert: + that: + - result.stat.checksum == '06e2c456e5028dd7bcd0b117b5927a1139458c82' + +- name: Do the same thing a third time without regexp and check for changes + lineinfile: + path: "{{ output_dir }}/test.conf" + line: "{{ item.line }}" + insertafter: "{{ item.after }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_7 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file was not changed when no regexp was provided + assert: + that: + - item is not changed + with_items: "{{ _multitest_7.results }}" + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file contents match what is expected + assert: + that: + - result.stat.checksum == '06e2c456e5028dd7bcd0b117b5927a1139458c82' + +# Test insertbefore +- name: Deploy the test.conf file + copy: + src: test.conf + dest: "{{ output_dir
}}/test.conf" + register: result + +- name: Assert that the test.conf file was deployed + assert: + that: + - result is changed + - result.checksum == '6037f13e419b132eb3fd20a89e60c6c87a6add38' + - result.state == 'file' + +- name: Insert lines before with regexp + lineinfile: + path: "{{ output_dir }}/test.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertbefore: "{{ item.before }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_8 + +- name: Do the same thing again and check for changes + lineinfile: + path: "{{ output_dir }}/test.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertbefore: "{{ item.before }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_9 + +- name: Assert that the file was changed the first time but not the second time + assert: + that: + - item.0 is changed + - item.1 is not changed + with_together: + - "{{ _multitest_8.results }}" + - "{{ _multitest_9.results }}" + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file contents match what is expected + assert: + that: + - result.stat.checksum == 'c3be9438a07c44d4c256cebfcdbca15a15b1db91' + +- name: Do the same thing a third time without regexp and check for changes + lineinfile: + path: "{{ output_dir }}/test.conf" + line: "{{ item.line }}" + insertbefore: "{{ item.before }}" + with_items: "{{ test_befaf_regexp }}" + register: _multitest_10 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file was changed when no regexp was provided + assert: + that: + - item is not changed + with_items: "{{ _multitest_10.results }}" + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.conf" + register: result + +- name: Assert that the file contents match what is expected + assert: + that: + - result.stat.checksum == 'c3be9438a07c44d4c256cebfcdbca15a15b1db91' + +- name: Copy empty file to test with insertbefore + copy: + src: testempty.txt + dest: "{{ output_dir }}/testempty.txt" + +- name: Add a line to empty file with insertbefore + lineinfile: + path: "{{ output_dir }}/testempty.txt" + line: top + insertbefore: '^not in the file$' + register: oneline_insbefore_test1 + +- name: Add a line to file with only one line using insertbefore + lineinfile: + path: "{{ output_dir }}/testempty.txt" + line: top + insertbefore: '^not in the file$' + register: oneline_insbefore_test2 + +- name: Stat the file + stat: + path: "{{ output_dir }}/testempty.txt" + register: oneline_insbefore_file + +- name: Assert that insertebefore worked properly with a one line file + assert: + that: + - oneline_insbefore_test1 is changed + - oneline_insbefore_test2 is not changed + - oneline_insbefore_file.stat.checksum == '4dca56d05a21f0d018cd311f43e134e4501cf6d9' + +################################################################### +# Issue 29443 +# When using an empty regexp, replace the last line (since it matches every line) +# but also provide a warning. 
+ +- name: Deploy the test file for lineinfile + copy: + src: test.txt + dest: "{{ output_dir }}/test.txt" + register: result + +- name: Assert that the test file was deployed + assert: + that: + - result is changed + - result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51' + - result.state == 'file' + +- name: Insert a line in the file using an empty string as a regular expression + lineinfile: + path: "{{ output_dir }}/test.txt" + regexp: '' + line: This is line 6 + register: insert_empty_regexp + +- name: Stat the file + stat: + path: "{{ output_dir }}/test.txt" + register: result + +- name: Assert that the file contents match what is expected and a warning was displayed + assert: + that: + - insert_empty_regexp is changed + - warning_message in insert_empty_regexp.warnings + - result.stat.checksum == '23555a98ceaa88756b4c7c7bba49d9f86eed868f' + vars: + warning_message: >- + The regular expression is an empty string, which will match every line in the file. + This may have unintended consequences, such as replacing the last line in the file rather than appending. + If this is desired, use '^' to match every line in the file and avoid this warning. + +################################################################### +## Issue #58923 +## Use firstmatch with insertafter and ensure multiple lines are not inserted + +- name: Deploy the firstmatch test file + copy: + src: firstmatch.txt + dest: "{{ output_dir }}/firstmatch.txt" + register: result + +- name: Assert that the test file was deployed + assert: + that: + - result is changed + - result.checksum == '1d644e5e2e51c67f1bd12d7bbe2686017f39923d' + - result.state == 'file' + +- name: Insert a line after an existing line using firstmatch + lineinfile: + path: "{{ output_dir }}/firstmatch.txt" + line: INSERT + insertafter: line1 + firstmatch: yes + register: insertafter1 + +- name: Insert a line after an existing line using firstmatch again + lineinfile: + path: "{{ output_dir }}/firstmatch.txt" + line: INSERT + insertafter: line1 + firstmatch: yes + register: insertafter2 + +- name: Stat the file + stat: + path: "{{ output_dir }}/firstmatch.txt" + register: result + +- name: Assert that the file was modified appropriately + assert: + that: + - insertafter1 is changed + - insertafter2 is not changed + - result.stat.checksum == '114aae024073a3ee8ec8db0ada03c5483326dd86' + +######################################################################################## +# Tests of fixing the same issue as above (#58923) by @Andersson007 +# and @samdoran: + +# Test insertafter with regexp +- name: Deploy the test file + copy: + src: test_58923.txt + dest: "{{ output_dir }}/test_58923.txt" + register: initial_file + +- name: Assert that the test file was deployed + assert: + that: + - initial_file is changed + - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e' + - initial_file.state == 'file' + +# Regarding the documentation: +# If regular expressions are passed to both regexp and +# insertafter, insertafter is only honored if no match for regexp is found. +# Therefore, +# when regular expressions are passed to both regexp and insertafter, then: +# 1. regexp was found -> ignore insertafter, replace the found line +# 2.
regexp was not found -> insert the line after 'insertafter' line + +# Regexp is not present in the file, so the line must be inserted after ^#!/bin/sh +- name: Add the line using firstmatch, regexp, and insertafter + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertafter: '^#!/bin/sh' + regexp: ^export FISHEYE_OPTS + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertafter_test1 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertafter_test1_file + +- name: Add the line using firstmatch, regexp, and insertafter again + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertafter: '^#!/bin/sh' + regexp: ^export FISHEYE_OPTS + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertafter_test2 + +# Check the previous step: +# the same line was added again with the same parameters, +# so nothing should have been added: +- name: Stat the file again + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertafter_test2_file + +- name: Assert insertafter tests gave the expected results + assert: + that: + - insertafter_test1 is changed + - insertafter_test1_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08' + - insertafter_test2 is not changed + - insertafter_test2_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08' + +# Test insertafter without regexp +- name: Deploy the test file + copy: + src: test_58923.txt + dest: "{{ output_dir }}/test_58923.txt" + register: initial_file + +- name: Assert that the test file was deployed + assert: + that: + - initial_file is changed + - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e' + - initial_file.state == 'file' + +- name: Insert the line using firstmatch and insertafter without regexp + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertafter: '^#!/bin/sh' + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertafter_test3 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertafter_test3_file + +- name: Insert the line using firstmatch and insertafter without regexp again + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertafter: '^#!/bin/sh' + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertafter_test4 + +- name: Stat the file again + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertafter_test4_file + +- name: Assert insertafter without regexp tests gave the expected results + assert: + that: + - insertafter_test3 is changed + - insertafter_test3_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08' + - insertafter_test4 is not changed + - insertafter_test4_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08' + + +# Test insertbefore with regexp +- name: Deploy the test file + copy: + src: test_58923.txt + dest: "{{ output_dir }}/test_58923.txt" + register: initial_file + +- name: Assert that the test file was deployed + assert: + that: + - initial_file is changed + - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e' + - initial_file.state == 'file' + +- name: Add the line using regexp, firstmatch, and insertbefore + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertbefore: '^#!/bin/sh' + regexp: ^export FISHEYE_OPTS + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertbefore_test1 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test1_file + +- name: Add the line using regexp, firstmatch, and insertbefore again + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertbefore: '^#!/bin/sh' + regexp: ^export FISHEYE_OPTS + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertbefore_test2 + +- name: Stat the file again + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertbefore_test2_file + +- name: Assert insertbefore with regexp tests gave the expected results + assert: + that: + - insertbefore_test1 is changed + - insertbefore_test1_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7' + - insertbefore_test2 is not changed + - insertbefore_test2_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7' + + +# Test insertbefore without regexp +- name: Deploy the test file + copy: + src: test_58923.txt + dest: "{{ output_dir }}/test_58923.txt" + register: initial_file + +- name: Assert that the test file was deployed + assert: + that: + - initial_file is changed + - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e' + - initial_file.state == 'file' + +- name: Add the line using insertbefore and firstmatch + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertbefore: '^#!/bin/sh' + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertbefore_test3 + +- name: Stat the file + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertbefore_test3_file + +- name: Add the line using insertbefore and firstmatch again + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertbefore: '^#!/bin/sh' + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertbefore_test4 + +- name: Stat the file again + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertbefore_test4_file + +# Test when the line is present in the file but +# not in the before/after spot and it does match the regexp: +- name: > + Add the line using insertbefore and firstmatch when the regexp line + is present but not close to the insertbefore spot + lineinfile: + path: "{{ output_dir }}/test_58923.txt" + insertbefore: ' Darwin\*\) if \[ -z \"\$JAVA_HOME\" \] ; then' + firstmatch: true + line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m" + register: insertbefore_test5 + +- name: Stat the file again + stat: + path: "{{ output_dir }}/test_58923.txt" + register: insertbefore_test5_file + +- name: Assert insertbefore without regexp tests gave the expected results + assert: + that: + - insertbefore_test3 is changed + - insertbefore_test3_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7' + - insertbefore_test4 is not changed + - insertbefore_test4_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7' + - insertbefore_test5 is not changed + - insertbefore_test5_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7' + + +# Test inserting a line at the end of the file using regexp with insertafter +# https://github.com/ansible/ansible/issues/63684 +- name: Create a file by inserting a line + lineinfile: + path: "{{ output_dir }}/testend.txt" + create: yes + line: testline + register: testend1 + +- name: Insert a line at the end of the file + lineinfile: + path: "{{ output_dir }}/testend.txt" + insertafter: testline + regexp: line at the end + line: line at the end + register: testend2 + +- name: Stat the file + stat: + path: "{{ output_dir }}/testend.txt" + register: testend_file + +- name: Assert inserting at the end gave the expected
results. + assert: + that: + - testend1 is changed + - testend2 is changed + - testend_file.stat.checksum == 'ef36116966836ce04f6b249fd1837706acae4e19' diff --git a/test/integration/targets/lineinfile/vars/main.yml b/test/integration/targets/lineinfile/vars/main.yml new file mode 100644 index 00000000..6e99d4f1 --- /dev/null +++ b/test/integration/targets/lineinfile/vars/main.yml @@ -0,0 +1,29 @@ +test_regexp: + - regex: '1' + replace: 'bar' + + - regex: '2' + replace: 'bar' + + - regex: '3' + replace: 'bar' + + - regex: '4' + replace: 'bar' + + +test_befaf_regexp: + - before: section_three + after: section_one + regexp: option_one= + line: option_one=1 + + - before: section_three + after: section_one + regexp: option_two= + line: option_two=2 + + - before: section_three + after: section_one + regexp: option_three= + line: option_three=3 diff --git a/test/integration/targets/lookup_config/aliases b/test/integration/targets/lookup_config/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_config/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_config/tasks/main.yml b/test/integration/targets/lookup_config/tasks/main.yml new file mode 100644 index 00000000..be185197 --- /dev/null +++ b/test/integration/targets/lookup_config/tasks/main.yml @@ -0,0 +1,59 @@ +- name: Verify lookup_config errors with no on_missing (failure expected) + set_fact: + foo: '{{lookup("config", "THIS_DOES_NOT_EXIST")}}' + ignore_errors: yes + register: lookup_config_1 + +- name: Verify lookup_config errors with on_missing=error (failure expected) + set_fact: + foo: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="error")}}' + ignore_errors: yes + register: lookup_config_2 + +- name: Verify lookup_config does not error with on_missing=skip + set_fact: + lookup3: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="skip")}}' + register: lookup_config_3 + +# TODO: Is there a decent way to check that the warning is actually triggered? 
+- name: Verify lookup_config does not error with on_missing=warn (warning expected) + set_fact: + lookup4: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="warn")}}' + register: lookup_config_4 + +- name: Verify lookup_config errors with invalid on_missing (failure expected) + set_fact: + foo: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="boo")}}' + ignore_errors: yes + register: lookup_config_5 + +- name: Verify lookup_config errors with invalid param type (failure expected) + set_fact: + foo: '{{lookup("config", 1337)}}' + ignore_errors: yes + register: lookup_config_6 + +- name: Verify lookup_config errors with callable arg (failure expected) + set_fact: + foo: '{{lookup("config", "ConfigManager")}}' + ignore_errors: yes + register: lookup_config_7 + +- name: Verify lookup_config + assert: + that: + - '"meow" in lookup("config", "ANSIBLE_COW_WHITELIST")' + - lookup_config_1 is failed + - '"Unable to find setting" in lookup_config_1.msg' + - lookup_config_2 is failed + - '"Unable to find setting" in lookup_config_2.msg' + - lookup_config_3 is success + - 'lookup3|length == 0' + - lookup_config_4 is success + - 'lookup4|length == 0' + - lookup_config_5 is failed + - '"must be a string and one of" in lookup_config_5.msg' + - lookup_config_6 is failed + - '"Invalid setting identifier" in lookup_config_6.msg' + - lookup_config_7 is failed + - '"Invalid setting" in lookup_config_7.msg' diff --git a/test/integration/targets/lookup_dict/aliases b/test/integration/targets/lookup_dict/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_dict/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_dict/tasks/main.yml b/test/integration/targets/lookup_dict/tasks/main.yml new file mode 100644 index 00000000..6f778548 --- /dev/null +++ b/test/integration/targets/lookup_dict/tasks/main.yml @@ -0,0 +1,54 @@ +- name: Define users dict + set_fact: + users: + alice: + name: Alice + age: 21 + bob: + name: Bob + age: 22 + +- name: Convert users dict to list + set_fact: + user_list: "{{ lookup('dict', users) | sort(attribute='key') }}" + +- name: Verify results + assert: + that: + - user_list | length == 2 + - user_list[0].key == 'alice' + - user_list[0].value | length == 2 + - user_list[0].value.name == 'Alice' + - user_list[0].value.age == 21 + - user_list[1].key == 'bob' + - user_list[1].value | length == 2 + - user_list[1].value.name == 'Bob' + - user_list[1].value.age == 22 + +- name: Convert a non-dict (failure expected) + set_fact: + bad_fact: "{{ lookup('dict', 1) }}" + register: result + ignore_errors: yes + +- name: Verify conversion failed + assert: + that: + - result is failed + +- name: Define simple dict + set_fact: + simple: + hello: World + +- name: Convert using with_dict to cause terms to not be a list + set_fact: + hello: "{{ item }}" + with_dict: "{{ simple }}" + +- name: Verify conversion + assert: + that: + - hello | length == 2 + - hello.key == 'hello' + - hello.value == 'World' diff --git a/test/integration/targets/lookup_env/aliases b/test/integration/targets/lookup_env/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_env/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git 
a/test/integration/targets/lookup_env/tasks/main.yml b/test/integration/targets/lookup_env/tasks/main.yml new file mode 100644 index 00000000..daaeb35f --- /dev/null +++ b/test/integration/targets/lookup_env/tasks/main.yml @@ -0,0 +1,15 @@ +- name: get HOME environment var value + shell: "echo $HOME" + register: home_var_value + +- name: use env lookup to get HOME var + set_fact: + test_val: "{{ lookup('env', 'HOME') }}" + +- debug: var=home_var_value.stdout +- debug: var=test_val + +- name: compare values + assert: + that: + - "test_val == home_var_value.stdout" diff --git a/test/integration/targets/lookup_file/aliases b/test/integration/targets/lookup_file/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_file/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_file/tasks/main.yml b/test/integration/targets/lookup_file/tasks/main.yml new file mode 100644 index 00000000..a6d636db --- /dev/null +++ b/test/integration/targets/lookup_file/tasks/main.yml @@ -0,0 +1,13 @@ +- name: make a new file to read + copy: dest={{output_dir}}/foo.txt mode=0644 content="bar" + +- name: load the file as a fact + set_fact: + foo: "{{ lookup('file', output_dir + '/foo.txt' ) }}" + +- debug: var=foo + +- name: verify file lookup + assert: + that: + - "foo == 'bar'" diff --git a/test/integration/targets/lookup_fileglob/aliases b/test/integration/targets/lookup_fileglob/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt b/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt new file mode 100644 index 00000000..50255880 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt @@ -0,0 +1 @@ +in files subdir adjacent to play diff --git a/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt b/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt new file mode 100644 index 00000000..96c7a549 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt @@ -0,0 +1 @@ +in play adjacent subdir of files/ diff --git a/test/integration/targets/lookup_fileglob/find_levels/play.yml b/test/integration/targets/lookup_fileglob/find_levels/play.yml new file mode 100644 index 00000000..4bdee05d --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/play.yml @@ -0,0 +1,13 @@ +- hosts: localhost + gather_facts: false + vars: + expected: + play_adj: adjacent to play + play_adj_subdir: in files subdir adjacent to play + somepath/play_adj_subsubdir: in play adjacent subdir of files/ + in_role: file in role + otherpath/in_role_subdir: file in role subdir + tasks: + - name: Import role lookup + import_role: + name: get_file diff --git a/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt b/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt new file mode 100644 index 00000000..14f0cf50 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt @@ -0,0 +1 @@ +adjacent to play diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt
b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt new file mode 100644 index 00000000..fdfc9476 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt @@ -0,0 +1 @@ +file in role diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt new file mode 100644 index 00000000..40e75a40 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt @@ -0,0 +1 @@ +file in role subdir diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml new file mode 100644 index 00000000..2fc21df7 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml @@ -0,0 +1,10 @@ +- name: show fileglob results + debug: + msg: '{{ q("fileglob", seed + ".*") }}' + register: found + +- name: did we get the right one? + assert: + that: + - found['msg'][0].endswith(seed + '.txt') + - q('file', found['msg'][0])[0] == expected[seed] diff --git a/test/integration/targets/lookup_fileglob/non_existent/play.yml b/test/integration/targets/lookup_fileglob/non_existent/play.yml new file mode 100644 index 00000000..e92dff5a --- /dev/null +++ b/test/integration/targets/lookup_fileglob/non_existent/play.yml @@ -0,0 +1,6 @@ +- hosts: localhost + gather_facts: false + tasks: + - name: fileglob should be empty + assert: + that: q("fileglob", seed) | length == 0 diff --git a/test/integration/targets/lookup_fileglob/runme.sh b/test/integration/targets/lookup_fileglob/runme.sh new file mode 100755 index 00000000..1e0297c7 --- /dev/null +++ b/test/integration/targets/lookup_fileglob/runme.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -eux + +# fun multilevel finds +for seed in play_adj play_adj_subdir somepath/play_adj_subsubdir in_role otherpath/in_role_subdir +do + ansible-playbook find_levels/play.yml -e "seed='${seed}'" "$@" +done + +# non-existent paths +for seed in foo foo/bar foo/bar/baz +do + ansible-playbook non_existent/play.yml -e "seed='${seed}'" "$@" +done diff --git a/test/integration/targets/lookup_first_found/aliases b/test/integration/targets/lookup_first_found/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_first_found/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_first_found/files/bar1 b/test/integration/targets/lookup_first_found/files/bar1 new file mode 100644 index 00000000..5716ca59 --- /dev/null +++ b/test/integration/targets/lookup_first_found/files/bar1 @@ -0,0 +1 @@ +bar diff --git a/test/integration/targets/lookup_first_found/files/foo1 b/test/integration/targets/lookup_first_found/files/foo1 new file mode 100644 index 00000000..257cc564 --- /dev/null +++ b/test/integration/targets/lookup_first_found/files/foo1 @@ -0,0 +1 @@ +foo diff --git a/test/integration/targets/lookup_first_found/tasks/main.yml b/test/integration/targets/lookup_first_found/tasks/main.yml new file mode 100644 index 00000000..87f2a404 --- /dev/null +++ b/test/integration/targets/lookup_first_found/tasks/main.yml @@ -0,0 +1,73 @@ +- name: test with_first_found +
#shell: echo {{ item }} + set_fact: "first_found={{ item }}" + with_first_found: + - "{{ role_path + '/files/does_not_exist' }}" + - "{{ role_path + '/files/foo1' }}" + - "{{ role_path + '/files/bar1' }}" + +- name: set expected + set_fact: first_expected="{{ role_path + '/files/foo1' }}" + +- name: set unexpected + set_fact: first_unexpected="{{ role_path + '/files/bar1' }}" + +- name: verify with_first_found results + assert: + that: + - "first_found == first_expected" + - "first_found != first_unexpected" + +- name: test q(first_found) with no files produces empty list + set_fact: + first_found_var: "{{ q('first_found', params, errors='ignore') }}" + vars: + params: + files: "not_a_file.yaml" + +- name: verify q(first_found) result + assert: + that: + - "first_found_var == []" + +- name: test lookup(first_found) with no files produces empty string + set_fact: + first_found_var: "{{ lookup('first_found', params, errors='ignore') }}" + vars: + params: + files: "not_a_file.yaml" + +- name: verify lookup(first_found) result + assert: + that: + - "first_found_var == ''" + +# NOTE: skip: True deprecated e17a2b502d6601be53c60d7ba1c627df419460c9, remove 2.12 +- name: test first_found with no matches and skip=True does nothing + set_fact: "this_not_set={{ item }}" + vars: + params: + files: + - not/a/file.yaml + - another/non/file.yaml + skip: True + loop: "{{ q('first_found', params) }}" + +- name: verify skip + assert: + that: + - "this_not_set is not defined" + +- name: test first_found with no matches and errors='ignore' skips in a loop + set_fact: "this_not_set={{ item }}" + vars: + params: + files: + - not/a/file.yaml + - another/non/file.yaml + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: verify errors=ignore + assert: + that: + - "this_not_set is not defined" diff --git a/test/integration/targets/lookup_indexed_items/aliases b/test/integration/targets/lookup_indexed_items/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_indexed_items/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_indexed_items/tasks/main.yml b/test/integration/targets/lookup_indexed_items/tasks/main.yml new file mode 100644 index 00000000..84f5fbce --- /dev/null +++ b/test/integration/targets/lookup_indexed_items/tasks/main.yml @@ -0,0 +1,16 @@ +- name: create unindexed list + shell: for i in $(seq 1 5); do echo "x" ; done; + register: list_data + +- name: create indexed list + set_fact: "{{ item[1] + item[0]|string }}=set" + with_indexed_items: "{{list_data.stdout_lines}}" + +- name: verify with_indexed_items result + assert: + that: + - "x0 == 'set'" + - "x1 == 'set'" + - "x2 == 'set'" + - "x3 == 'set'" + - "x4 == 'set'" diff --git a/test/integration/targets/lookup_ini/aliases b/test/integration/targets/lookup_ini/aliases new file mode 100644 index 00000000..f9f29ef3 --- /dev/null +++ b/test/integration/targets/lookup_ini/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_ini/lookup-8859-15.ini b/test/integration/targets/lookup_ini/lookup-8859-15.ini new file mode 100644 index 00000000..33f9c29d --- /dev/null +++ b/test/integration/targets/lookup_ini/lookup-8859-15.ini @@ -0,0 +1,7 @@ +[global] +# A comment +value1=Text associated with value1 
and global section +value2=Same for value2 and global section +value.dot=Properties with dot +field.with.space = another space +field_with_unicode=été indien où à château français ïîôû diff --git a/test/integration/targets/lookup_ini/lookup.ini b/test/integration/targets/lookup_ini/lookup.ini new file mode 100644 index 00000000..5b7cc34b --- /dev/null +++ b/test/integration/targets/lookup_ini/lookup.ini @@ -0,0 +1,25 @@ +[global] +# A comment +value1=Text associated with value1 and global section +value2=Same for value2 and global section +value.dot=Properties with dot +field.with.space = another space +unicode=été indien où à château français ïîôû + +[section1] +value1=section1/value1 +value2=section1/value2 + +[value_section] +value1=1 +value2=2 +value3=3 +other1=4 +other2=5 + +[other_section] +value1=1 +value2=2 +value3=3 +other1=4 +other2=5 diff --git a/test/integration/targets/lookup_ini/lookup.properties b/test/integration/targets/lookup_ini/lookup.properties new file mode 100644 index 00000000..d71ce121 --- /dev/null +++ b/test/integration/targets/lookup_ini/lookup.properties @@ -0,0 +1,6 @@ +# A comment +value1=Text associated with value1 +value2=Same for value2 +value.dot=Properties with dot +field.with.space = another space +field.with.unicode = été indien où à château français ïîôû diff --git a/test/integration/targets/lookup_ini/runme.sh b/test/integration/targets/lookup_ini/runme.sh new file mode 100755 index 00000000..71a507de --- /dev/null +++ b/test/integration/targets/lookup_ini/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_lookup_properties.yml -i ../../inventory -v "$@" diff --git a/test/integration/targets/lookup_ini/test_lookup_properties.yml b/test/integration/targets/lookup_ini/test_lookup_properties.yml new file mode 100644 index 00000000..a8cad9de --- /dev/null +++ b/test/integration/targets/lookup_ini/test_lookup_properties.yml @@ -0,0 +1,71 @@ +--- +- name: "Lookup test" + hosts: "localhost" +# connection: local + tasks: + - name: "read properties value" + set_fact: + test1: "{{lookup('ini', 'value1 type=properties file=lookup.properties')}}" + test2: "{{lookup('ini', 'value2 type=properties file=lookup.properties')}}" + test_dot: "{{lookup('ini', 'value.dot type=properties file=lookup.properties')}}" + field_with_space: "{{lookup('ini', 'field.with.space type=properties file=lookup.properties')}}" + - assert: + that: "{{item}} is defined" + with_items: [ 'test1', 'test2', 'test_dot', 'field_with_space' ] + - name: "read ini value" + set_fact: + value1_global: "{{lookup('ini', 'value1 section=global file=lookup.ini')}}" + value2_global: "{{lookup('ini', 'value2 section=global file=lookup.ini')}}" + value1_section1: "{{lookup('ini', 'value1 section=section1 file=lookup.ini')}}" + field_with_unicode: "{{lookup('ini', 'unicode section=global file=lookup.ini')}}" + - debug: var={{item}} + with_items: [ 'value1_global', 'value2_global', 'value1_section1', 'field_with_unicode' ] + - assert: + that: + - "field_with_unicode == 'été indien où à château français ïîôû'" + - name: "read ini value from iso8859-15 file" + set_fact: + field_with_unicode: "{{lookup('ini', 'field_with_unicode section=global encoding=iso8859-1 file=lookup-8859-15.ini')}}" + - assert: + that: + - "field_with_unicode == 'été indien où à château français ïîôû'" + - name: "read ini value with section and regexp" + set_fact: + value_section: "{{lookup('ini', 'value[1-2] section=value_section file=lookup.ini re=true')}}" + other_section: "{{lookup('ini', 'other[1-2]
section=other_section file=lookup.ini re=true')}}" + - debug: var={{item}} + with_items: [ 'value_section', 'other_section' ] + - assert: + that: + - "value_section == '1,2'" + - "other_section == '4,5'" + - name: "Reading unknown value" + set_fact: + unknown: "{{lookup('ini', 'unknown default=unknown section=section1 file=lookup.ini')}}" + - debug: var=unknown + - assert: + that: + - 'unknown == "unknown"' + - name: "Looping over section section1" + debug: msg="{{item}}" + with_ini: value[1-2] section=section1 file=lookup.ini re=true + register: _ + - assert: + that: + - '_.results.0.item == "section1/value1"' + - '_.results.1.item == "section1/value2"' + - name: "Looping over section value_section" + debug: msg="{{item}}" + with_ini: value[1-2] section=value_section file=lookup.ini re=true + register: _ + - assert: + that: + - '_.results.0.item == "1"' + - '_.results.1.item == "2"' + - debug: msg="{{item}}" + with_ini: value[1-2] section=section1 file=lookup.ini re=true + register: _ + - assert: + that: + - '_.results.0.item == "section1/value1"' + - '_.results.1.item == "section1/value2"' diff --git a/test/integration/targets/lookup_inventory_hostnames/aliases b/test/integration/targets/lookup_inventory_hostnames/aliases new file mode 100644 index 00000000..45489be8 --- /dev/null +++ b/test/integration/targets/lookup_inventory_hostnames/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_inventory_hostnames/inventory b/test/integration/targets/lookup_inventory_hostnames/inventory new file mode 100644 index 00000000..1b968af2 --- /dev/null +++ b/test/integration/targets/lookup_inventory_hostnames/inventory @@ -0,0 +1,6 @@ +[group01] +test01 +test05 +test03 +test02 +test04 diff --git a/test/integration/targets/lookup_inventory_hostnames/main.yml b/test/integration/targets/lookup_inventory_hostnames/main.yml new file mode 100644 index 00000000..afc09ea8 --- /dev/null +++ b/test/integration/targets/lookup_inventory_hostnames/main.yml @@ -0,0 +1,13 @@ +--- +- hosts: localhost + gather_facts: no + tasks: + - set_fact: + hosts_a: "{{ lookup('inventory_hostnames', 'group01', wantlist=true) }}" + + - set_fact: + hosts_b: "{{ groups['group01'] }}" + + - assert: + that: + - hosts_a == hosts_b diff --git a/test/integration/targets/lookup_inventory_hostnames/runme.sh b/test/integration/targets/lookup_inventory_hostnames/runme.sh new file mode 100755 index 00000000..449c66bb --- /dev/null +++ b/test/integration/targets/lookup_inventory_hostnames/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook main.yml -i inventory "$@" diff --git a/test/integration/targets/lookup_items/aliases b/test/integration/targets/lookup_items/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_items/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_items/tasks/main.yml b/test/integration/targets/lookup_items/tasks/main.yml new file mode 100644 index 00000000..12df8d0b --- /dev/null +++ b/test/integration/targets/lookup_items/tasks/main.yml @@ -0,0 +1,14 @@ +- name: test with_items + set_fact: "{{ item }}=moo" + with_items: + - 'foo' + - 'bar' + +- debug: var=foo +- debug: var=bar + +- name: verify with_items results + assert: + that: + - "foo == 'moo'" + 
- "bar == 'moo'" diff --git a/test/integration/targets/lookup_lines/aliases b/test/integration/targets/lookup_lines/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_lines/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_lines/tasks/main.yml b/test/integration/targets/lookup_lines/tasks/main.yml new file mode 100644 index 00000000..f864d720 --- /dev/null +++ b/test/integration/targets/lookup_lines/tasks/main.yml @@ -0,0 +1,13 @@ +- name: test with_lines + #shell: echo "{{ item }}" + set_fact: "{{ item }}=set" + with_lines: for i in $(seq 1 5); do echo "l$i" ; done; + +- name: verify with_lines results + assert: + that: + - "l1 == 'set'" + - "l2 == 'set'" + - "l3 == 'set'" + - "l4 == 'set'" + - "l5 == 'set'" diff --git a/test/integration/targets/lookup_list/aliases b/test/integration/targets/lookup_list/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_list/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_list/tasks/main.yml b/test/integration/targets/lookup_list/tasks/main.yml new file mode 100644 index 00000000..3c03014e --- /dev/null +++ b/test/integration/targets/lookup_list/tasks/main.yml @@ -0,0 +1,19 @@ +- name: Set variables to verify lookup_list + set_fact: "{{ item if item is string else item[0] }}={{ item }}" + with_list: + - a + - [b, c] + - d + +- name: Verify lookup_list + assert: + that: + - a is defined + - b is defined + - c is not defined + - d is defined + - b is iterable and b is not string + - b|length == 2 + - a == a + - b == ['b', 'c'] + - d == d diff --git a/test/integration/targets/lookup_nested/aliases b/test/integration/targets/lookup_nested/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_nested/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_nested/tasks/main.yml b/test/integration/targets/lookup_nested/tasks/main.yml new file mode 100644 index 00000000..fec081a3 --- /dev/null +++ b/test/integration/targets/lookup_nested/tasks/main.yml @@ -0,0 +1,18 @@ +- name: test with_nested + set_fact: "{{ item.0 + item.1 }}=x" + with_nested: + - [ 'a', 'b' ] + - [ 'c', 'd' ] + +- debug: var=ac +- debug: var=ad +- debug: var=bc +- debug: var=bd + +- name: verify with_nested results + assert: + that: + - "ac == 'x'" + - "ad == 'x'" + - "bc == 'x'" + - "bd == 'x'" diff --git a/test/integration/targets/lookup_password/aliases b/test/integration/targets/lookup_password/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_password/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_password/runme.sh b/test/integration/targets/lookup_password/runme.sh new file mode 100755 index 00000000..ac2c1704 --- /dev/null +++ b/test/integration/targets/lookup_password/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_TEST_PREFER_VENV=1 
+source virtualenv.sh + +# Requirements have to be installed prior to running ansible-playbook +# because plugins and requirements are loaded before the task runs +pip install passlib + +ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml -e "output_dir=${OUTPUT_DIR}" "$@" diff --git a/test/integration/targets/lookup_password/runme.yml b/test/integration/targets/lookup_password/runme.yml new file mode 100644 index 00000000..4f55c1da --- /dev/null +++ b/test/integration/targets/lookup_password/runme.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + roles: + - { role: lookup_password } diff --git a/test/integration/targets/lookup_password/tasks/main.yml b/test/integration/targets/lookup_password/tasks/main.yml new file mode 100644 index 00000000..4eeef151 --- /dev/null +++ b/test/integration/targets/lookup_password/tasks/main.yml @@ -0,0 +1,104 @@ +- name: create a password file + set_fact: + newpass: "{{ lookup('password', output_dir + '/lookup/password length=8') }}" + +- name: stat the password file directory + stat: path="{{output_dir}}/lookup" + register: result + +- name: assert the directory's permissions + assert: + that: + - result.stat.mode == '0700' + +- name: stat the password file + stat: path="{{output_dir}}/lookup/password" + register: result + +- name: assert the file's permissions + assert: + that: + - result.stat.mode == '0600' + +- name: get password length + shell: wc -c {{output_dir}}/lookup/password | awk '{print $1}' + register: wc_result + +- debug: var=wc_result.stdout + +- name: read password + shell: cat {{output_dir}}/lookup/password + register: cat_result + +- debug: var=cat_result.stdout + +- name: verify password + assert: + that: + - "wc_result.stdout == '9'" + - "cat_result.stdout == newpass" + - "' salt=' not in cat_result.stdout" + +- name: fetch password from an existing file + set_fact: + pass2: "{{ lookup('password', output_dir + '/lookup/password length=8') }}" + +- name: read password (again) + shell: cat {{output_dir}}/lookup/password + register: cat_result2 + +- debug: var=cat_result2.stdout + +- name: verify password (again) + assert: + that: + - "cat_result2.stdout == newpass" + - "' salt=' not in cat_result2.stdout" + + + +- name: create a password (with salt) file + debug: msg={{ lookup('password', output_dir + '/lookup/password_with_salt encrypt=sha256_crypt') }} + +- name: read password and salt + shell: cat {{output_dir}}/lookup/password_with_salt + register: cat_pass_salt + +- debug: var=cat_pass_salt.stdout + +- name: fetch unencrypted password + set_fact: + newpass: "{{ lookup('password', output_dir + '/lookup/password_with_salt') }}" + +- debug: var=newpass + +- name: verify password and salt + assert: + that: + - "cat_pass_salt.stdout != newpass" + - "cat_pass_salt.stdout.startswith(newpass)" + - "' salt=' in cat_pass_salt.stdout" + - "' salt=' not in newpass" + + +- name: fetch unencrypted password (using empty encrypt parameter) + set_fact: + newpass2: "{{ lookup('password', output_dir + '/lookup/password_with_salt encrypt=') }}" + +- name: verify lookup password behavior + assert: + that: + - "newpass == newpass2" + +- name: verify that we can generate a 1st password without writing it + set_fact: + newpass: "{{ lookup('password', '/dev/null') }}" + +- name: verify that we can generate a 2nd password without writing it + set_fact: + newpass2: "{{ lookup('password', '/dev/null') }}" + +- name: verify lookup password behavior with /dev/null + assert: + that: + - "newpass != newpass2" diff --git
a/test/integration/targets/lookup_pipe/aliases b/test/integration/targets/lookup_pipe/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_pipe/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_pipe/tasks/main.yml b/test/integration/targets/lookup_pipe/tasks/main.yml new file mode 100644 index 00000000..8aa1bc64 --- /dev/null +++ b/test/integration/targets/lookup_pipe/tasks/main.yml @@ -0,0 +1,9 @@ +# https://github.com/ansible/ansible/issues/6550 +- name: confirm pipe lookup works with a single positional arg + set_fact: + result: "{{ lookup('pipe', 'echo $OUTPUT_DIR') }}" + +- name: verify the expected output was received + assert: + that: + - "result == output_dir" diff --git a/test/integration/targets/lookup_random_choice/aliases b/test/integration/targets/lookup_random_choice/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_random_choice/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_random_choice/tasks/main.yml b/test/integration/targets/lookup_random_choice/tasks/main.yml new file mode 100644 index 00000000..e18126ae --- /dev/null +++ b/test/integration/targets/lookup_random_choice/tasks/main.yml @@ -0,0 +1,10 @@ +- name: test with_random_choice + set_fact: "random={{ item }}" + with_random_choice: + - "foo" + - "bar" + +- name: verify with_random_choice + assert: + that: + - "random in ['foo', 'bar']" diff --git a/test/integration/targets/lookup_sequence/aliases b/test/integration/targets/lookup_sequence/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_sequence/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_sequence/tasks/main.yml b/test/integration/targets/lookup_sequence/tasks/main.yml new file mode 100644 index 00000000..72181a42 --- /dev/null +++ b/test/integration/targets/lookup_sequence/tasks/main.yml @@ -0,0 +1,63 @@ +- name: test with_sequence + set_fact: "{{ 'x' + item }}={{ item }}" + with_sequence: start=0 end=3 + +- name: test with_sequence backwards + set_fact: "{{ 'y' + item }}={{ item }}" + with_sequence: start=3 end=0 stride=-1 + +- name: verify with_sequence + assert: + that: + - "x0 == '0'" + - "x1 == '1'" + - "x2 == '2'" + - "x3 == '3'" + - "y3 == '3'" + - "y2 == '2'" + - "y1 == '1'" + - "y0 == '0'" + +- name: test with_sequence not failing on count == 0 + debug: msg='previously failed with backward counting error' + with_sequence: count=0 + register: count_of_zero + +- name: test with_sequence does 1 when start == end + debug: msg='should run once' + with_sequence: start=1 end=1 + register: start_equal_end + +- name: test with_sequence count 1 + set_fact: "{{ 'x' + item }}={{ item }}" + with_sequence: count=1 + register: count_of_one + +- assert: + that: + - start_equal_end is not skipped + - count_of_zero is skipped + - count_of_one is not skipped + +- name: test with_sequence shortcut syntax (end) + set_fact: "{{ 'ws_z_' + item }}={{ item }}" + with_sequence: '4' + +- name: test with_sequence shortcut syntax (start-end/stride) + 
set_fact: "{{ 'ws_z_' + item }}=stride_{{ item }}" + with_sequence: '2-6/2' + +- name: test with_sequence shortcut syntax (start-end:format) + set_fact: "{{ 'ws_z_' + item }}={{ item }}" + with_sequence: '7-8:host%02d' + +- name: verify with_sequence shortcut syntax + assert: + that: + - "ws_z_1 == '1'" + - "ws_z_2 == 'stride_2'" + - "ws_z_3 == '3'" + - "ws_z_4 == 'stride_4'" + - "ws_z_6 == 'stride_6'" + - "ws_z_host07 == 'host07'" + - "ws_z_host08 == 'host08'" diff --git a/test/integration/targets/lookup_subelements/aliases b/test/integration/targets/lookup_subelements/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_subelements/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_subelements/tasks/main.yml b/test/integration/targets/lookup_subelements/tasks/main.yml new file mode 100644 index 00000000..5c706b27 --- /dev/null +++ b/test/integration/targets/lookup_subelements/tasks/main.yml @@ -0,0 +1,45 @@ +- name: test with_subelements + set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}" + with_subelements: + - "{{element_data}}" + - the_list + +- name: verify with_subelements results + assert: + that: + - "_xf == 'f'" + - "_xd == 'd'" + - "_ye == 'e'" + - "_yf == 'f'" + +- name: test with_subelements in subkeys + set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}" + with_subelements: + - "{{element_data}}" + - the.sub.key.list + +- name: verify with_subelements in subkeys results + assert: + that: + - "_xq == 'q'" + - "_xr == 'r'" + - "_yi == 'i'" + - "_yo == 'o'" + +- name: test with_subelements with missing key or subkey + set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}" + with_subelements: + - "{{element_data_missing}}" + - the.sub.key.list + - skip_missing: yes + register: _subelements_missing_subkeys + +- debug: var=_subelements_missing_subkeys +- debug: var=_subelements_missing_subkeys.results|length +- name: verify with_subelements in subkeys results + assert: + that: + - _subelements_missing_subkeys.skipped is not defined + - _subelements_missing_subkeys.results|length == 2 + - "_xk == 'k'" + - "_xl == 'l'" diff --git a/test/integration/targets/lookup_subelements/vars/main.yml b/test/integration/targets/lookup_subelements/vars/main.yml new file mode 100644 index 00000000..f7ef50f5 --- /dev/null +++ b/test/integration/targets/lookup_subelements/vars/main.yml @@ -0,0 +1,43 @@ +element_data: + - id: x + the_list: + - "f" + - "d" + the: + sub: + key: + list: + - "q" + - "r" + - id: y + the_list: + - "e" + - "f" + the: + sub: + key: + list: + - "i" + - "o" +element_data_missing: + - id: x + the_list: + - "f" + - "d" + the: + sub: + key: + list: + - "k" + - "l" + - id: y + the_list: + - "f" + - "d" + - id: z + the_list: + - "e" + - "f" + the: + sub: + key: diff --git a/test/integration/targets/lookup_template/aliases b/test/integration/targets/lookup_template/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_template/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_template/tasks/main.yml b/test/integration/targets/lookup_template/tasks/main.yml new file mode 100644 index 00000000..df115766 --- /dev/null +++ 
b/test/integration/targets/lookup_template/tasks/main.yml @@ -0,0 +1,19 @@ +# ref #18526 +- name: Test that we have a proper jinja search path in template lookup + set_fact: + hello_world: "{{ lookup('template', 'hello.txt') }}" + +- assert: + that: + - "hello_world|trim == 'Hello world!'" + + +- name: Test that we have a proper jinja search path in template lookup with different variable start and end string + vars: + my_var: world + set_fact: + hello_world_string: "{{ lookup('template', 'hello_string.txt', variable_start_string='[%', variable_end_string='%]') }}" + +- assert: + that: + - "hello_world_string|trim == 'Hello world!'" diff --git a/test/integration/targets/lookup_template/templates/hello.txt b/test/integration/targets/lookup_template/templates/hello.txt new file mode 100644 index 00000000..be15a4f3 --- /dev/null +++ b/test/integration/targets/lookup_template/templates/hello.txt @@ -0,0 +1 @@ +Hello {% include 'world.txt' %}! diff --git a/test/integration/targets/lookup_template/templates/hello_string.txt b/test/integration/targets/lookup_template/templates/hello_string.txt new file mode 100644 index 00000000..75199afd --- /dev/null +++ b/test/integration/targets/lookup_template/templates/hello_string.txt @@ -0,0 +1 @@ +Hello [% my_var %]! diff --git a/test/integration/targets/lookup_template/templates/world.txt b/test/integration/targets/lookup_template/templates/world.txt new file mode 100644 index 00000000..cc628ccd --- /dev/null +++ b/test/integration/targets/lookup_template/templates/world.txt @@ -0,0 +1 @@ +world diff --git a/test/integration/targets/lookup_together/aliases b/test/integration/targets/lookup_together/aliases new file mode 100644 index 00000000..bc987654 --- /dev/null +++ b/test/integration/targets/lookup_together/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_together/tasks/main.yml b/test/integration/targets/lookup_together/tasks/main.yml new file mode 100644 index 00000000..ee59a2ae --- /dev/null +++ b/test/integration/targets/lookup_together/tasks/main.yml @@ -0,0 +1,14 @@ +- name: test with_together + #shell: echo {{ item }} + set_fact: "{{ item.0 }}={{ item.1 }}" + with_together: + - [ 'a', 'b', 'c', 'd' ] + - [ '1', '2', '3', '4' ] + +- name: verify with_together results + assert: + that: + - "a == '1'" + - "b == '2'" + - "c == '3'" + - "d == '4'" diff --git a/test/integration/targets/lookup_unvault/aliases b/test/integration/targets/lookup_unvault/aliases new file mode 100644 index 00000000..4a2ce27c --- /dev/null +++ b/test/integration/targets/lookup_unvault/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +needs/root +skip/aix diff --git a/test/integration/targets/lookup_unvault/files/foot.txt b/test/integration/targets/lookup_unvault/files/foot.txt new file mode 100644 index 00000000..5716ca59 --- /dev/null +++ b/test/integration/targets/lookup_unvault/files/foot.txt @@ -0,0 +1 @@ +bar diff --git a/test/integration/targets/lookup_unvault/files/foot.txt.vault b/test/integration/targets/lookup_unvault/files/foot.txt.vault new file mode 100644 index 00000000..98ee41bc --- /dev/null +++ b/test/integration/targets/lookup_unvault/files/foot.txt.vault @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +35363932323438383333343462373431376162373631636238353061616565323630656464393939 +3937313630326662336264636662313163343832643239630a646436313833633135353834343364 
+63363039663765363365626531643533616232333533383239323234393934356639373136323635 +3632356163343031300a373766636130626237346630653537633764663063313439666135623032 +6139 diff --git a/test/integration/targets/lookup_unvault/runme.sh b/test/integration/targets/lookup_unvault/runme.sh new file mode 100755 index 00000000..a7a0be5a --- /dev/null +++ b/test/integration/targets/lookup_unvault/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +# run tests +ansible-playbook unvault.yml --vault-password-file='secret' -v "$@" diff --git a/test/integration/targets/lookup_unvault/secret b/test/integration/targets/lookup_unvault/secret new file mode 100644 index 00000000..f925edd3 --- /dev/null +++ b/test/integration/targets/lookup_unvault/secret @@ -0,0 +1 @@ +ssssshhhhhh diff --git a/test/integration/targets/lookup_unvault/unvault.yml b/test/integration/targets/lookup_unvault/unvault.yml new file mode 100644 index 00000000..f1f3b98a --- /dev/null +++ b/test/integration/targets/lookup_unvault/unvault.yml @@ -0,0 +1,9 @@ +- name: test vault lookup plugin + hosts: localhost + gather_facts: false + tasks: + - debug: msg={{lookup('unvault', 'foot.txt.vault')}} + - name: verify vault lookup works with both vaulted and unvaulted + assert: + that: + - lookup('unvault', 'foot.txt.vault') == lookup('unvault', 'foot.txt') diff --git a/test/integration/targets/lookup_url/aliases b/test/integration/targets/lookup_url/aliases new file mode 100644 index 00000000..28990148 --- /dev/null +++ b/test/integration/targets/lookup_url/aliases @@ -0,0 +1,5 @@ +destructive +shippable/posix/group1 +needs/httptester +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_url/meta/main.yml b/test/integration/targets/lookup_url/meta/main.yml new file mode 100644 index 00000000..374b5fdf --- /dev/null +++ b/test/integration/targets/lookup_url/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_http_tests diff --git a/test/integration/targets/lookup_url/tasks/main.yml b/test/integration/targets/lookup_url/tasks/main.yml new file mode 100644 index 00000000..4eaa32e0 --- /dev/null +++ b/test/integration/targets/lookup_url/tasks/main.yml @@ -0,0 +1,28 @@ +- name: Test that retrieving a url works + set_fact: + web_data: "{{ lookup('url', 'https://gist.githubusercontent.com/abadger/9858c22712f62a8effff/raw/43dd47ea691c90a5fa7827892c70241913351963/test') }}" + +- name: Assert that the url was retrieved + assert: + that: + - "'one' in web_data" + +- name: Test that retrieving a url with invalid cert fails + set_fact: + web_data: "{{ lookup('url', 'https://{{ badssl_host }}/') }}" + ignore_errors: True + register: url_invalid_cert + +- assert: + that: + - "url_invalid_cert.failed" + - "'Error validating the server' in url_invalid_cert.msg or 'Hostname mismatch' in url_invalid_cert.msg or ( url_invalid_cert.msg is search('hostname .* doesn.t match .*'))" + +- name: Test that retrieving a url with invalid cert with validate_certs=False works + set_fact: + web_data: "{{ lookup('url', 'https://{{ badssl_host }}/', validate_certs=False) }}" + register: url_no_validate_cert + +- assert: + that: + - "'{{ badssl_host_substring }}' in web_data" diff --git a/test/integration/targets/lookup_vars/aliases b/test/integration/targets/lookup_vars/aliases new file mode 100644 index 00000000..07b87020 --- /dev/null +++ b/test/integration/targets/lookup_vars/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +skip/aix +skip/python2.6 # lookups are 
controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/lookup_vars/tasks/main.yml b/test/integration/targets/lookup_vars/tasks/main.yml new file mode 100644 index 00000000..f24d8657 --- /dev/null +++ b/test/integration/targets/lookup_vars/tasks/main.yml @@ -0,0 +1,16 @@ +- name: Test that we can give it a single value and receive a single value + set_fact: + var_host: '{{ lookup("vars", "ansible_host") }}' + +- assert: + that: + - 'var_host == ansible_host' + +- name: Test that we can give a list of values to var and receive a list of values back + set_fact: + var_host_info: '{{ query("vars", "ansible_host", "ansible_connection") }}' + +- assert: + that: + - 'var_host_info[0] == ansible_host' + - 'var_host_info[1] == ansible_connection' diff --git a/test/integration/targets/loop_control/aliases b/test/integration/targets/loop_control/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/loop_control/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/loop_control/extended.yml b/test/integration/targets/loop_control/extended.yml new file mode 100644 index 00000000..6ad9933a --- /dev/null +++ b/test/integration/targets/loop_control/extended.yml @@ -0,0 +1,12 @@ +- name: loop_control/extended/include https://github.com/ansible/ansible/issues/61218 + hosts: localhost + gather_facts: false + tasks: + - name: loop on an include + include_tasks: inner.yml + loop: + - first + - second + - third + loop_control: + extended: yes diff --git a/test/integration/targets/loop_control/inner.yml b/test/integration/targets/loop_control/inner.yml new file mode 100644 index 00000000..1c286fa4 --- /dev/null +++ b/test/integration/targets/loop_control/inner.yml @@ -0,0 +1,9 @@ +- name: assert ansible_loop variables in include_tasks + assert: + that: + - ansible_loop.index == ansible_loop.index0 + 1 + - ansible_loop.revindex == ansible_loop.revindex0 + 1 + - ansible_loop.first == {{ ansible_loop.index == 1 }} + - ansible_loop.last == {{ ansible_loop.index == ansible_loop.length }} + - ansible_loop.length == 3 + - ansible_loop.allitems|join(',') == 'first,second,third' diff --git a/test/integration/targets/loop_control/label.yml b/test/integration/targets/loop_control/label.yml new file mode 100644 index 00000000..5ac85fdf --- /dev/null +++ b/test/integration/targets/loop_control/label.yml @@ -0,0 +1,23 @@ +- name: loop_control/label https://github.com/ansible/ansible/pull/36430 + hosts: localhost + gather_facts: false + tasks: + - set_fact: + loopthis: + - name: foo + label: foo_label + - name: bar + label: bar_label + + - name: check that item label is updated each iteration + debug: + msg: "{{ looped_var.name }}" + with_items: "{{ loopthis }}" + loop_control: + loop_var: looped_var + label: "looped_var {{ looped_var.label }}" +# +# - assert: +# that: +# - "output.results[0]['_ansible_item_label'] == 'looped_var foo_label'" +# - "output.results[1]['_ansible_item_label'] == 'looped_var bar_label'" diff --git a/test/integration/targets/loop_control/runme.sh b/test/integration/targets/loop_control/runme.sh new file mode 100755 index 00000000..af065ea0 --- /dev/null +++ b/test/integration/targets/loop_control/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eux + +# user output has: +#ok: [localhost] => (item=looped_var foo_label) => { +#ok: [localhost] => (item=looped_var bar_label) => { +MATCH='foo_label +bar_label' +[ "$(ansible-playbook label.yml "$@" |grep 'item='|sed -e 
's/^.*(item=looped_var \(.*\)).*$/\1/')" == "${MATCH}" ] + +ansible-playbook extended.yml "$@" diff --git a/test/integration/targets/loops/aliases b/test/integration/targets/loops/aliases new file mode 100644 index 00000000..ed821c27 --- /dev/null +++ b/test/integration/targets/loops/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/aix diff --git a/test/integration/targets/loops/files/data1.txt b/test/integration/targets/loops/files/data1.txt new file mode 100644 index 00000000..b044a82a --- /dev/null +++ b/test/integration/targets/loops/files/data1.txt @@ -0,0 +1 @@ + Hello World diff --git a/test/integration/targets/loops/files/data2.txt b/test/integration/targets/loops/files/data2.txt new file mode 100644 index 00000000..e9359ad1 --- /dev/null +++ b/test/integration/targets/loops/files/data2.txt @@ -0,0 +1 @@ + Olá Mundo diff --git a/test/integration/targets/loops/tasks/index_var_tasks.yml b/test/integration/targets/loops/tasks/index_var_tasks.yml new file mode 100644 index 00000000..fa9a5bdf --- /dev/null +++ b/test/integration/targets/loops/tasks/index_var_tasks.yml @@ -0,0 +1,3 @@ +- name: check that index var exists inside included tasks file + assert: + that: my_idx == item|int diff --git a/test/integration/targets/loops/tasks/main.yml b/test/integration/targets/loops/tasks/main.yml new file mode 100644 index 00000000..5575dd36 --- /dev/null +++ b/test/integration/targets/loops/tasks/main.yml @@ -0,0 +1,391 @@ +# +# loop_control/pause +# + +- name: Measure time before + shell: date +%s + register: before + +- debug: + var: i + with_sequence: count=3 + loop_control: + loop_var: i + pause: 2 + +- name: Measure time after + shell: date +%s + register: after + +# since there are 3 rounds with 2 seconds between them, the loop should last at least 4 seconds +# we do not test the upper bound, since CI can lag significantly +- assert: + that: + - '(after.stdout |int) - (before.stdout|int) >= 4' + +- name: test subsecond pause + block: + - name: Measure time before loop with .6s pause + set_fact: + times: "{{times|default([]) + [ lookup('pipe','date +%s.%3N') ]}}" + with_sequence: count=3 + loop_control: + pause: 0.6 + + - name: ensure lag, since there are 3 rounds with 0.6 seconds between them, the loop should last at least 1.2 seconds, allowing leeway due to CI lag + assert: + that: + - tdiff|float >= 1.2 + - tdiff|int < 3 + vars: + tdiff: '{{ times[2]|float - times[0]|float }}' + when: + - ansible_facts['distribution'] not in ("MacOSX", "FreeBSD") + +# +# Tests of loop syntax with args +# + +- name: Test that with_list works with a list + ping: + data: '{{ item }}' + with_list: + - 'Hello World' + - 'Olá Mundo' + register: results + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results["results"][0]["ping"] == "Hello World"' + - 'results["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that with_list works with a list inside a variable + ping: + data: '{{ item }}' + with_list: '{{ phrases }}' + register: results2 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results2["results"][0]["ping"] == "Hello World"' + - 'results2["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a manual list + ping: + data: '{{ item }}' + loop: + - 'Hello World' + - 'Olá Mundo' + register: results3 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results3["results"][0]["ping"] == "Hello World"' + - 'results3["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works
with a list in a variable + ping: + data: '{{ item }}' + loop: '{{ phrases }}' + register: results4 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results4["results"][0]["ping"] == "Hello World"' + - 'results4["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list via the list lookup + ping: + data: '{{ item }}' + loop: '{{ lookup("list", "Hello World", "Olá Mundo", wantlist=True) }}' + register: results5 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results5["results"][0]["ping"] == "Hello World"' + - 'results5["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list in a variable via the list lookup + ping: + data: '{{ item }}' + loop: '{{ lookup("list", wantlist=True, *phrases) }}' + register: results6 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results6["results"][0]["ping"] == "Hello World"' + - 'results6["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list via the query lookup + ping: + data: '{{ item }}' + loop: '{{ query("list", "Hello World", "Olá Mundo") }}' + register: results7 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results7["results"][0]["ping"] == "Hello World"' + - 'results7["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list in a variable via the query lookup + ping: + data: '{{ item }}' + loop: '{{ q("list", *phrases) }}' + register: results8 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results8["results"][0]["ping"] == "Hello World"' + - 'results8["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list and keyword args + ping: + data: '{{ item }}' + loop: '{{ q("file", "data1.txt", "data2.txt", lstrip=True) }}' + register: results9 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results9["results"][0]["ping"] == "Hello World"' + - 'results9["results"][1]["ping"] == "Olá Mundo"' + +- name: Test that loop works with a list in variable and keyword args + ping: + data: '{{ item }}' + loop: '{{ q("file", lstrip=True, *filenames) }}' + register: results10 + +- name: Assert that we ran the module twice with the correct strings + assert: + that: + - 'results10["results"][0]["ping"] == "Hello World"' + - 'results10["results"][1]["ping"] == "Olá Mundo"' + +# +# loop_control/index_var +# + +- name: check that the index var is created and increments as expected + assert: + that: my_idx == item|int + with_sequence: start=0 count=3 + loop_control: + index_var: my_idx + +- name: check that value of index var matches position of current item in source list + assert: + that: 'test_var.index(item) == my_idx' + vars: + test_var: ['a', 'b', 'c'] + with_items: "{{ test_var }}" + loop_control: + index_var: my_idx + +- name: check index var with included tasks file + include_tasks: index_var_tasks.yml + with_sequence: start=0 count=3 + loop_control: + index_var: my_idx + + +# The following test cases are to ensure that we don't have a regression on +# GitHub Issue https://github.com/ansible/ansible/issues/35481 +# +# This should execute and not cause a RuntimeError +- debug: + msg: "with_dict passed a list: {{item}}" + with_dict: "{{ a_list }}" + register: with_dict_passed_a_list + ignore_errors: True +- assert: + that: + - with_dict_passed_a_list is failed +- 
debug: + msg: "with_list passed a dict: {{item}}" + with_list: "{{ a_dict }}" + register: with_list_passed_a_dict + ignore_errors: True +- assert: + that: + - with_list_passed_a_dict is failed + +- debug: + var: "item" + loop: + - "{{ ansible_search_path }}" + register: loop_search_path + +- assert: + that: + - ansible_search_path == loop_search_path.results.0.item + +# https://github.com/ansible/ansible/issues/45189 +- name: with_X conditional delegate_to shortcircuit on templating error + debug: + msg: "loop" + when: false + delegate_to: localhost + with_list: "{{ fake_var }}" + register: result + failed_when: result is not skipped + +- name: loop conditional delegate_to shortcircuit on templating error + debug: + msg: "loop" + when: false + delegate_to: localhost + loop: "{{ fake_var }}" + register: result + failed_when: result is not skipped + +- name: Loop on literal empty list + debug: + loop: [] + register: literal_empty_list + failed_when: literal_empty_list is not skipped + +# https://github.com/ansible/ansible/issues/47372 +- name: Loop unsafe list + debug: + var: item + with_items: "{{ things|list|unique }}" + vars: + things: + - !unsafe foo + - !unsafe bar + +- name: extended loop info + assert: + that: + - ansible_loop.nextitem == 'orange' + - ansible_loop.index == 1 + - ansible_loop.index0 == 0 + - ansible_loop.first + - not ansible_loop.last + - ansible_loop.previtem is undefined + - ansible_loop.allitems == ['apple', 'orange', 'banana'] + - ansible_loop.revindex == 3 + - ansible_loop.revindex0 == 2 + - ansible_loop.length == 3 + loop: + - apple + - orange + - banana + loop_control: + extended: true + when: item == 'apple' + +- name: extended loop info 2 + assert: + that: + - ansible_loop.nextitem == 'banana' + - ansible_loop.index == 2 + - ansible_loop.index0 == 1 + - not ansible_loop.first + - not ansible_loop.last + - ansible_loop.previtem == 'apple' + - ansible_loop.allitems == ['apple', 'orange', 'banana'] + - ansible_loop.revindex == 2 + - ansible_loop.revindex0 == 1 + - ansible_loop.length == 3 + loop: + - apple + - orange + - banana + loop_control: + extended: true + when: item == 'orange' + +- name: extended loop info 3 + assert: + that: + - ansible_loop.nextitem is undefined + - ansible_loop.index == 3 + - ansible_loop.index0 == 2 + - not ansible_loop.first + - ansible_loop.last + - ansible_loop.previtem == 'orange' + - ansible_loop.allitems == ['apple', 'orange', 'banana'] + - ansible_loop.revindex == 1 + - ansible_loop.revindex0 == 0 + - ansible_loop.length == 3 + loop: + - apple + - orange + - banana + loop_control: + extended: true + when: item == 'banana' + +- name: Validate the loop_var name + assert: + that: + - ansible_loop_var == 'alvin' + loop: + - 1 + loop_control: + loop_var: alvin + +# https://github.com/ansible/ansible/issues/58820 +- name: Test using templated loop_var inside include_tasks + include_tasks: templated_loop_var_tasks.yml + loop: + - value + loop_control: + loop_var: "{{ loop_var_name }}" + vars: + loop_var_name: templated_loop_var_name + +# https://github.com/ansible/ansible/issues/59414 +- name: Test preserving original connection related vars + debug: + var: ansible_remote_tmp + vars: + ansible_remote_tmp: /tmp/test1 + with_items: + - 1 + - 2 + register: loop_out + +- assert: + that: + - loop_out['results'][1]['ansible_remote_tmp'] == '/tmp/test1' + +# https://github.com/ansible/ansible/issues/64169 +- include_vars: 64169.yml + +- set_fact: "{{ item.key }}={{ hostvars[inventory_hostname][item.value] }}" + with_dict: + foo: __foo + +- 
debug: + var: foo + +- assert: + that: + - foo[0] != 'foo1.0' + - foo[0] == unsafe_value + vars: + unsafe_value: !unsafe 'foo{{ version_64169 }}' + +- set_fact: "{{ item.key }}={{ hostvars[inventory_hostname][item.value] }}" + loop: "{{ dicty_dict|dict2items }}" + vars: + dicty_dict: + foo: __foo + +- debug: + var: foo + +- assert: + that: + - foo[0] == 'foo1.0' diff --git a/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml b/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml new file mode 100644 index 00000000..1f8f9697 --- /dev/null +++ b/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml @@ -0,0 +1,4 @@ +- name: Validate that the correct value was used + assert: + that: + - templated_loop_var_name == 'value' diff --git a/test/integration/targets/loops/vars/64169.yml b/test/integration/targets/loops/vars/64169.yml new file mode 100644 index 00000000..f48d616a --- /dev/null +++ b/test/integration/targets/loops/vars/64169.yml @@ -0,0 +1,2 @@ +__foo: + - "foo{{ version_64169 }}" diff --git a/test/integration/targets/loops/vars/main.yml b/test/integration/targets/loops/vars/main.yml new file mode 100644 index 00000000..5d85370d --- /dev/null +++ b/test/integration/targets/loops/vars/main.yml @@ -0,0 +1,8 @@ +--- +phrases: + - 'Hello World' + - 'Olá Mundo' +filenames: + - 'data1.txt' + - 'data2.txt' +version_64169: '1.0' diff --git a/test/integration/targets/meta_tasks/aliases b/test/integration/targets/meta_tasks/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/meta_tasks/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/meta_tasks/inventory.yml b/test/integration/targets/meta_tasks/inventory.yml new file mode 100644 index 00000000..5fb39e5f --- /dev/null +++ b/test/integration/targets/meta_tasks/inventory.yml @@ -0,0 +1,9 @@ +local: + hosts: + testhost: + host_var_role_name: role3 + testhost2: + host_var_role_name: role2 + vars: + ansible_connection: local + ansible_python_interpreter: "{{ ansible_playbook_python }}" diff --git a/test/integration/targets/meta_tasks/runme.sh b/test/integration/targets/meta_tasks/runme.sh new file mode 100755 index 00000000..3f456def --- /dev/null +++ b/test/integration/targets/meta_tasks/runme.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +set -eux + +# test end_host meta task, with when conditional +for test_strategy in linear free; do + out="$(ansible-playbook test_end_host.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out" + grep -q "META: ending play for testhost2" <<< "$out" + grep -q "play not ended for testhost" <<< "$out" + grep -qv "play not ended for testhost2" <<< "$out" + + out="$(ansible-playbook test_end_host_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out" + grep -q "META: ending play for testhost2" <<< "$out" + grep -q "play not ended for testhost" <<< "$out" + grep -qv "play not ended for testhost2" <<< "$out" +done + +# test end_host meta task, on all hosts +for test_strategy in linear free; do + out="$(ansible-playbook test_end_host_all.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: ending play for testhost" <<< "$out" + grep -q "META: ending play for testhost2" <<< "$out" + grep -qv "play not ended for testhost" <<< "$out" + grep -qv 
"play not ended for testhost2" <<< "$out" + + out="$(ansible-playbook test_end_host_all_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: ending play for testhost" <<< "$out" + grep -q "META: ending play for testhost2" <<< "$out" + grep -qv "play not ended for testhost" <<< "$out" + grep -qv "play not ended for testhost2" <<< "$out" +done + +# test end_play meta task +for test_strategy in linear free; do + out="$(ansible-playbook test_end_play.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: ending play" <<< "$out" + grep -qv 'Failed to end using end_play' <<< "$out" + + out="$(ansible-playbook test_end_play_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")" + + grep -q "META: ending play" <<< "$out" + grep -qv 'Failed to end using end_play' <<< "$out" +done diff --git a/test/integration/targets/meta_tasks/test_end_host.yml b/test/integration/targets/meta_tasks/test_end_host.yml new file mode 100644 index 00000000..a8bb0562 --- /dev/null +++ b/test/integration/targets/meta_tasks/test_end_host.yml @@ -0,0 +1,14 @@ +- name: "Testing end_host with strategy={{ test_strategy | default('linear') }}" + hosts: + - testhost + - testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + + - meta: end_host + when: "host_var_role_name == 'role2'" # end play for testhost2, see inventory + + - debug: + msg: "play not ended for {{ inventory_hostname }}" diff --git a/test/integration/targets/meta_tasks/test_end_host_all.yml b/test/integration/targets/meta_tasks/test_end_host_all.yml new file mode 100644 index 00000000..dab5e881 --- /dev/null +++ b/test/integration/targets/meta_tasks/test_end_host_all.yml @@ -0,0 +1,13 @@ +- name: "Testing end_host all hosts with strategy={{ test_strategy | default('linear') }}" + hosts: + - testhost + - testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + + - meta: end_host + + - debug: + msg: "play not ended {{ inventory_hostname }}" diff --git a/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml b/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml new file mode 100644 index 00000000..78b5a2e9 --- /dev/null +++ b/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml @@ -0,0 +1,13 @@ +- name: "Testing end_host all hosts with strategy={{ test_strategy | default('linear') }}" + hosts: + - testhost + - testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + + - ansible.builtin.meta: end_host + + - debug: + msg: "play not ended {{ inventory_hostname }}" diff --git a/test/integration/targets/meta_tasks/test_end_host_fqcn.yml b/test/integration/targets/meta_tasks/test_end_host_fqcn.yml new file mode 100644 index 00000000..bdb38b53 --- /dev/null +++ b/test/integration/targets/meta_tasks/test_end_host_fqcn.yml @@ -0,0 +1,14 @@ +- name: "Testing end_host with strategy={{ test_strategy | default('linear') }}" + hosts: + - testhost + - testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + + - ansible.builtin.meta: end_host + when: "host_var_role_name == 'role2'" # end play for testhost2, see inventory + + - debug: + msg: "play not ended for {{ inventory_hostname }}" diff --git a/test/integration/targets/meta_tasks/test_end_play.yml b/test/integration/targets/meta_tasks/test_end_play.yml new file mode 100644 index 00000000..29489dc4 --- /dev/null +++ 
b/test/integration/targets/meta_tasks/test_end_play.yml @@ -0,0 +1,12 @@ +- name: Testing end_play with strategy {{ test_strategy | default('linear') }} + hosts: testhost:testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + msg: "Testing end_play on host {{ inventory_hostname }}" + + - meta: end_play + + - fail: + msg: 'Failed to end using end_play' diff --git a/test/integration/targets/meta_tasks/test_end_play_fqcn.yml b/test/integration/targets/meta_tasks/test_end_play_fqcn.yml new file mode 100644 index 00000000..2ae67fbe --- /dev/null +++ b/test/integration/targets/meta_tasks/test_end_play_fqcn.yml @@ -0,0 +1,12 @@ +- name: Testing end_play with strategy {{ test_strategy | default('linear') }} + hosts: testhost:testhost2 + gather_facts: no + strategy: "{{ test_strategy | default('linear') }}" + tasks: + - debug: + msg: "Testing end_play on host {{ inventory_hostname }}" + + - ansible.builtin.meta: end_play + + - fail: + msg: 'Failed to end using end_play' diff --git a/test/integration/targets/missing_required_lib/aliases b/test/integration/targets/missing_required_lib/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/missing_required_lib/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/missing_required_lib/library/missing_required_lib.py b/test/integration/targets/missing_required_lib/library/missing_required_lib.py new file mode 100644 index 00000000..480ea001 --- /dev/null +++ b/test/integration/targets/missing_required_lib/library/missing_required_lib.py @@ -0,0 +1,37 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import ansible_missing_lib + HAS_LIB = True +except ImportError as e: + HAS_LIB = False + + +def main(): + module = AnsibleModule({ + 'url': {'type': 'bool'}, + 'reason': {'type': 'bool'}, + }) + kwargs = {} + if module.params['url']: + kwargs['url'] = 'https://github.com/ansible/ansible' + if module.params['reason']: + kwargs['reason'] = 'for fun' + if not HAS_LIB: + module.fail_json( + msg=missing_required_lib( + 'ansible_missing_lib', + **kwargs + ), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/missing_required_lib/runme.sh b/test/integration/targets/missing_required_lib/runme.sh new file mode 100755 index 00000000..2e1ea8d8 --- /dev/null +++ b/test/integration/targets/missing_required_lib/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux +export ANSIBLE_ROLES_PATH=../ +ansible-playbook -i ../../inventory runme.yml -e "output_dir=${OUTPUT_DIR}" -v "$@" diff --git a/test/integration/targets/missing_required_lib/runme.yml b/test/integration/targets/missing_required_lib/runme.yml new file mode 100644 index 00000000..e1df7959 --- /dev/null +++ b/test/integration/targets/missing_required_lib/runme.yml @@ -0,0 +1,57 @@ +- hosts: localhost + gather_facts: false + tasks: + - command: ansible localhost -m import_role -a role=missing_required_lib -e url=true -e reason=true + register: missing_required_lib_all + failed_when: missing_required_lib_all.rc == 0 + + - command: ansible localhost -m import_role -a role=missing_required_lib + register: missing_required_lib_none + failed_when: missing_required_lib_none.rc == 0 + + - 
command: ansible localhost -m import_role -a role=missing_required_lib -e url=true + register: missing_required_lib_url + failed_when: missing_required_lib_url.rc == 0 + + - command: ansible localhost -m import_role -a role=missing_required_lib -e reason=true + register: missing_required_lib_reason + failed_when: missing_required_lib_reason.rc == 0 + + - assert: + that: + - missing_required_lib_all.stdout is search(expected_all) + - missing_required_lib_none.stdout is search(expected_none) + - missing_required_lib_url.stdout is search(expected_url) + - missing_required_lib_reason.stdout is search(expected_reason) + vars: + expected_all: >- + Failed to import the required Python library \(ansible_missing_lib\) on + \S+'s Python \S+\. + This is required for fun\. See https://github.com/ansible/ansible for + more info. Please read the module documentation and install it in the + appropriate location\. If the required library is installed, but Ansible + is using the wrong Python interpreter, please consult the documentation + on ansible_python_interpreter + expected_none: >- + Failed to import the required Python library \(ansible_missing_lib\) on + \S+'s Python \S+\. + Please read the module documentation and install it in the + appropriate location\. If the required library is installed, but Ansible + is using the wrong Python interpreter, please consult the documentation + on ansible_python_interpreter + expected_url: >- + Failed to import the required Python library \(ansible_missing_lib\) on + \S+'s Python \S+\. + See https://github.com/ansible/ansible for + more info\. Please read the module documentation and install it in the + appropriate location\. If the required library is installed, but Ansible + is using the wrong Python interpreter, please consult the documentation + on ansible_python_interpreter + expected_reason: >- + Failed to import the required Python library \(ansible_missing_lib\) on + \S+'s Python \S+\. + This is required for fun\. + Please read the module documentation and install it in the + appropriate location\. 
If the required library is installed, but Ansible + is using the wrong Python interpreter, please consult the documentation + on ansible_python_interpreter diff --git a/test/integration/targets/missing_required_lib/tasks/main.yml b/test/integration/targets/missing_required_lib/tasks/main.yml new file mode 100644 index 00000000..a50f5ac4 --- /dev/null +++ b/test/integration/targets/missing_required_lib/tasks/main.yml @@ -0,0 +1,3 @@ +- missing_required_lib: + url: '{{ url|default(omit) }}' + reason: '{{ reason|default(omit) }}' diff --git a/test/integration/targets/module_defaults/aliases b/test/integration/targets/module_defaults/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/module_defaults/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py new file mode 100644 index 00000000..f7777b8a --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py @@ -0,0 +1,8 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.testns.testcoll.plugins.action.echoaction import ActionModule as BaseAM + + +class ActionModule(BaseAM): + pass diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py new file mode 100644 index 00000000..771395f2 --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo + + +def main(): + do_echo() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml new file mode 100644 index 00000000..62695fbc --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml @@ -0,0 +1,9 @@ +action_groups: + testgroup: + - testns.testcoll.echo1 + - testns.testcoll.echo2 +# note we can define defaults for an action + - testns.testcoll.echoaction +# note we can define defaults in this group for actions/modules in another collection + - testns.othercoll.other_echoaction + - testns.othercoll.other_echo1 diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py new file mode 100644 index 00000000..2fa097b2 --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py @@ -0,0 +1,19 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + 
+ +class ActionModule(ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset() + + def run(self, tmp=None, task_vars=None): + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(None, task_vars) + + result = dict(changed=False, args_in=self._task.args) + + return result diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py new file mode 100644 index 00000000..f5c5d737 --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py @@ -0,0 +1,15 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +from ansible.module_utils import basic +from ansible.module_utils.basic import _load_params, AnsibleModule + + +def do_echo(): + p = _load_params() + d = json.loads(basic._ANSIBLE_ARGS) + d['ANSIBLE_MODULE_ARGS'] = {} + basic._ANSIBLE_ARGS = json.dumps(d).encode('utf-8') + module = AnsibleModule(argument_spec={}) + module.exit_json(args_in=p) diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py new file mode 100644 index 00000000..771395f2 --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo + + +def main(): + do_echo() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py new file mode 100644 index 00000000..771395f2 --- /dev/null +++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo + + +def main(): + do_echo() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_defaults/library/test_module_defaults.py b/test/integration/targets/module_defaults/library/test_module_defaults.py new file mode 100644 index 00000000..ede8c995 --- /dev/null +++ b/test/integration/targets/module_defaults/library/test_module_defaults.py @@ -0,0 +1,30 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + arg1=dict(type='str', default='default1'), + arg2=dict(type='str', default='default2'), + arg3=dict(type='str', default='default3'), + ), + supports_check_mode=True + ) + + result = dict( + test_module_defaults=dict( + arg1=module.params['arg1'], + arg2=module.params['arg2'], + arg3=module.params['arg3'], + ), + ) + + module.exit_json(**result) + + +if 
__name__ == '__main__': + main() diff --git a/test/integration/targets/module_defaults/runme.sh b/test/integration/targets/module_defaults/runme.sh new file mode 100755 index 00000000..c19e607b --- /dev/null +++ b/test/integration/targets/module_defaults/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_defaults.yml "$@" diff --git a/test/integration/targets/module_defaults/tasks/main.yml b/test/integration/targets/module_defaults/tasks/main.yml new file mode 100644 index 00000000..3ed960d3 --- /dev/null +++ b/test/integration/targets/module_defaults/tasks/main.yml @@ -0,0 +1,89 @@ +- name: main block + vars: + test_file: /tmp/ansible-test.module_defaults.foo + module_defaults: + debug: + msg: test default + file: + path: '{{ test_file }}' + block: + - debug: + register: foo + + - name: test that 'debug' task used default 'msg' param + assert: + that: foo.msg == "test default" + + - name: remove test file + file: + state: absent + + - name: touch test file + file: + state: touch + + - name: stat test file + stat: + path: '{{ test_file }}' + register: foo + + - name: check that test file exists + assert: + that: foo.stat.exists + + - name: remove test file + file: + state: absent + + - name: test that module defaults from parent are inherited and merged + module_defaults: + # Meaningless values to make sure that 'module_defaults' gets + # evaluated for this block + foo: + bar: baz + block: + - debug: + register: foo + + - assert: + that: foo.msg == "test default" + + - name: test that we can override module defaults inherited from parent + module_defaults: + debug: + msg: "different test message" + block: + - debug: + register: foo + + - assert: + that: foo.msg == "different test message" + + - name: test that module defaults inherited from parent can be removed + module_defaults: + debug: {} + block: + - debug: + register: foo + + - assert: + that: + foo.msg == "Hello world!" + + - name: test that module defaults can be overridden by module params + block: + - debug: + msg: another test message + register: foo + + - assert: + that: + foo.msg == "another test message" + + - debug: + msg: '{{ omit }}' + register: foo + + - assert: + that: + foo.msg == "Hello world!" 
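The tasks above pin down how module_defaults compose: defaults declared on an outer scope fill in absent module arguments, an inner block can override them or wipe them with an empty mapping, and arguments written on the task itself always win. As a minimal self-contained sketch of those rules (a hypothetical playbook, not part of this patch; the file name and messages are illustrative only):

- hosts: localhost
  gather_facts: no
  module_defaults:
    debug:
      msg: from play defaults
  tasks:
    # No msg supplied, so the play-level default applies.
    - debug:
      register: out1
    - assert:
        that: out1.msg == 'from play defaults'

    # An argument set on the task itself beats any default.
    - debug:
        msg: from the task
      register: out2
    - assert:
        that: out2.msg == 'from the task'

    # An empty mapping clears inherited defaults, so debug falls back
    # to its built-in default msg of "Hello world!".
    - block:
        - debug:
          register: out3
        - assert:
            that: out3.msg == 'Hello world!'
      module_defaults:
        debug: {}

Saved as, say, defaults_sketch.yml and run with "ansible-playbook defaults_sketch.yml", all three asserts should pass, mirroring what the tasks above verify before test_defaults.yml below exercises the same machinery across collections and action groups.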
diff --git a/test/integration/targets/module_defaults/test_defaults.yml b/test/integration/targets/module_defaults/test_defaults.yml new file mode 100644 index 00000000..15b66362 --- /dev/null +++ b/test/integration/targets/module_defaults/test_defaults.yml @@ -0,0 +1,60 @@ +- hosts: localhost + gather_facts: no + collections: + - testns.testcoll + - testns.othercoll + module_defaults: + testns.testcoll.echoaction: + explicit_module_default: from playbook + testns.testcoll.echo1: + explicit_module_default: from playbook + group/testgroup: + group_module_default: from playbook + tasks: + - testns.testcoll.echoaction: + task_arg: from task + register: echoaction_fq + - echoaction: + task_arg: from task + register: echoaction_unq + - testns.testcoll.echo1: + task_arg: from task + register: echo1_fq + - echo1: + task_arg: from task + register: echo1_unq + - testns.testcoll.echo2: + task_arg: from task + register: echo2_fq + - echo2: + task_arg: from task + register: echo2_unq + - testns.othercoll.other_echoaction: + task_arg: from task + register: other_echoaction_fq + - other_echoaction: + task_arg: from task + register: other_echoaction_unq + - testns.othercoll.other_echo1: + task_arg: from task + register: other_echo1_fq + - other_echo1: + task_arg: from task + register: other_echo1_unq + + - debug: var=echo1_fq + + - assert: + that: + - "echoaction_fq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }" + - "echoaction_unq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }" + - "echo1_fq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }" + - "echo1_unq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }" + - "echo2_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + - "echo2_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + - "other_echoaction_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + - "other_echoaction_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + - "other_echo1_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + - "other_echo1_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }" + + - include_tasks: tasks/main.yml diff --git a/test/integration/targets/module_no_log/aliases b/test/integration/targets/module_no_log/aliases new file mode 100644 index 00000000..cbbb8804 --- /dev/null +++ b/test/integration/targets/module_no_log/aliases @@ -0,0 +1,5 @@ +shippable/posix/group1 +skip/aix # not configured to log user.info to /var/log/syslog +skip/freebsd # not configured to log user.info to /var/log/syslog +skip/osx # not configured to log user.info to /var/log/syslog +skip/macos # not configured to log user.info to /var/log/syslog diff --git a/test/integration/targets/module_no_log/library/module_that_logs.py b/test/integration/targets/module_no_log/library/module_that_logs.py new file mode 100644 index 00000000..44b36eeb --- /dev/null +++ b/test/integration/targets/module_no_log/library/module_that_logs.py @@ -0,0 +1,18 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + + +def main(): 
+ module = AnsibleModule(argument_spec=dict( + number=dict(type='int'), + )) + + module.log('My number is: (%d)' % module.params['number']) + module.exit_json() + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_no_log/tasks/main.yml b/test/integration/targets/module_no_log/tasks/main.yml new file mode 100644 index 00000000..cf9e5802 --- /dev/null +++ b/test/integration/targets/module_no_log/tasks/main.yml @@ -0,0 +1,61 @@ +- name: Detect syslog + stat: + path: /var/log/syslog + register: syslog + +- name: Detect journalctl + shell: command -V journalctl + ignore_errors: yes + changed_when: no + register: journalctl + +- block: + - name: Skip tests if logs were not found. + debug: + msg: Did not find /var/log/syslog or journalctl. Tests will be skipped. + - meta: end_play + when: journalctl is failed and not syslog.stat.exists + +- name: Generate random numbers for unique log entries + set_fact: + good_number: "{{ 999999999999 | random }}" + bad_number: "{{ 999999999999 | random }}" + +- name: Generate expected log entry messages + set_fact: + good_message: 'My number is: ({{ good_number }})' + bad_message: 'My number is: ({{ bad_number }})' + +- name: Generate log message search patterns + set_fact: + # these search patterns are designed to avoid matching themselves + good_search: '{{ good_message.replace(":", "[:]") }}' + bad_search: '{{ bad_message.replace(":", "[:]") }}' + +- name: Generate grep command + set_fact: + grep_command: "grep -e '{{ good_search }}' -e '{{ bad_search }}'" + +- name: Run a module that logs without no_log + module_that_logs: + number: "{{ good_number }}" + +- name: Run a module that logs with no_log + module_that_logs: + number: "{{ bad_number }}" + no_log: yes + +- name: Search for expected log messages + # if this fails the tests are probably running on a system which stores logs elsewhere + shell: "({{ grep_command }} /var/log/syslog) || (journalctl | {{ grep_command }})" + changed_when: no + register: grep + +- name: Verify the correct log messages were found + assert: + that: + # if the good message is not found then the cause is likely one of: + # 1) the remote system does not write user.info messages to the logs + # 2) the AnsibleModule.log method is not working + - good_message in grep.stdout + - bad_message not in grep.stdout diff --git a/test/integration/targets/module_precedence/aliases b/test/integration/targets/module_precedence/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/module_precedence/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/module_precedence/lib_no_extension/ping b/test/integration/targets/module_precedence/lib_no_extension/ping new file mode 100644 index 00000000..e30706e8 --- /dev/null +++ b/test/integration/targets/module_precedence/lib_no_extension/ping @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success. +description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib. +ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'library' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/lib_with_extension/a.ini b/test/integration/targets/module_precedence/lib_with_extension/a.ini new file mode 100644 index 00000000..80278c9e --- /dev/null +++ b/test/integration/targets/module_precedence/lib_with_extension/a.ini @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='a.ini'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/lib_with_extension/a.py b/test/integration/targets/module_precedence/lib_with_extension/a.py new file mode 100644 index 00000000..8eda1419 --- /dev/null +++ b/test/integration/targets/module_precedence/lib_with_extension/a.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='a.py'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/lib_with_extension/ping.ini b/test/integration/targets/module_precedence/lib_with_extension/ping.ini new file mode 100644 index 00000000..6f4b6a1a --- /dev/null +++ b/test/integration/targets/module_precedence/lib_with_extension/ping.ini @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='ping.ini'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/lib_with_extension/ping.py b/test/integration/targets/module_precedence/lib_with_extension/ping.py new file mode 100644 index 00000000..e30706e8 --- /dev/null +++ b/test/integration/targets/module_precedence/lib_with_extension/ping.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU
General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success. +description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib. +ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'library' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/modules_test.yml b/test/integration/targets/module_precedence/modules_test.yml new file mode 100644 index 00000000..cf3e8886 --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test.yml @@ -0,0 +1,10 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Use standard ping module + ping: + register: result + + - assert: + that: + - '"location" not in result' diff --git a/test/integration/targets/module_precedence/modules_test_envvar.yml b/test/integration/targets/module_precedence/modules_test_envvar.yml new file mode 100644 index 00000000..f52e2f91 --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test_envvar.yml @@ -0,0 +1,11 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Use ping from library path + ping: + register: result + + - assert: + that: + - '"location" in result' + - 'result["location"] == "library"' diff --git a/test/integration/targets/module_precedence/modules_test_envvar_ext.yml b/test/integration/targets/module_precedence/modules_test_envvar_ext.yml new file mode 100644 index 00000000..48f27c4f --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test_envvar_ext.yml @@ -0,0 +1,16 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Use ping from library path + ping: + register: result + + - name: Use a from library path + a: + register: a_res + + - assert: + that: + - '"location" in result' + - 'result["location"] == "library"' + - 'a_res["location"] == "a.py"' diff --git a/test/integration/targets/module_precedence/modules_test_multiple_roles.yml b/test/integration/targets/module_precedence/modules_test_multiple_roles.yml new file mode 100644 index 00000000..f4bd2649 --- /dev/null +++ 
b/test/integration/targets/module_precedence/modules_test_multiple_roles.yml @@ -0,0 +1,17 @@ +- hosts: testhost + gather_facts: no + vars: + expected_location: "role: foo" + roles: + - foo + - bar + + tasks: + - name: Use ping from role + ping: + register: result + + - assert: + that: + - '"location" in result' + - 'result["location"] == "{{ expected_location }}"' diff --git a/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml b/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml new file mode 100644 index 00000000..5403ae23 --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml @@ -0,0 +1,16 @@ +- hosts: testhost + gather_facts: no + vars: + expected_location: "role: bar" + roles: + - bar + - foo + tasks: + - name: Use ping from role + ping: + register: result + + - assert: + that: + - '"location" in result' + - 'result["location"] == "{{ expected_location }}"' diff --git a/test/integration/targets/module_precedence/modules_test_role.yml b/test/integration/targets/module_precedence/modules_test_role.yml new file mode 100644 index 00000000..ccbe31d8 --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test_role.yml @@ -0,0 +1,13 @@ +- hosts: testhost + gather_facts: no + roles: + - foo + tasks: + - name: Use ping from role + ping: + register: result + + - assert: + that: + - '"location" in result' + - 'result["location"] == "role: foo"' diff --git a/test/integration/targets/module_precedence/modules_test_role_ext.yml b/test/integration/targets/module_precedence/modules_test_role_ext.yml new file mode 100644 index 00000000..f8816f93 --- /dev/null +++ b/test/integration/targets/module_precedence/modules_test_role_ext.yml @@ -0,0 +1,18 @@ +- hosts: testhost + gather_facts: no + roles: + - foo + tasks: + - name: Use ping from role + ping: + register: result + + - name: Use a from role + a: + register: a_res + + - assert: + that: + - '"location" in result' + - 'result["location"] == "role: foo"' + - 'a_res["location"] == "role: foo, a.py"' diff --git a/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py b/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py new file mode 100644 index 00000000..e7776001 --- /dev/null +++ b/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib. +ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'role: bar' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml b/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml new file mode 100644 index 00000000..52c34020 --- /dev/null +++ b/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Use ping from inside bar role + ping: + register: result + +- name: Make sure that we used the ping module from the expected role + assert: + that: + - '"location" in result' + - 'result["location"] == "{{ expected_location }}"' diff --git a/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py b/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py new file mode 100644 index 00000000..a6d153ba --- /dev/null +++ b/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success. +description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib.
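+# (Passing -a 'data=hello' echoes ping: hello back; 'data=crash' makes the
+# module raise deliberately, mirroring the behaviour the module_tracebacks
+# target relies on.)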
+ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'role: foo' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml b/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml new file mode 100644 index 00000000..52c34020 --- /dev/null +++ b/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Use ping from inside foo role + ping: + register: result + +- name: Make sure that we used the ping module from the expected role + assert: + that: + - '"location" in result' + - 'result["location"] == "{{ expected_location }}"' diff --git a/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping b/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping new file mode 100644 index 00000000..a6d153ba --- /dev/null +++ b/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success. +description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib.
+ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'role: foo' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml b/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml new file mode 100644 index 00000000..985fc341 --- /dev/null +++ b/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Use ping from inside foo role + ping: + register: result + +- name: Make sure that we used the ping module from the foo role + assert: + that: + - '"location" in result' + - 'result["location"] == "role: foo"' diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini new file mode 100644 index 00000000..8b170291 --- /dev/null +++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='role: foo, a.ini'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py new file mode 100644 index 00000000..4bc5906d --- /dev/null +++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='role: foo, a.py'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini new file mode 100644 index 00000000..f9c04f5c --- /dev/null +++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, location='role: foo, ping.ini'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py new file mode 100644 index 00000000..a6d153ba --- /dev/null +++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ping +version_added: historical +short_description: Try to connect to host, verify a usable python and return C(pong) on success. +description: + - A trivial test module, this module always returns C(pong) on successful + contact. It does not make sense in playbooks, but it is useful from + C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured. + - This is NOT ICMP ping, this is just a trivial test module. +options: {} +author: + - "Ansible Core Team" + - "Michael DeHaan" +''' + +EXAMPLES = ''' +# Test we can logon to 'webservers' and execute python with json lib. +ansible webservers -m ping +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + data=dict(required=False, default=None), + ), + supports_check_mode=True + ) + result = dict(ping='pong') + if module.params['data']: + if module.params['data'] == 'crash': + raise Exception("boom") + result['ping'] = module.params['data'] + result['location'] = 'role: foo' + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml b/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml new file mode 100644 index 00000000..985fc341 --- /dev/null +++ b/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Use ping from inside foo role + ping: + register: result + +- name: Make sure that we used the ping module from the foo role + assert: + that: + - '"location" in result' + - 'result["location"] == "role: foo"' diff --git a/test/integration/targets/module_precedence/runme.sh b/test/integration/targets/module_precedence/runme.sh new file mode 100755 index 00000000..0f6a98fe --- /dev/null +++ b/test/integration/targets/module_precedence/runme.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -eux + +# Standard ping module +ansible-playbook modules_test.yml -i ../../inventory -v "$@" + +# Library path ping module +ANSIBLE_LIBRARY=lib_with_extension ansible-playbook modules_test_envvar.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_no_extension ansible-playbook modules_test_envvar.yml -i ../../inventory -v "$@" + +# ping module from role +ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@" +ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@" + +# ping module from role when there's a library path module too +ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i
../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@" + +# ping module in multiple roles: Note that this will use the first module found +# which is the current way things work but may not be the best way +ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@" +ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@" + +# And prove that with multiple roles, it's the order the roles are listed in the play that matters +ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles_reverse_order.yml -i ../../inventory -v "$@" + +# Tests for MODULE_IGNORE_EXTS. +# +# Very similar to two tests above, but adds a check to test extension +# precedence. Separate from the above playbooks because we *only* care about +# extensions here and 'a' will not exist when the above playbooks run with +# non-extension library/role paths. There is also no way to guarantee that +# these tests will be useful due to how the pluginloader seems to work. It uses +# os.listdir which returns things in an arbitrary order (likely dependent on +# filesystem). If it happens to return 'a.py' on the test node before it +# returns 'a.ini', then this test is pointless anyway because there's no chance +# that 'a.ini' would ever have run regardless of what MODULE_IGNORE_EXTS is set +# to. The hope is that we test across enough systems that one would fail this +# test if the MODULE_IGNORE_EXTS broke, but there is no guarantee. This would +# perhaps be better as a mocked unit test because of this but would require +# a fair bit of work to be feasible as none of that loader logic is tested at +# all right now. 
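+#
+# Purely as an illustration (not executed here, and assuming this version
+# exposes the setting through the ANSIBLE_MODULE_IGNORE_EXTS environment
+# variable): ANSIBLE_MODULE_IGNORE_EXTS=.ini would explicitly restate the
+# default behaviour of skipping 'a.ini' in favour of 'a.py'.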
+ANSIBLE_LIBRARY=lib_with_extension ansible-playbook modules_test_envvar_ext.yml -i ../../inventory -v "$@" +ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role_ext.yml -i ../../inventory -v "$@" diff --git a/test/integration/targets/module_tracebacks/aliases b/test/integration/targets/module_tracebacks/aliases new file mode 100644 index 00000000..804f0460 --- /dev/null +++ b/test/integration/targets/module_tracebacks/aliases @@ -0,0 +1,3 @@ +shippable/posix/group4 +needs/ssh +skip/aix diff --git a/test/integration/targets/module_tracebacks/inventory b/test/integration/targets/module_tracebacks/inventory new file mode 100644 index 00000000..91565267 --- /dev/null +++ b/test/integration/targets/module_tracebacks/inventory @@ -0,0 +1,5 @@ +testhost_local ansible_connection=local +testhost_ssh ansible_connection=ssh ansible_host=localhost + +[all:vars] +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/module_tracebacks/runme.sh b/test/integration/targets/module_tracebacks/runme.sh new file mode 100755 index 00000000..b8ac8068 --- /dev/null +++ b/test/integration/targets/module_tracebacks/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook traceback.yml -i inventory "$@" diff --git a/test/integration/targets/module_tracebacks/traceback.yml b/test/integration/targets/module_tracebacks/traceback.yml new file mode 100644 index 00000000..b1f0b516 --- /dev/null +++ b/test/integration/targets/module_tracebacks/traceback.yml @@ -0,0 +1,21 @@ +- hosts: all + gather_facts: no + tasks: + - name: intentionally fail module execution + ping: + data: crash + ignore_errors: yes + register: ping + +- hosts: localhost + gather_facts: no + tasks: + - name: verify exceptions were properly captured + assert: + that: + - hostvars.testhost_local.ping is failed + - "'boom' in hostvars.testhost_local.ping.exception" + - "'boom' in hostvars.testhost_local.ping.module_stderr" + - hostvars.testhost_ssh.ping is failed + - "'boom' in hostvars.testhost_ssh.ping.exception" + - "'boom' in hostvars.testhost_ssh.ping.module_stdout" diff --git a/test/integration/targets/module_utils/aliases b/test/integration/targets/module_utils/aliases new file mode 100644 index 00000000..2f5770ff --- /dev/null +++ b/test/integration/targets/module_utils/aliases @@ -0,0 +1,3 @@ +shippable/posix/group3 +needs/root +needs/target/setup_nobody diff --git a/test/integration/targets/module_utils/callback/pure_json.py b/test/integration/targets/module_utils/callback/pure_json.py new file mode 100644 index 00000000..1723d7bb --- /dev/null +++ b/test/integration/targets/module_utils/callback/pure_json.py @@ -0,0 +1,31 @@ +# (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: pure_json + type: stdout + short_description: only outputs the module results as json +''' + +import json + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'pure_json' + + def v2_runner_on_failed(self, result, ignore_errors=False): + self._display.display(json.dumps(result._result)) + + def v2_runner_on_ok(self, result): + self._display.display(json.dumps(result._result)) + + def v2_runner_on_skipped(self, result): + self._display.display(json.dumps(result._result)) diff --git 
a/test/integration/targets/module_utils/collections/ansible_collections/testns/testcoll/plugins/module_utils/legit.py b/test/integration/targets/module_utils/collections/ansible_collections/testns/testcoll/plugins/module_utils/legit.py new file mode 100644 index 00000000..b9d63482 --- /dev/null +++ b/test/integration/targets/module_utils/collections/ansible_collections/testns/testcoll/plugins/module_utils/legit.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def importme(): + return "successfully imported from testns.testcoll" diff --git a/test/integration/targets/module_utils/library/test.py b/test/integration/targets/module_utils/library/test.py new file mode 100644 index 00000000..fbb7e6e2 --- /dev/null +++ b/test/integration/targets/module_utils/library/test.py @@ -0,0 +1,85 @@ +#!/usr/bin/python +# Most of these names are only available via PluginLoader so pylint doesn't +# know they exist +# pylint: disable=no-name-in-module +results = {} + +# Test import with no from +import ansible.module_utils.foo0 +results['foo0'] = ansible.module_utils.foo0.data + +# Test depthful import with no from +import ansible.module_utils.bar0.foo +results['bar0'] = ansible.module_utils.bar0.foo.data + +# Test import of module_utils/foo1.py +from ansible.module_utils import foo1 +results['foo1'] = foo1.data + +# Test import of an identifier inside of module_utils/foo2.py +from ansible.module_utils.foo2 import data +results['foo2'] = data + +# Test import of module_utils/bar1/__init__.py +from ansible.module_utils import bar1 +results['bar1'] = bar1.data + +# Test import of an identifier inside of module_utils/bar2/__init__.py +from ansible.module_utils.bar2 import data +results['bar2'] = data + +# Test import of module_utils/baz1/one.py +from ansible.module_utils.baz1 import one +results['baz1'] = one.data + +# Test import of an identifier inside of module_utils/baz2/one.py +from ansible.module_utils.baz2.one import data +results['baz2'] = data + +# Test import of module_utils/spam1/ham/eggs/__init__.py +from ansible.module_utils.spam1.ham import eggs +results['spam1'] = eggs.data + +# Test import of an identifier inside module_utils/spam2/ham/eggs/__init__.py +from ansible.module_utils.spam2.ham.eggs import data +results['spam2'] = data + +# Test import of module_utils/spam3/ham/bacon.py +from ansible.module_utils.spam3.ham import bacon +results['spam3'] = bacon.data + +# Test import of an identifier inside of module_utils/spam4/ham/bacon.py +from ansible.module_utils.spam4.ham.bacon import data +results['spam4'] = data + +# Test import of module_utils.spam5.ham bacon and eggs (modules) +from ansible.module_utils.spam5.ham import bacon, eggs +results['spam5'] = (bacon.data, eggs.data) + +# Test import of module_utils.spam6.ham bacon and eggs (identifiers) +from ansible.module_utils.spam6.ham import bacon, eggs +results['spam6'] = (bacon, eggs) + +# Test import of module_utils.spam7.ham bacon and eggs (module and identifier) +from ansible.module_utils.spam7.ham import bacon, eggs +results['spam7'] = (bacon.data, eggs) + +# Test import of module_utils/spam8/ham/bacon.py and module_utils/spam8/ham/eggs.py separately +from ansible.module_utils.spam8.ham import bacon +from ansible.module_utils.spam8.ham import eggs +results['spam8'] = (bacon.data, eggs) + +# Test that import of module_utils/qux1/quux.py using as works +from ansible.module_utils.qux1 import quux as one +results['qux1'] = one.data + +# Test that importing qux2/quux.py and 
qux2/quuz.py using as works +from ansible.module_utils.qux2 import quux as one, quuz as two +results['qux2'] = (one.data, two.data) + +# Test depth +from ansible.module_utils.a.b.c.d.e.f.g.h import data + +results['abcdefgh'] = data +from ansible.module_utils.basic import AnsibleModule +AnsibleModule(argument_spec=dict()).exit_json(**results) diff --git a/test/integration/targets/module_utils/library/test_alias_deprecation.py b/test/integration/targets/module_utils/library/test_alias_deprecation.py new file mode 100644 index 00000000..96410fc4 --- /dev/null +++ b/test/integration/targets/module_utils/library/test_alias_deprecation.py @@ -0,0 +1,15 @@ +#!/usr/bin/python + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts import data + +results = {"data": data} + +arg_spec = dict( + foo=dict(type='str', aliases=['baz'], deprecated_aliases=[dict(name='baz', version='9.99')]) +) + +AnsibleModule(argument_spec=arg_spec).exit_json(**results) diff --git a/test/integration/targets/module_utils/library/test_cwd_missing.py b/test/integration/targets/module_utils/library/test_cwd_missing.py new file mode 100644 index 00000000..cd1f9c77 --- /dev/null +++ b/test/integration/targets/module_utils/library/test_cwd_missing.py @@ -0,0 +1,33 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + # This module verifies that AnsibleModule works when cwd does not exist. + # This situation can occur as a race condition when the following conditions are met: + # + # 1) Execute a module which has high startup overhead prior to instantiating AnsibleModule (0.5s is enough in many cases). + # 2) Run the module async as the last task in a playbook using connection=local (a fire-and-forget task). + # 3) Remove the directory containing the playbook immediately after playbook execution ends (playbook in a temp dir). + # + # To ease testing of this race condition the deletion of cwd is handled in this module. + # This avoids race conditions in the test, including timing cwd deletion between AnsiballZ wrapper execution and AnsibleModule instantiation. + # The timing issue with AnsiballZ is due to cwd checking in the wrapper when code coverage is enabled. + + temp = os.path.abspath('temp') + + os.mkdir(temp) + os.chdir(temp) + os.rmdir(temp) + + module = AnsibleModule(argument_spec=dict()) + module.exit_json(before=temp, after=os.getcwd()) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_utils/library/test_cwd_unreadable.py b/test/integration/targets/module_utils/library/test_cwd_unreadable.py new file mode 100644 index 00000000..d65f31ac --- /dev/null +++ b/test/integration/targets/module_utils/library/test_cwd_unreadable.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + # This module verifies that AnsibleModule works when cwd exists but is unreadable. + # This situation can occur when running tasks as an unprivileged user. + + try: + cwd = os.getcwd() + except OSError: + # Compensate for macOS being unable to access cwd as an unprivileged user. + # This test is a no-op in this case. 
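+        # (Root can read any cwd regardless of permissions, which is why the
+        # setcwd playbook runs this module as the unprivileged 'nobody' user.)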
+ # Testing for os.getcwd() failures is handled by the test_cwd_missing module. + cwd = '/' + os.chdir(cwd) + + module = AnsibleModule(argument_spec=dict()) + module.exit_json(before=cwd, after=os.getcwd()) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_utils/library/test_env_override.py b/test/integration/targets/module_utils/library/test_env_override.py new file mode 100644 index 00000000..94e3051b --- /dev/null +++ b/test/integration/targets/module_utils/library/test_env_override.py @@ -0,0 +1,11 @@ +#!/usr/bin/python +# Most of these names are only available via PluginLoader so pylint doesn't +# know they exist +# pylint: disable=no-name-in-module +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.json_utils import data +from ansible.module_utils.mork import data as mork_data + +results = {"json_utils": data, "mork": mork_data} + +AnsibleModule(argument_spec=dict()).exit_json(**results) diff --git a/test/integration/targets/module_utils/library/test_failure.py b/test/integration/targets/module_utils/library/test_failure.py new file mode 100644 index 00000000..e5257aef --- /dev/null +++ b/test/integration/targets/module_utils/library/test_failure.py @@ -0,0 +1,12 @@ +#!/usr/bin/python + +results = {} +# Test that we are rooted correctly +# Following files: +# module_utils/yak/zebra/foo.py +from ansible.module_utils.zebra import foo + +results['zebra'] = foo.data + +from ansible.module_utils.basic import AnsibleModule +AnsibleModule(argument_spec=dict()).exit_json(**results) diff --git a/test/integration/targets/module_utils/library/test_no_log.py b/test/integration/targets/module_utils/library/test_no_log.py new file mode 100644 index 00000000..770e0b3a --- /dev/null +++ b/test/integration/targets/module_utils/library/test_no_log.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + explicit_pass=dict(type='str', no_log=True), + fallback_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['SECRET_ENV'])), + default_pass=dict(type='str', no_log=True, default='zyx'), + normal=dict(type='str', default='plaintext'), + suboption=dict( + type='dict', + options=dict( + explicit_sub_pass=dict(type='str', no_log=True), + fallback_sub_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['SECRET_SUB_ENV'])), + default_sub_pass=dict(type='str', no_log=True, default='xvu'), + normal=dict(type='str', default='plaintext'), + ), + ), + ), + ) + + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_utils/library/test_optional.py b/test/integration/targets/module_utils/library/test_optional.py new file mode 100644 index 00000000..4d0225d9 --- /dev/null +++ b/test/integration/targets/module_utils/library/test_optional.py @@ -0,0 +1,84 @@ +#!/usr/bin/python +# Most of these names are only available via PluginLoader so pylint doesn't +# know they exist +# pylint: disable=no-name-in-module +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + +# internal constants to keep pylint from griping about constant-valued conditionals +_private_false 
= False +_private_true = True + +# module_utils import statements nested below any block are considered optional "best-effort" for AnsiballZ to include. +# test a number of different import shapes and nesting types to exercise this... + +# first, some nested imports that should succeed... +try: + from ansible.module_utils.urls import fetch_url as yep1 +except ImportError: + yep1 = None + +try: + import ansible.module_utils.common.text.converters as yep2 +except ImportError: + yep2 = None + +try: + # optional import from a legit collection + from ansible_collections.testns.testcoll.plugins.module_utils.legit import importme as yep3 +except ImportError: + yep3 = None + +# and a bunch that should fail to be found, but not break the module_utils payload build in the process... +try: + from ansible.module_utils.bogus import fromnope1 +except ImportError: + fromnope1 = None + +if _private_false: + from ansible.module_utils.alsobogus import fromnope2 +else: + fromnope2 = None + +try: + import ansible.module_utils.verybogus + nope1 = ansible.module_utils.verybogus +except ImportError: + nope1 = None + +# deepish nested with multiple block types - make sure the AST walker made it all the way down +try: + if _private_true: + if _private_true: + if _private_true: + if _private_true: + try: + import ansible.module_utils.stillbogus as nope2 + except ImportError: + raise +except ImportError: + nope2 = None + +try: + # optional import from a valid collection with an invalid package + from ansible_collections.testns.testcoll.plugins.module_utils.bogus import collnope1 +except ImportError: + collnope1 = None + +try: + # optional import from a bogus collection + from ansible_collections.bogusns.boguscoll.plugins.module_utils.bogus import collnope2 +except ImportError: + collnope2 = None + +module = AnsibleModule(argument_spec={}) + +if not all([yep1, yep2, yep3]): + module.fail_json(msg='one or more existing optional imports did not resolve') + +if any([fromnope1, fromnope2, nope1, nope2, collnope1, collnope2]): + module.fail_json(msg='one or more missing optional imports resolved unexpectedly') + +module.exit_json(msg='all optional imports behaved as expected') diff --git a/test/integration/targets/module_utils/library/test_override.py b/test/integration/targets/module_utils/library/test_override.py new file mode 100644 index 00000000..9ff54bf9 --- /dev/null +++ b/test/integration/targets/module_utils/library/test_override.py @@ -0,0 +1,7 @@ +#!/usr/bin/python +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts import data + +results = {"data": data} + +AnsibleModule(argument_spec=dict()).exit_json(**results) diff --git a/test/integration/targets/module_utils/library/test_recursive_diff.py b/test/integration/targets/module_utils/library/test_recursive_diff.py new file mode 100644 index 00000000..0cf39d9c --- /dev/null +++ b/test/integration/targets/module_utils/library/test_recursive_diff.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import recursive_diff + + +def main(): + module = AnsibleModule( + { + 'a': {'type': 'dict'}, + 'b': {'type': 'dict'}, + } + ) + + module.exit_json( + the_diff=recursive_diff( + module.params['a'], 
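+            # recursive_diff() returns a pair of dicts (keys/values unique to
+            # 'a', keys/values unique to 'b'), or None when the inputs match.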
module.params['b'], + ), + ) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/module_utils/module_utils/__init__.py b/test/integration/targets/module_utils/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/__init__.py b/test/integration/targets/module_utils/module_utils/a/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py new file mode 100644 index 00000000..722f4b77 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py @@ -0,0 +1 @@ +data = 'abcdefgh' diff --git a/test/integration/targets/module_utils/module_utils/bar0/__init__.py b/test/integration/targets/module_utils/module_utils/bar0/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/bar0/foo.py b/test/integration/targets/module_utils/module_utils/bar0/foo.py new file mode 100644 index 00000000..1072dcc2 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/bar0/foo.py @@ -0,0 +1 @@ +data = 'bar0' diff --git a/test/integration/targets/module_utils/module_utils/bar1/__init__.py b/test/integration/targets/module_utils/module_utils/bar1/__init__.py new file mode 100644 index 00000000..68e43509 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/bar1/__init__.py @@ -0,0 +1 @@ +data = 'bar1' diff --git a/test/integration/targets/module_utils/module_utils/bar2/__init__.py b/test/integration/targets/module_utils/module_utils/bar2/__init__.py new file mode 100644 index 00000000..59e86afd --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/bar2/__init__.py @@ -0,0 +1 @@ +data = 'bar2' diff --git a/test/integration/targets/module_utils/module_utils/baz1/__init__.py b/test/integration/targets/module_utils/module_utils/baz1/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/baz1/one.py b/test/integration/targets/module_utils/module_utils/baz1/one.py new file mode 100644 index 00000000..e5d7894a --- /dev/null +++ 
b/test/integration/targets/module_utils/module_utils/baz1/one.py @@ -0,0 +1 @@ +data = 'baz1' diff --git a/test/integration/targets/module_utils/module_utils/baz2/__init__.py b/test/integration/targets/module_utils/module_utils/baz2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/baz2/one.py b/test/integration/targets/module_utils/module_utils/baz2/one.py new file mode 100644 index 00000000..1efe196c --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/baz2/one.py @@ -0,0 +1 @@ +data = 'baz2' diff --git a/test/integration/targets/module_utils/module_utils/facts.py b/test/integration/targets/module_utils/module_utils/facts.py new file mode 100644 index 00000000..ba7cbb7b --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/facts.py @@ -0,0 +1 @@ +data = 'overridden facts.py' diff --git a/test/integration/targets/module_utils/module_utils/foo.py b/test/integration/targets/module_utils/module_utils/foo.py new file mode 100644 index 00000000..20698f1f --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/foo.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +foo = "FOO FROM foo.py" diff --git a/test/integration/targets/module_utils/module_utils/foo0.py b/test/integration/targets/module_utils/module_utils/foo0.py new file mode 100644 index 00000000..4b528b6d --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/foo0.py @@ -0,0 +1 @@ +data = 'foo0' diff --git a/test/integration/targets/module_utils/module_utils/foo1.py b/test/integration/targets/module_utils/module_utils/foo1.py new file mode 100644 index 00000000..18e0cef1 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/foo1.py @@ -0,0 +1 @@ +data = 'foo1' diff --git a/test/integration/targets/module_utils/module_utils/foo2.py b/test/integration/targets/module_utils/module_utils/foo2.py new file mode 100644 index 00000000..feb142df --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/foo2.py @@ -0,0 +1 @@ +data = 'foo2' diff --git a/test/integration/targets/module_utils/module_utils/qux1/__init__.py b/test/integration/targets/module_utils/module_utils/qux1/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/qux1/quux.py b/test/integration/targets/module_utils/module_utils/qux1/quux.py new file mode 100644 index 00000000..3d288c96 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/qux1/quux.py @@ -0,0 +1 @@ +data = 'qux1' diff --git a/test/integration/targets/module_utils/module_utils/qux2/__init__.py b/test/integration/targets/module_utils/module_utils/qux2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/qux2/quux.py b/test/integration/targets/module_utils/module_utils/qux2/quux.py new file mode 100644 index 00000000..496d446a --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/qux2/quux.py @@ -0,0 +1 @@ +data = 'qux2:quux' diff --git a/test/integration/targets/module_utils/module_utils/qux2/quuz.py b/test/integration/targets/module_utils/module_utils/qux2/quuz.py new file mode 100644 index 00000000..cdc0fad7 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/qux2/quuz.py @@ -0,0 +1 @@ +data = 'qux2:quuz' diff --git a/test/integration/targets/module_utils/module_utils/service.py b/test/integration/targets/module_utils/module_utils/service.py new file mode 100644 index 
00000000..1492f468 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/service.py @@ -0,0 +1 @@ +sysv_is_enabled = 'sysv_is_enabled' diff --git a/test/integration/targets/module_utils/module_utils/spam1/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py new file mode 100644 index 00000000..f290e156 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py @@ -0,0 +1 @@ +data = 'spam1' diff --git a/test/integration/targets/module_utils/module_utils/spam2/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py new file mode 100644 index 00000000..5e053d88 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py @@ -0,0 +1 @@ +data = 'spam2' diff --git a/test/integration/targets/module_utils/module_utils/spam3/__init__.py b/test/integration/targets/module_utils/module_utils/spam3/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py new file mode 100644 index 00000000..91075089 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py @@ -0,0 +1 @@ +data = 'spam3' diff --git a/test/integration/targets/module_utils/module_utils/spam4/__init__.py b/test/integration/targets/module_utils/module_utils/spam4/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py new file mode 100644 index 00000000..7d552882 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py @@ -0,0 +1 @@ +data = 'spam4' diff --git a/test/integration/targets/module_utils/module_utils/spam5/__init__.py b/test/integration/targets/module_utils/module_utils/spam5/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py new file mode 100644 index 00000000..cc947b83 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py @@ -0,0 +1 @@ +data = 'spam5:bacon' diff --git a/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py b/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py new file mode 100644 index 00000000..f0394c87 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py @@ -0,0 +1 @@ +data = 'spam5:eggs' diff --git a/test/integration/targets/module_utils/module_utils/spam6/__init__.py b/test/integration/targets/module_utils/module_utils/spam6/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py new file mode 100644 index 00000000..8c1a70ea --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py @@ -0,0 +1,2 @@ +bacon = 'spam6:bacon' +eggs = 'spam6:eggs' diff --git a/test/integration/targets/module_utils/module_utils/spam7/__init__.py b/test/integration/targets/module_utils/module_utils/spam7/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py new file mode 100644 index 00000000..cd9a05d0 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py @@ -0,0 +1 @@ +eggs = 'spam7:eggs' diff --git a/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py new file mode 100644 index 00000000..490121f8 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py @@ -0,0 +1 @@ +data = 'spam7:bacon' diff --git a/test/integration/targets/module_utils/module_utils/spam8/__init__.py b/test/integration/targets/module_utils/module_utils/spam8/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py new file mode 100644 index 00000000..c02bf5fd --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py @@ -0,0 +1 @@ +eggs = 'spam8:eggs' diff --git a/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py new file mode 100644 index 00000000..28ea2857 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py @@ -0,0 +1 @@ +data = 'spam8:bacon' diff --git a/test/integration/targets/module_utils/module_utils/sub/__init__.py b/test/integration/targets/module_utils/module_utils/sub/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/sub/bam.py b/test/integration/targets/module_utils/module_utils/sub/bam.py new file mode 100644 index 00000000..566f8b7c --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/sub/bam.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +bam = "BAM FROM sub/bam.py" diff --git a/test/integration/targets/module_utils/module_utils/sub/bam/__init__.py 
b/test/integration/targets/module_utils/module_utils/sub/bam/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/sub/bam/bam.py b/test/integration/targets/module_utils/module_utils/sub/bam/bam.py new file mode 100644 index 00000000..b7ed7072 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/sub/bam/bam.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +bam = "BAM FROM sub/bam/bam.py" diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/__init__.py b/test/integration/targets/module_utils/module_utils/sub/bar/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/bam.py b/test/integration/targets/module_utils/module_utils/sub/bar/bam.py new file mode 100644 index 00000000..02fafd40 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/sub/bar/bam.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +bam = "BAM FROM sub/bar/bam.py" diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/bar.py b/test/integration/targets/module_utils/module_utils/sub/bar/bar.py new file mode 100644 index 00000000..8566901f --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/sub/bar/bar.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +bar = "BAR FROM sub/bar/bar.py" diff --git a/test/integration/targets/module_utils/module_utils/yak/__init__.py b/test/integration/targets/module_utils/module_utils/yak/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py b/test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py b/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py new file mode 100644 index 00000000..89b2bfe8 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py @@ -0,0 +1 @@ +data = 'yak' diff --git a/test/integration/targets/module_utils/module_utils_basic_setcwd.yml b/test/integration/targets/module_utils/module_utils_basic_setcwd.yml new file mode 100644 index 00000000..97dbf873 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_basic_setcwd.yml @@ -0,0 +1,22 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: make sure the nobody user is available + include_role: + name: setup_nobody + + - name: verify AnsibleModule works when cwd is missing + test_cwd_missing: + register: missing + + - name: verify AnsibleModule works when cwd is unreadable + test_cwd_unreadable: + register: unreadable + become: yes + become_user: nobody # root can read cwd regardless of permissions, so a non-root user is required here + + - name: verify AnsibleModule was able to adjust cwd as expected + assert: + that: + - missing.before != missing.after + - unreadable.before != unreadable.after or unreadable.before == '/' # allow / fallback on macOS when using an unprivileged user diff --git a/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml b/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml new file mode 100644 index 00000000..7d961c4c --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml @@ -0,0 +1,34 @@ +- hosts: testhost + gather_facts: no + tasks: + - test_recursive_diff: + a: + foo: + bar: + - baz: + 
qux: ham_sandwich + b: + foo: + bar: + - baz: + qux: turkey_sandwich + register: recursive_diff_diff + + - test_recursive_diff: + a: + foo: + bar: + - baz: + qux: ham_sandwich + b: + foo: + bar: + - baz: + qux: ham_sandwich + register: recursive_diff_same + + - assert: + that: + - recursive_diff_diff.the_diff is not none + - recursive_diff_diff.the_diff|length == 2 + - recursive_diff_same.the_diff is none diff --git a/test/integration/targets/module_utils/module_utils_envvar.yml b/test/integration/targets/module_utils/module_utils_envvar.yml new file mode 100644 index 00000000..8d97e0eb --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_envvar.yml @@ -0,0 +1,51 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Use a specially crafted module to see if things were imported correctly + test: + register: result + + - name: Check that these are all loaded from playbook dir's module_utils + assert: + that: + - 'result["abcdefgh"] == "abcdefgh"' + - 'result["bar0"] == "bar0"' + - 'result["bar1"] == "bar1"' + - 'result["bar2"] == "bar2"' + - 'result["baz1"] == "baz1"' + - 'result["baz2"] == "baz2"' + - 'result["foo0"] == "foo0"' + - 'result["foo1"] == "foo1"' + - 'result["foo2"] == "foo2"' + - 'result["qux1"] == "qux1"' + - 'result["qux2"] == ["qux2:quux", "qux2:quuz"]' + - 'result["spam1"] == "spam1"' + - 'result["spam2"] == "spam2"' + - 'result["spam3"] == "spam3"' + - 'result["spam4"] == "spam4"' + - 'result["spam5"] == ["spam5:bacon", "spam5:eggs"]' + - 'result["spam6"] == ["spam6:bacon", "spam6:eggs"]' + - 'result["spam7"] == ["spam7:bacon", "spam7:eggs"]' + - 'result["spam8"] == ["spam8:bacon", "spam8:eggs"]' + + # Test that overriding something in module_utils with something in the local library works + - name: Test that playbook dir's module_utils overrides facts.py + test_override: + register: result + + - name: Make sure we used the local facts.py, not the one shipped with ansible + assert: + that: + - 'result["data"] == "overridden facts.py"' + + - name: Test that importing something from the module_utils in the env_vars works + test_env_override: + register: result + + - name: Make sure we used the module_utils from the env_var for these + assert: + that: + # Override of shipped module_utils + - 'result["json_utils"] == "overridden json_utils"' + # Only in the env vars directory + - 'result["mork"] == "mork"' diff --git a/test/integration/targets/module_utils/module_utils_test.yml b/test/integration/targets/module_utils/module_utils_test.yml new file mode 100644 index 00000000..0550b9f0 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_test.yml @@ -0,0 +1,71 @@ +- hosts: testhost + gather_facts: no + tasks: + - name: Use a specially crafted module to see if things were imported correctly + test: + register: result + + - name: Check that the module imported the correct version of each module_util + assert: + that: + - 'result["abcdefgh"] == "abcdefgh"' + - 'result["bar0"] == "bar0"' + - 'result["bar1"] == "bar1"' + - 'result["bar2"] == "bar2"' + - 'result["baz1"] == "baz1"' + - 'result["baz2"] == "baz2"' + - 'result["foo0"] == "foo0"' + - 'result["foo1"] == "foo1"' + - 'result["foo2"] == "foo2"' + - 'result["qux1"] == "qux1"' + - 'result["qux2"] == ["qux2:quux", "qux2:quuz"]' + - 'result["spam1"] == "spam1"' + - 'result["spam2"] == "spam2"' + - 'result["spam3"] == "spam3"' + - 'result["spam4"] == "spam4"' + - 'result["spam5"] == ["spam5:bacon", "spam5:eggs"]' + - 'result["spam6"] == ["spam6:bacon", "spam6:eggs"]' -
'result["spam7"] == ["spam7:bacon", "spam7:eggs"]' + - 'result["spam8"] == ["spam8:bacon", "spam8:eggs"]' + + # Test that overriding something in module_utils with something in the local library works + - name: Test that local module_utils overrides facts.py + test_override: + register: result + + - name: Make sure the we used the local facts.py, not the one shipped with ansible + assert: + that: + - result["data"] == "overridden facts.py" + + - name: Test that importing a module that only exists inside of a submodule does not work + test_failure: + ignore_errors: True + register: result + + - name: Make sure we failed in AnsiBallZ + assert: + that: + - result is failed + - result['msg'] == "Could not find imported module support code for ansible.modules.test_failure. Looked for (['ansible.module_utils.zebra.foo', 'ansible.module_utils.zebra'])" + + - name: Test that alias deprecation works + test_alias_deprecation: + baz: 'bar' + register: result + + - name: Assert that the deprecation message is given correctly + assert: + that: + - result.deprecations[0].msg == "Alias 'baz' is deprecated. See the module docs for more information" + - result.deprecations[0].version == '9.99' + + + - name: Test that optional imports behave properly + test_optional: + register: optionaltest + + - assert: + that: + - optionaltest is success + - optionaltest.msg == 'all missing optional imports behaved as expected' \ No newline at end of file diff --git a/test/integration/targets/module_utils/module_utils_test_no_log.yml b/test/integration/targets/module_utils/module_utils_test_no_log.yml new file mode 100644 index 00000000..bad2efd4 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_test_no_log.yml @@ -0,0 +1,9 @@ +# This is called by module_utils_vvvvv.yml with a custom callback +- hosts: testhost + gather_facts: no + tasks: + - name: Check no_log invocation results + test_no_log: + explicit_pass: abc + suboption: + explicit_sub_pass: def diff --git a/test/integration/targets/module_utils/module_utils_vvvvv.yml b/test/integration/targets/module_utils/module_utils_vvvvv.yml new file mode 100644 index 00000000..1fd91d25 --- /dev/null +++ b/test/integration/targets/module_utils/module_utils_vvvvv.yml @@ -0,0 +1,27 @@ +- hosts: testhost + gather_facts: no + tasks: + # Invocation usually is output with 3vs or more, our callback plugin displays it anyway + - name: Check no_log invocation results + command: ansible-playbook -i {{ inventory_file }} module_utils_test_no_log.yml + environment: + ANSIBLE_CALLBACK_PLUGINS: callback + ANSIBLE_STDOUT_CALLBACK: pure_json + SECRET_ENV: ghi + SECRET_SUB_ENV: jkl + register: no_log_invocation + + - set_fact: + no_log_invocation: '{{ no_log_invocation.stdout | trim | from_json }}' + + - name: check no log values from fallback or default are masked + assert: + that: + - no_log_invocation.invocation.module_args.default_pass == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.explicit_pass == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.fallback_pass == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.normal == 'plaintext' + - no_log_invocation.invocation.module_args.suboption.default_sub_pass == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.suboption.explicit_sub_pass == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.suboption.fallback_sub_pass == 
'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + - no_log_invocation.invocation.module_args.suboption.normal == 'plaintext' diff --git a/test/integration/targets/module_utils/other_mu_dir/__init__.py b/test/integration/targets/module_utils/other_mu_dir/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py new file mode 100644 index 00000000..796fed38 --- /dev/null +++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py @@ -0,0 +1 @@ +data = 'should not be visible abcdefgh' diff --git a/test/integration/targets/module_utils/other_mu_dir/facts.py b/test/integration/targets/module_utils/other_mu_dir/facts.py new file mode 100644 index 00000000..dbfab271 --- /dev/null +++ b/test/integration/targets/module_utils/other_mu_dir/facts.py @@ -0,0 +1 @@ +data = 'should not be visible facts.py' diff --git a/test/integration/targets/module_utils/other_mu_dir/json_utils.py b/test/integration/targets/module_utils/other_mu_dir/json_utils.py new file mode 100644 index 00000000..59757e40 --- /dev/null +++ b/test/integration/targets/module_utils/other_mu_dir/json_utils.py @@ -0,0 +1 @@ +data = 'overridden json_utils' diff --git a/test/integration/targets/module_utils/other_mu_dir/mork.py b/test/integration/targets/module_utils/other_mu_dir/mork.py new file mode 100644 index 00000000..3b700fca --- /dev/null +++ b/test/integration/targets/module_utils/other_mu_dir/mork.py @@ -0,0 +1 @@ +data = 'mork' diff --git a/test/integration/targets/module_utils/runme.sh b/test/integration/targets/module_utils/runme.sh new file mode 100755 index 00000000..f25dba63 --- /dev/null +++ b/test/integration/targets/module_utils/runme.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=../ ansible-playbook module_utils_basic_setcwd.yml -i ../../inventory "$@" + +# Keep the -vvvvv here. This acts as a test that higher verbosity +# doesn't cause a traceback with unicode in the custom module_utils directory path.
+ansible-playbook module_utils_vvvvv.yml -i ../../inventory -vvvvv "$@" + +ansible-playbook module_utils_test.yml -i ../../inventory -v "$@" +ANSIBLE_MODULE_UTILS=other_mu_dir ansible-playbook module_utils_envvar.yml -i ../../inventory -v "$@" + +ansible-playbook module_utils_common_dict_transformation.yml -i ../../inventory "$@" diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/aliases b/test/integration/targets/module_utils_Ansible.AccessToken/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.AccessToken/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1 b/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1 new file mode 100644 index 00000000..5e3a0af5 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1 @@ -0,0 +1,378 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.AccessToken +#AnsibleRequires -CSharpUtil Ansible.Basic + +$spec = @{ + options = @{ + test_username = @{ type = "str"; required = $true } + test_password = @{ type = "str"; required = $true; no_log = $true } + } +} +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$test_username = $module.Params.test_username +$test_password = $module.Params.test_password + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + + $module.FailJson("AssertionError: actual != expected") + } +} + +$current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + +$tests = [Ordered]@{ + "Open process token" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query") + try { + $h_token.IsClosed | Assert-Equals -Expected $false + $h_token.IsInvalid | Assert-Equals -Expected $false + + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token) + $actual_user | Assert-Equals -Expected $current_user + } finally { + $h_token.Dispose() + } + $h_token.IsClosed | Assert-Equals -Expected $true + } + + "Open process token of another process" = { + $proc_info = Start-Process -FilePath "powershell.exe" -ArgumentList "-Command Start-Sleep -Seconds 60" -WindowStyle Hidden -PassThru + try { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess($proc_info.Id, "QueryInformation", $false) + try { + $h_process.IsClosed | Assert-Equals -Expected $false + $h_process.IsInvalid
| Assert-Equals -Expected $false + + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query") + try { + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token) + $actual_user | Assert-Equals -Expected $current_user + } finally { + $h_token.Dispose() + } + } finally { + $h_process.Dispose() + } + $h_process.IsClosed | Assert-Equals -Expected $true + } finally { + $proc_info | Stop-Process + } + } + + "Failed to open process token" = { + $failed = $false + try { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess(4, "QueryInformation", $false) + $h_process.Dispose() # In case this doesn't fail, make sure we still dispose of it + } catch [Ansible.AccessToken.Win32Exception] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "Failed to open process 4 with access QueryInformation (Access is denied, Win32ErrorCode 5 - 0x00000005)" + } + $failed | Assert-Equals -Expected $true + } + + "Duplicate access token primary" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Duplicate") + try { + $dup_token = [Ansible.AccessToken.TokenUtil]::DuplicateToken($h_token, "Query", "Anonymous", "Primary") + try { + $dup_token.IsClosed | Assert-Equals -Expected $false + $dup_token.IsInvalid | Assert-Equals -Expected $false + + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($dup_token) + + $actual_user | Assert-Equals -Expected $current_user + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($dup_token) + + $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary) + $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Anonymous) + } finally { + $dup_token.Dispose() + } + + $dup_token.IsClosed | Assert-Equals -Expected $true + } finally { + $h_token.Dispose() + } + } + + "Duplicate access token impersonation" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Duplicate") + try { + "Anonymous", "Identification", "Impersonation", "Delegation" | ForEach-Object -Process { + $dup_token = [Ansible.AccessToken.TokenUtil]::DuplicateToken($h_token, "Query", $_, "Impersonation") + try { + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($dup_token) + + $actual_user | Assert-Equals -Expected $current_user + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($dup_token) + + $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Impersonation) + $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]"$_") + } finally { + $dup_token.Dispose() + } + } + } finally { + $h_token.Dispose() + } + } + + "Impersonate SYSTEM token" = { + $system_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @( + [System.Security.Principal.WellKnownSidType]::LocalSystemSid, + $null + ) + $tested = $false + foreach ($h_token in [Ansible.AccessToken.TokenUtil]::EnumerateUserTokens($system_sid, "Duplicate, Impersonate, Query")) { + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token) + $actual_user | Assert-Equals -Expected $system_sid + + [Ansible.AccessToken.TokenUtil]::ImpersonateToken($h_token) + try { + $current_sid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + $current_sid | Assert-Equals -Expected
$system_sid + } finally { + [Ansible.AccessToken.TokenUtil]::RevertToSelf() + } + + $current_sid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + $current_sid | Assert-Equals -Expected $current_user + + # Will keep on looping for each SYSTEM token it can retrieve; we only want to test 1 + $tested = $true + break + } + + $tested | Assert-Equals -Expected $true + } + + "Get token privileges" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query") + try { + $priv_info = &whoami.exe /priv | Where-Object { $_.StartsWith("Se") } + $actual_privs = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($h_token) + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($h_token) + + $actual_privs.Count | Assert-Equals -Expected $priv_info.Count + $actual_privs.Count | Assert-Equals -Expected $actual_stat.PrivilegeCount + + foreach ($info in $priv_info) { + $info_split = $info.Split(" ", [System.StringSplitOptions]::RemoveEmptyEntries) + $priv_name = $info_split[0] + $priv_enabled = $info_split[-1] -eq "Enabled" + $actual_priv = $actual_privs | Where-Object { $_.Name -eq $priv_name } + + $actual_priv -eq $null | Assert-Equals -Expected $false + if ($priv_enabled) { + $actual_priv.Attributes.HasFlag([Ansible.AccessToken.PrivilegeAttributes]::Enabled) | Assert-Equals -Expected $true + } else { + $actual_priv.Attributes.HasFlag([Ansible.AccessToken.PrivilegeAttributes]::Disabled) | Assert-Equals -Expected $true + } + } + } finally { + $h_token.Dispose() + } + } + + "Get token statistics" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query") + try { + $actual_priv = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($h_token) + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($h_token) + + $actual_stat.TokenId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid" + $actual_stat.AuthenticationId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid" + $actual_stat.ExpirationTime.GetType().FullName | Assert-Equals -Expected "System.Int64" + + $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary) + + $os_version = [Version](Get-Item -LiteralPath $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion + if ($os_version -lt [Version]"6.1") { + # While the token is a primary token, Server 2008 reports the SecurityImpersonationLevel for a primary token as Impersonation + $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Impersonation) + } else { + $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Anonymous) + } + $actual_stat.DynamicCharged.GetType().FullName | Assert-Equals -Expected "System.UInt32" + $actual_stat.DynamicAvailable.GetType().FullName | Assert-Equals -Expected "System.UInt32" + $actual_stat.GroupCount.GetType().FullName | Assert-Equals -Expected "System.UInt32" + $actual_stat.PrivilegeCount | Assert-Equals -Expected $actual_priv.Count + $actual_stat.ModifiedId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid" + } finally { + $h_token.Dispose() + } + } + + "Get token linked token impersonation" = { + $h_token = [Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Interactive", "Default") + try { + 
$actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($h_token) + $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Limited) + + $actual_linked = [Ansible.AccessToken.TokenUtil]::GetTokenLinkedToken($h_token) + try { + $actual_linked.IsClosed | Assert-Equals -Expected $false + $actual_linked.IsInvalid | Assert-Equals -Expected $false + + $actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($actual_linked) + $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Full) + + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($actual_linked) + $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Impersonation) + } finally { + $actual_linked.Dispose() + } + $actual_linked.IsClosed | Assert-Equals -Expected $true + } finally { + $h_token.Dispose() + } + } + + "Get token linked token primary" = { + # We need a token with the SeTcbPrivilege for this to work. + $system_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @( + [System.Security.Principal.WellKnownSidType]::LocalSystemSid, + $null + ) + $tested = $false + foreach ($system_token in [Ansible.AccessToken.TokenUtil]::EnumerateUserTokens($system_sid, "Duplicate, Impersonate, Query")) { + $privileges = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($system_token) + if ($null -eq ($privileges | Where-Object { $_.Name -eq "SeTcbPrivilege" })) { + continue + } + + $h_token = [Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Interactive", "Default") + try { + [Ansible.AccessToken.TokenUtil]::ImpersonateToken($system_token) + try { + $actual_linked = [Ansible.AccessToken.TokenUtil]::GetTokenLinkedToken($h_token) + try { + $actual_linked.IsClosed | Assert-Equals -Expected $false + $actual_linked.IsInvalid | Assert-Equals -Expected $false + + $actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($actual_linked) + $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Full) + + $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($actual_linked) + $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary) + } finally { + $actual_linked.Dispose() + } + $actual_linked.IsClosed | Assert-Equals -Expected $true + } finally { + [Ansible.AccessToken.TokenUtil]::RevertToSelf() + } + } finally { + $h_token.Dispose() + } + + $tested = $true + break + } + $tested | Assert-Equals -Expected $true + } + + "Failed to get token information" = { + $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess() + $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, 'Duplicate') # Without Query the below will fail + + $failed = $false + try { + [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token) + } catch [Ansible.AccessToken.Win32Exception] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "GetTokenInformation(TokenUser) failed to get buffer length (Access is denied, Win32ErrorCode 5 - 0x00000005)" + } finally { + $h_token.Dispose() + } + $failed | Assert-Equals -Expected $true + } + + "Logon with valid credentials" = { + $expected_user = New-Object -TypeName System.Security.Principal.NTAccount -ArgumentList $test_username + $expected_sid = $expected_user.Translate([System.Security.Principal.SecurityIdentifier]) + + $h_token = 
[Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Network", "Default") + try { + $h_token.IsClosed | Assert-Equals -Expected $false + $h_token.IsInvalid | Assert-Equals -Expected $false + + $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token) + $actual_user | Assert-Equals -Expected $expected_sid + } finally { + $h_token.Dispose() + } + $h_token.IsClosed | Assert-Equals -Expected $true + } + + "Logon with invalid credentials" = { + $failed = $false + try { + [Ansible.AccessToken.TokenUtil]::LogonUser("fake-user", $null, "fake-pass", "Network", "Default") + } catch [Ansible.AccessToken.Win32Exception] { + $failed = $true + $_.Exception.Message.Contains("Failed to logon fake-user") | Assert-Equals -Expected $true + $_.Exception.Message.Contains("Win32ErrorCode 1326 - 0x0000052E)") | Assert-Equals -Expected $true + } + $failed | Assert-Equals -Expected $true + } + + "Logon with invalid credentials with a domain account" = { + $failed = $false + try { + [Ansible.AccessToken.TokenUtil]::LogonUser("fake-user", "fake-domain", "fake-pass", "Network", "Default") + } catch [Ansible.AccessToken.Win32Exception] { + $failed = $true + $_.Exception.Message.Contains("Failed to logon fake-domain\fake-user") | Assert-Equals -Expected $true + $_.Exception.Message.Contains("Win32ErrorCode 1326 - 0x0000052E)") | Assert-Equals -Expected $true + } + $failed | Assert-Equals -Expected $true + } +} + +foreach ($test_impl in $tests.GetEnumerator()) { + $test = $test_impl.Key + &$test_impl.Value +} + +$module.Result.data = "success" +$module.ExitJson() diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml b/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml new file mode 100644 index 00000000..dbd64b06 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- set_fact: + test_username: ansible-test + test_password: Password123{{ lookup('password', '/dev/null chars=ascii_letters,digits length=8') }} + +- name: create test Admin user + win_user: + name: '{{ test_username }}' + password: '{{ test_password }}' + state: present + groups: + - Administrators + +- block: + - name: test Ansible.AccessToken.cs + ansible_access_token_tests: + test_username: '{{ test_username }}' + test_password: '{{ test_password }}' + register: ansible_access_token_test + + - name: assert test Ansible.AccessToken.cs + assert: + that: + - ansible_access_token_test.data == "success" + always: + - name: remove test Admin user + win_user: + name: '{{ test_username }}' + state: absent diff --git a/test/integration/targets/module_utils_Ansible.Basic/aliases b/test/integration/targets/module_utils_Ansible.Basic/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Basic/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1 b/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1 new file mode 100644 index 00000000..9278e386 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1 @@ -0,0 +1,3098 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, 
ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.failed = $true + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.Result.msg = "AssertionError: actual != expected" + + Exit-Module + } +} + +Function Assert-DictionaryEquals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + $actual_keys = $Actual.Keys + $expected_keys = $Expected.Keys + + $actual_keys.Count | Assert-Equals -Expected $expected_keys.Count + foreach ($actual_entry in $Actual.GetEnumerator()) { + $actual_key = $actual_entry.Key + ($actual_key -cin $expected_keys) | Assert-Equals -Expected $true + $actual_value = $actual_entry.Value + $expected_value = $Expected.$actual_key + + if ($actual_value -is [System.Collections.IDictionary]) { + $actual_value | Assert-DictionaryEquals -Expected $expected_value + } elseif ($actual_value -is [System.Collections.ArrayList] -or $actual_value -is [Array]) { + for ($i = 0; $i -lt $actual_value.Count; $i++) { + $actual_entry = $actual_value[$i] + $expected_entry = $expected_value[$i] + if ($actual_entry -is [System.Collections.IDictionary]) { + $actual_entry | Assert-DictionaryEquals -Expected $expected_entry + } else { + Assert-Equals -Actual $actual_entry -Expected $expected_entry + } + } + } else { + Assert-Equals -Actual $actual_value -Expected $expected_value + } + } + foreach ($expected_key in $expected_keys) { + ($expected_key -cin $actual_keys) | Assert-Equals -Expected $true + } +} + +Function Exit-Module { + # Make sure Exit actually calls exit and not our overridden test behaviour + [Ansible.Basic.AnsibleModule]::Exit = { param([Int32]$rc) exit $rc } + Write-Output -InputObject (ConvertTo-Json -InputObject $module.Result -Compress -Depth 99) + $module.ExitJson() +} + +$tmpdir = $module.Tmpdir + +# Override the Exit and WriteLine behaviour to throw an exception instead of exiting the module +[Ansible.Basic.AnsibleModule]::Exit = { + param([Int32]$rc) + $exp = New-Object -TypeName System.Exception -ArgumentList "exit: $rc" + $exp | Add-Member -Type NoteProperty -Name Output -Value $_test_out + throw $exp +} +[Ansible.Basic.AnsibleModule]::WriteLine = { + param([String]$line) + Set-Variable -Name _test_out -Scope Global -Value $line +} + +$tests = @{ + "Empty spec and no options - args file" = { + $args_file = Join-Path -Path $tmpdir -ChildPath "args-$(Get-Random).json" + [System.IO.File]::WriteAllText($args_file, '{ "ANSIBLE_MODULE_ARGS": {} }') + $m = [Ansible.Basic.AnsibleModule]::Create(@($args_file), @{}) + + $m.CheckMode | Assert-Equals -Expected $false + $m.DebugMode | Assert-Equals -Expected $false + $m.DiffMode | Assert-Equals -Expected $false + $m.KeepRemoteFiles | Assert-Equals -Expected $false + 
$m.ModuleName | Assert-Equals -Expected "undefined win module" + $m.NoLog | Assert-Equals -Expected $false + $m.Verbosity | Assert-Equals -Expected 0 + $m.AnsibleVersion | Assert-Equals -Expected $null + } + + "Empty spec and no options - complex_args" = { + Set-Variable -Name complex_args -Scope Global -Value @{} + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + $m.CheckMode | Assert-Equals -Expected $false + $m.DebugMode | Assert-Equals -Expected $false + $m.DiffMode | Assert-Equals -Expected $false + $m.KeepRemoteFiles | Assert-Equals -Expected $false + $m.ModuleName | Assert-Equals -Expected "undefined win module" + $m.NoLog | Assert-Equals -Expected $false + $m.Verbosity | Assert-Equals -Expected 0 + $m.AnsibleVersion | Assert-Equals -Expected $null + } + + "Internal param changes - args file" = { + $m_tmpdir = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)" + New-Item -Path $m_tmpdir -ItemType Directory > $null + $args_file = Join-Path -Path $tmpdir -ChildPath "args-$(Get-Random).json" + [System.IO.File]::WriteAllText($args_file, @" +{ + "ANSIBLE_MODULE_ARGS": { + "_ansible_check_mode": true, + "_ansible_debug": true, + "_ansible_diff": true, + "_ansible_keep_remote_files": true, + "_ansible_module_name": "ansible_basic_tests", + "_ansible_no_log": true, + "_ansible_remote_tmp": "%TEMP%", + "_ansible_selinux_special_fs": "ignored", + "_ansible_shell_executable": "ignored", + "_ansible_socket": "ignored", + "_ansible_syslog_facility": "ignored", + "_ansible_tmpdir": "$($m_tmpdir -replace "\\", "\\")", + "_ansible_verbosity": 3, + "_ansible_version": "2.8.0" + } +} +"@) + $m = [Ansible.Basic.AnsibleModule]::Create(@($args_file), @{supports_check_mode=$true}) + $m.CheckMode | Assert-Equals -Expected $true + $m.DebugMode | Assert-Equals -Expected $true + $m.DiffMode | Assert-Equals -Expected $true + $m.KeepRemoteFiles | Assert-Equals -Expected $true + $m.ModuleName | Assert-Equals -Expected "ansible_basic_tests" + $m.NoLog | Assert-Equals -Expected $true + $m.Verbosity | Assert-Equals -Expected 3 + $m.AnsibleVersion | Assert-Equals -Expected "2.8.0" + $m.Tmpdir | Assert-Equals -Expected $m_tmpdir + } + + "Internal param changes - complex_args" = { + $m_tmpdir = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)" + New-Item -Path $m_tmpdir -ItemType Directory > $null + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_check_mode = $true + _ansible_debug = $true + _ansible_diff = $true + _ansible_keep_remote_files = $true + _ansible_module_name = "ansible_basic_tests" + _ansible_no_log = $true + _ansible_remote_tmp = "%TEMP%" + _ansible_selinux_special_fs = "ignored" + _ansible_shell_executable = "ignored" + _ansible_socket = "ignored" + _ansible_syslog_facility = "ignored" + _ansible_tmpdir = $m_tmpdir.ToString() + _ansible_verbosity = 3 + _ansible_version = "2.8.0" + } + $spec = @{ + supports_check_mode = $true + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $m.CheckMode | Assert-Equals -Expected $true + $m.DebugMode | Assert-Equals -Expected $true + $m.DiffMode | Assert-Equals -Expected $true + $m.KeepRemoteFiles | Assert-Equals -Expected $true + $m.ModuleName | Assert-Equals -Expected "ansible_basic_tests" + $m.NoLog | Assert-Equals -Expected $true + $m.Verbosity | Assert-Equals -Expected 3 + $m.AnsibleVersion | Assert-Equals -Expected "2.8.0" + $m.Tmpdir | Assert-Equals -Expected $m_tmpdir + } + + "Parse complex module options" = { + $spec = @{ + options = @{ + option_default = @{} + missing_option_default = @{} + 
string_option = @{type = "str"} + required_option = @{required = $true} + missing_choices = @{choices = "a", "b"} + choices = @{choices = "a", "b"} + one_choice = @{choices = ,"b"} + choice_with_default = @{choices = "a", "b"; default = "b"} + alias_direct = @{aliases = ,"alias_direct1"} + alias_as_alias = @{aliases = "alias_as_alias1", "alias_as_alias2"} + bool_type = @{type = "bool"} + bool_from_str = @{type = "bool"} + dict_type = @{ + type = "dict" + options = @{ + int_type = @{type = "int"} + str_type = @{type = "str"; default = "str_sub_type"} + } + } + dict_type_missing = @{ + type = "dict" + options = @{ + int_type = @{type = "int"} + str_type = @{type = "str"; default = "str_sub_type"} + } + } + dict_type_defaults = @{ + type = "dict" + apply_defaults = $true + options = @{ + int_type = @{type = "int"} + str_type = @{type = "str"; default = "str_sub_type"} + } + } + dict_type_json = @{type = "dict"} + dict_type_str = @{type = "dict"} + float_type = @{type = "float"} + int_type = @{type = "int"} + json_type = @{type = "json"} + json_type_dict = @{type = "json"} + list_type = @{type = "list"} + list_type_str = @{type = "list"} + list_with_int = @{type = "list"; elements = "int"} + list_type_single = @{type = "list"} + list_with_dict = @{ + type = "list" + elements = "dict" + options = @{ + int_type = @{type = "int"} + str_type = @{type = "str"; default = "str_sub_type"} + } + } + path_type = @{type = "path"} + path_type_nt = @{type = "path"} + path_type_missing = @{type = "path"} + raw_type_str = @{type = "raw"} + raw_type_int = @{type = "raw"} + sid_type = @{type = "sid"} + sid_from_name = @{type = "sid"} + str_type = @{type = "str"} + delegate_type = @{type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) }} + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_default = 1 + string_option = 1 + required_option = "required" + choices = "a" + one_choice = "b" + alias_direct = "a" + alias_as_alias2 = "a" + bool_type = $true + bool_from_str = "false" + dict_type = @{ + int_type = "10" + } + dict_type_json = '{"a":"a","b":1,"c":["a","b"]}' + dict_type_str = 'a=a b="b 2" c=c' + float_type = "3.14159" + int_type = 0 + json_type = '{"a":"a","b":1,"c":["a","b"]}' + json_type_dict = @{ + a = "a" + b = 1 + c = @("a", "b") + } + list_type = @("a", "b", 1, 2) + list_type_str = "a, b,1,2 " + list_with_int = @("1", 2) + list_type_single = "single" + list_with_dict = @( + @{ + int_type = 2 + str_type = "dict entry" + }, + @{ int_type = 1 }, + @{} + ) + path_type = "%SystemRoot%\System32" + path_type_nt = "\\?\%SystemRoot%\System32" + path_type_missing = "T:\missing\path" + raw_type_str = "str" + raw_type_int = 1 + sid_type = "S-1-5-18" + sid_from_name = "SYSTEM" + str_type = "str" + delegate_type = "1234" + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $m.Params.option_default | Assert-Equals -Expected "1" + $m.Params.option_default.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.missing_option_default | Assert-Equals -Expected $null + $m.Params.string_option | Assert-Equals -Expected "1" + $m.Params.string_option.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.required_option | Assert-Equals -Expected "required" + $m.Params.required_option.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.missing_choices | Assert-Equals -Expected $null + $m.Params.choices | Assert-Equals -Expected "a" + $m.Params.choices.GetType().ToString() | Assert-Equals -Expected "System.String" 
+ $m.Params.one_choice | Assert-Equals -Expected "b" + $m.Params.one_choice.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.choice_with_default | Assert-Equals -Expected "b" + $m.Params.choice_with_default.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.alias_direct | Assert-Equals -Expected "a" + $m.Params.alias_direct.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.alias_as_alias | Assert-Equals -Expected "a" + $m.Params.alias_as_alias.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.bool_type | Assert-Equals -Expected $true + $m.Params.bool_type.GetType().ToString() | Assert-Equals -Expected "System.Boolean" + $m.Params.bool_from_str | Assert-Equals -Expected $false + $m.Params.bool_from_str.GetType().ToString() | Assert-Equals -Expected "System.Boolean" + $m.Params.dict_type | Assert-DictionaryEquals -Expected @{int_type = 10; str_type = "str_sub_type"} + $m.Params.dict_type.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]" + $m.Params.dict_type.int_type.GetType().ToString() | Assert-Equals -Expected "System.Int32" + $m.Params.dict_type.str_type.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.dict_type_missing | Assert-Equals -Expected $null + $m.Params.dict_type_defaults | Assert-DictionaryEquals -Expected @{int_type = $null; str_type = "str_sub_type"} + $m.Params.dict_type_defaults.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]" + $m.Params.dict_type_defaults.str_type.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.dict_type_json | Assert-DictionaryEquals -Expected @{ + a = "a" + b = 1 + c = @("a", "b") + } + $m.Params.dict_type_json.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]" + $m.Params.dict_type_json.a.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.dict_type_json.b.GetType().ToString() | Assert-Equals -Expected "System.Int32" + $m.Params.dict_type_json.c.GetType().ToString() | Assert-Equals -Expected "System.Collections.ArrayList" + $m.Params.dict_type_str | Assert-DictionaryEquals -Expected @{a = "a"; b = "b 2"; c = "c"} + $m.Params.dict_type_str.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]" + $m.Params.dict_type_str.a.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.dict_type_str.b.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.dict_type_str.c.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.float_type | Assert-Equals -Expected ([System.Single]3.14159) + $m.Params.float_type.GetType().ToString() | Assert-Equals -Expected "System.Single" + $m.Params.int_type | Assert-Equals -Expected 0 + $m.Params.int_type.GetType().ToString() | Assert-Equals -Expected "System.Int32" + $m.Params.json_type | Assert-Equals -Expected '{"a":"a","b":1,"c":["a","b"]}' + $m.Params.json_type.GetType().ToString() | Assert-Equals -Expected "System.String" + [Ansible.Basic.AnsibleModule]::FromJson($m.Params.json_type_dict) | Assert-DictionaryEquals -Expected ([Ansible.Basic.AnsibleModule]::FromJson('{"a":"a","b":1,"c":["a","b"]}')) + $m.Params.json_type_dict.GetType().ToString() | Assert-Equals -Expected "System.String" + 
$m.Params.list_type.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]" + $m.Params.list_type.Count | Assert-Equals -Expected 4 + $m.Params.list_type[0] | Assert-Equals -Expected "a" + $m.Params.list_type[0].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_type[1] | Assert-Equals -Expected "b" + $m.Params.list_type[1].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_type[2] | Assert-Equals -Expected 1 + $m.Params.list_type[2].GetType().FullName | Assert-Equals -Expected "System.Int32" + $m.Params.list_type[3] | Assert-Equals -Expected 2 + $m.Params.list_type[3].GetType().FullName | Assert-Equals -Expected "System.Int32" + $m.Params.list_type_str.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]" + $m.Params.list_type_str.Count | Assert-Equals -Expected 4 + $m.Params.list_type_str[0] | Assert-Equals -Expected "a" + $m.Params.list_type_str[0].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_type_str[1] | Assert-Equals -Expected "b" + $m.Params.list_type_str[1].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_type_str[2] | Assert-Equals -Expected "1" + $m.Params.list_type_str[2].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_type_str[3] | Assert-Equals -Expected "2" + $m.Params.list_type_str[3].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_with_int.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]" + $m.Params.list_with_int.Count | Assert-Equals -Expected 2 + $m.Params.list_with_int[0] | Assert-Equals -Expected 1 + $m.Params.list_with_int[0].GetType().FullName | Assert-Equals -Expected "System.Int32" + $m.Params.list_with_int[1] | Assert-Equals -Expected 2 + $m.Params.list_with_int[1].GetType().FullName | Assert-Equals -Expected "System.Int32" + $m.Params.list_type_single.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]" + $m.Params.list_type_single.Count | Assert-Equals -Expected 1 + $m.Params.list_type_single[0] | Assert-Equals -Expected "single" + $m.Params.list_type_single[0].GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.list_with_dict.GetType().FullName.StartsWith("System.Collections.Generic.List``1[[System.Object") | Assert-Equals -Expected $true + $m.Params.list_with_dict.Count | Assert-Equals -Expected 3 + $m.Params.list_with_dict[0].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true + $m.Params.list_with_dict[0] | Assert-DictionaryEquals -Expected @{int_type = 2; str_type = "dict entry"} + $m.Params.list_with_dict[0].int_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.Int32" + $m.Params.list_with_dict[0].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String" + $m.Params.list_with_dict[1].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true + $m.Params.list_with_dict[1] | Assert-DictionaryEquals -Expected @{int_type = 1; str_type = "str_sub_type"} + $m.Params.list_with_dict[1].int_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.Int32" + $m.Params.list_with_dict[1].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String" + 
$m.Params.list_with_dict[2].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true + $m.Params.list_with_dict[2] | Assert-DictionaryEquals -Expected @{int_type = $null; str_type = "str_sub_type"} + $m.Params.list_with_dict[2].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String" + $m.Params.path_type | Assert-Equals -Expected "$($env:SystemRoot)\System32" + $m.Params.path_type.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.path_type_nt | Assert-Equals -Expected "\\?\%SystemRoot%\System32" + $m.Params.path_type_nt.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.path_type_missing | Assert-Equals -Expected "T:\missing\path" + $m.Params.path_type_missing.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.raw_type_str | Assert-Equals -Expected "str" + $m.Params.raw_type_str.GetType().FullName | Assert-Equals -Expected "System.String" + $m.Params.raw_type_int | Assert-Equals -Expected 1 + $m.Params.raw_type_int.GetType().FullName | Assert-Equals -Expected "System.Int32" + $m.Params.sid_type | Assert-Equals -Expected (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList "S-1-5-18") + $m.Params.sid_type.GetType().ToString() | Assert-Equals -Expected "System.Security.Principal.SecurityIdentifier" + $m.Params.sid_from_name | Assert-Equals -Expected (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList "S-1-5-18") + $m.Params.sid_from_name.GetType().ToString() | Assert-Equals -Expected "System.Security.Principal.SecurityIdentifier" + $m.Params.str_type | Assert-Equals -Expected "str" + $m.Params.str_type.GetType().ToString() | Assert-Equals -Expected "System.String" + $m.Params.delegate_type | Assert-Equals -Expected 1234 + $m.Params.delegate_type.GetType().ToString() | Assert-Equals -Expected "System.UInt64" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_module_args = @{ + option_default = "1" + missing_option_default = $null + string_option = "1" + required_option = "required" + missing_choices = $null + choices = "a" + one_choice = "b" + choice_with_default = "b" + alias_direct = "a" + alias_as_alias = "a" + alias_as_alias2 = "a" + bool_type = $true + bool_from_str = $false + dict_type = @{ + int_type = 10 + str_type = "str_sub_type" + } + dict_type_missing = $null + dict_type_defaults = @{ + int_type = $null + str_type = "str_sub_type" + } + dict_type_json = @{ + a = "a" + b = 1 + c = @("a", "b") + } + dict_type_str = @{ + a = "a" + b = "b 2" + c = "c" + } + float_type = 3.14159 + int_type = 0 + json_type = $m.Params.json_type.ToString() + json_type_dict = $m.Params.json_type_dict.ToString() + list_type = @("a", "b", 1, 2) + list_type_str = @("a", "b", "1", "2") + list_with_int = @(1, 2) + list_type_single = @("single") + list_with_dict = @( + @{ + int_type = 2 + str_type = "dict entry" + }, + @{ + int_type = 1 + str_type = "str_sub_type" + }, + @{ + int_type = $null + str_type = "str_sub_type" + } + ) + path_type = "$($env:SystemRoot)\System32" + path_type_nt = "\\?\%SystemRoot%\System32" + path_type_missing = "T:\missing\path" + raw_type_str = "str" + raw_type_int = 1 + sid_type = "S-1-5-18" + 
sid_from_name = "S-1-5-18" + str_type = "str" + delegate_type = 1234 + } + $actual.Keys.Count | Assert-Equals -Expected 2 + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $expected_module_args} + } + + "Parse module args with list elements and delegate type" = { + $spec = @{ + options = @{ + list_delegate_type = @{ + type = "list" + elements = [Func[[Object], [UInt16]]]{ [System.UInt16]::Parse($args[0]) } + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + list_delegate_type = @( + "1234", + 4321 + ) + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $m.Params.list_delegate_type.GetType().Name | Assert-Equals -Expected 'List`1' + $m.Params.list_delegate_type[0].GetType().FullName | Assert-Equals -Expected "System.UInt16" + $m.Params.list_delegate_Type[1].GetType().FullName | Assert-Equals -Expected "System.UInt16" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_module_args = @{ + list_delegate_type = @( + 1234, + 4321 + ) + } + $actual.Keys.Count | Assert-Equals -Expected 2 + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $expected_module_args} + } + + "Parse module args with case insensitive input" = { + $spec = @{ + options = @{ + option1 = @{ type = "int"; required = $true } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_module_name = "win_test" + Option1 = "1" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + # Verifies the case of the params key is set to the module spec not actual input + $m.Params.Keys | Assert-Equals -Expected @("option1") + $m.Params.option1 | Assert-Equals -Expected 1 + + # Verifies the type conversion happens even on a case insensitive match + $m.Params.option1.GetType().FullName | Assert-Equals -Expected "System.Int32" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_warnings = "Parameters for (win_test) was a case insensitive match: Option1. " + $expected_warnings += "Module options will become case sensitive in a future Ansible release. 
" + $expected_warnings += "Supported parameters include: option1" + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + option1 = 1 + } + } + # We have disabled the warning for now + #warnings = @($expected_warnings) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "No log values" = { + $spec = @{ + options = @{ + username = @{type = "str"} + password = @{type = "str"; no_log = $true} + password2 = @{type = "int"; no_log = $true} + dict = @{type = "dict"} + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_module_name = "test_no_log" + username = "user - pass - name" + password = "pass" + password2 = 1234 + dict = @{ + data = "Oops this is secret: pass" + dict = @{ + pass = "plain" + hide = "pass" + sub_hide = "password" + int_hide = 123456 + } + list = @( + "pass", + "password", + 1234567, + "pa ss", + @{ + pass = "plain" + hide = "pass" + sub_hide = "password" + int_hide = 123456 + } + ) + custom = "pass" + } + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $m.Result.data = $complex_args.dict + + # verify params internally aren't masked + $m.Params.username | Assert-Equals -Expected "user - pass - name" + $m.Params.password | Assert-Equals -Expected "pass" + $m.Params.password2 | Assert-Equals -Expected 1234 + $m.Params.dict.custom | Assert-Equals -Expected "pass" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + # verify no_log params are masked in invocation + $expected = @{ + invocation = @{ + module_args = @{ + password2 = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + dict = @{ + dict = @{ + pass = "plain" + hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + sub_hide = "********word" + int_hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + custom = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + list = @( + "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "********word", + "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "pa ss", + @{ + pass = "plain" + hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + sub_hide = "********word" + int_hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + ) + data = "Oops this is secret: ********" + } + username = "user - ******** - name" + password = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + } + changed = $false + data = $complex_args.dict + } + $actual | Assert-DictionaryEquals -Expected $expected + + $expected_event = @' +test_no_log - Invoked with: + username: user - ******** - name + dict: dict: sub_hide: ****word + pass: plain + int_hide: ********56 + hide: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + data: Oops this is secret: ******** + custom: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + list: + - VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + - ********word + - ********567 + - pa ss + - sub_hide: ********word + pass: plain + int_hide: ********56 + hide: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + password2: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER +'@ + $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message + $actual_event | Assert-DictionaryEquals -Expected $expected_event + } + + "No log value with an empty string" = { + $spec = @{ + options = @{ + password1 = @{type = "str"; no_log = $true} + password2 = @{type = "str"; no_log = $true} + } + } + Set-Variable -Name complex_args -Scope 
Global -Value @{ + _ansible_module_name = "test_no_log" + password1 = "" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $m.Result.data = $complex_args.dict + + # verify params internally aren't masked + $m.Params.password1 | Assert-Equals -Expected "" + $m.Params.password2 | Assert-Equals -Expected $null + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + invocation = @{ + module_args = @{ + password1 = "" + password2 = $null + } + } + changed = $false + data = $complex_args.dict + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Removed in version" = { + $spec = @{ + options = @{ + removed1 = @{removed_in_version = "2.1"} + removed2 = @{removed_in_version = "2.2"} + removed3 = @{removed_in_version = "2.3"; removed_from_collection = "ansible.builtin"} + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + removed1 = "value" + removed3 = "value" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + removed1 = "value" + removed2 = $null + removed3 = "value" + } + } + deprecations = @( + @{ + msg = "Param 'removed3' is deprecated. See the module docs for more information" + version = "2.3" + collection_name = "ansible.builtin" + }, + @{ + msg = "Param 'removed1' is deprecated. See the module docs for more information" + version = "2.1" + collection_name = $null + } + ) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Removed at date" = { + $spec = @{ + options = @{ + removed1 = @{removed_at_date = [DateTime]"2020-03-10"} + removed2 = @{removed_at_date = [DateTime]"2020-03-11"} + removed3 = @{removed_at_date = [DateTime]"2020-06-07"; removed_from_collection = "ansible.builtin"} + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + removed1 = "value" + removed3 = "value" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + removed1 = "value" + removed2 = $null + removed3 = "value" + } + } + deprecations = @( + @{ + msg = "Param 'removed3' is deprecated. See the module docs for more information" + date = "2020-06-07" + collection_name = "ansible.builtin" + }, + @{ + msg = "Param 'removed1' is deprecated. 
See the module docs for more information" + date = "2020-03-10" + collection_name = $null + } + ) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Deprecated aliases" = { + $spec = @{ + options = @{ + option1 = @{ type = "str"; aliases = "alias1"; deprecated_aliases = @(@{name = "alias1"; version = "2.10"}) } + option2 = @{ type = "str"; aliases = "alias2"; deprecated_aliases = @(@{name = "alias2"; version = "2.11"}) } + option3 = @{ + type = "dict" + options = @{ + option1 = @{ type = "str"; aliases = "alias1"; deprecated_aliases = @(@{name = "alias1"; version = "2.10"}) } + option2 = @{ type = "str"; aliases = "alias2"; deprecated_aliases = @(@{name = "alias2"; version = "2.11"}) } + option3 = @{ type = "str"; aliases = "alias3"; deprecated_aliases = @(@{name = "alias3"; version = "2.12"; collection_name = "ansible.builtin"}) } + option4 = @{ type = "str"; aliases = "alias4"; deprecated_aliases = @(@{name = "alias4"; date = [DateTime]"2020-03-11"}) } + option5 = @{ type = "str"; aliases = "alias5"; deprecated_aliases = @(@{name = "alias5"; date = [DateTime]"2020-03-09"}) } + option6 = @{ type = "str"; aliases = "alias6"; deprecated_aliases = @(@{name = "alias6"; date = [DateTime]"2020-06-01"; collection_name = "ansible.builtin"}) } + } + } + option4 = @{ type = "str"; aliases = "alias4"; deprecated_aliases = @(@{name = "alias4"; date = [DateTime]"2020-03-10"}) } + option5 = @{ type = "str"; aliases = "alias5"; deprecated_aliases = @(@{name = "alias5"; date = [DateTime]"2020-03-12"}) } + option6 = @{ type = "str"; aliases = "alias6"; deprecated_aliases = @(@{name = "alias6"; version = "2.12"; collection_name = "ansible.builtin"}) } + option7 = @{ type = "str"; aliases = "alias7"; deprecated_aliases = @(@{name = "alias7"; date = [DateTime]"2020-06-07"; collection_name = "ansible.builtin"}) } + } + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + alias1 = "alias1" + option2 = "option2" + option3 = @{ + option1 = "option1" + alias2 = "alias2" + alias3 = "alias3" + option4 = "option4" + alias5 = "alias5" + alias6 = "alias6" + } + option4 = "option4" + alias5 = "alias5" + alias6 = "alias6" + alias7 = "alias7" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + alias1 = "alias1" + option1 = "alias1" + option2 = "option2" + option3 = @{ + option1 = "option1" + option2 = "alias2" + alias2 = "alias2" + option3 = "alias3" + alias3 = "alias3" + option4 = "option4" + option5 = "alias5" + alias5 = "alias5" + option6 = "alias6" + alias6 = "alias6" + } + option4 = "option4" + option5 = "alias5" + alias5 = "alias5" + option6 = "alias6" + alias6 = "alias6" + option7 = "alias7" + alias7 = "alias7" + } + } + deprecations = @( + @{ + msg = "Alias 'alias7' is deprecated. See the module docs for more information" + date = "2020-06-07" + collection_name = "ansible.builtin" + }, + @{ + msg = "Alias 'alias1' is deprecated. See the module docs for more information" + version = "2.10" + collection_name = $null + }, + @{ + msg = "Alias 'alias5' is deprecated. 
See the module docs for more information" + date = "2020-03-12" + collection_name = $null + }, + @{ + msg = "Alias 'alias6' is deprecated. See the module docs for more information" + version = "2.12" + collection_name = "ansible.builtin" + }, + @{ + msg = "Alias 'alias2' is deprecated. See the module docs for more information - found in option3" + version = "2.11" + collection_name = $null + }, + @{ + msg = "Alias 'alias5' is deprecated. See the module docs for more information - found in option3" + date = "2020-03-09" + collection_name = $null + }, + @{ + msg = "Alias 'alias3' is deprecated. See the module docs for more information - found in option3" + version = "2.12" + collection_name = "ansible.builtin" + }, + @{ + msg = "Alias 'alias6' is deprecated. See the module docs for more information - found in option3" + date = "2020-06-01" + collection_name = "ansible.builtin" + } + ) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Required by - single value" = { + $spec = @{ + options = @{ + option1 = @{type = "str"} + option2 = @{type = "str"} + option3 = @{type = "str"} + } + required_by = @{ + option1 = "option2" + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + option2 = "option2" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + option1 = "option1" + option2 = "option2" + option3 = $null + } + } + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Required by - multiple values" = { + $spec = @{ + options = @{ + option1 = @{type = "str"} + option2 = @{type = "str"} + option3 = @{type = "str"} + } + required_by = @{ + option1 = "option2", "option3" + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + option2 = "option2" + option3 = "option3" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + option1 = "option1" + option2 = "option2" + option3 = "option3" + } + } + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Required by explicit null" = { + $spec = @{ + options = @{ + option1 = @{type = "str"} + option2 = @{type = "str"} + option3 = @{type = "str"} + } + required_by = @{ + option1 = "option2" + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + option2 = $null + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{ + option1 = 
"option1" + option2 = $null + option3 = $null + } + } + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Required by failed - single value" = { + $spec = @{ + options = @{ + option1 = @{type = "str"} + option2 = @{type = "str"} + option3 = @{type = "str"} + } + required_by = @{ + option1 = "option2" + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + failed = $true + invocation = @{ + module_args = @{ + option1 = "option1" + } + } + msg = "missing parameter(s) required by 'option1': option2" + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Required by failed - multiple values" = { + $spec = @{ + options = @{ + option1 = @{type = "str"} + option2 = @{type = "str"} + option3 = @{type = "str"} + } + required_by = @{ + option1 = "option2", "option3" + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + failed = $true + invocation = @{ + module_args = @{ + option1 = "option1" + } + } + msg = "missing parameter(s) required by 'option1': option2, option3" + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Debug without debug set" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_debug = $false + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Debug("debug message") + $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message + $actual_event | Assert-Equals -Expected "undefined win module - Invoked with:`r`n " + } + + "Debug with debug set" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_debug = $true + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Debug("debug message") + $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message + $actual_event | Assert-Equals -Expected "undefined win module - [DEBUG] debug message" + } + + "Deprecate and warn with version" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Deprecate("message", "2.7") + $actual_deprecate_event_1 = Get-EventLog -LogName Application -Source Ansible -Newest 1 + $m.Deprecate("message w collection", "2.8", "ansible.builtin") + $actual_deprecate_event_2 = Get-EventLog -LogName Application -Source Ansible -Newest 1 + $m.Warn("warning") + $actual_warn_event = Get-EventLog -LogName Application -Source Ansible -Newest 1 + + $actual_deprecate_event_1.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message 2.7" + $actual_deprecate_event_2.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message w collection 2.8" + $actual_warn_event.EntryType | Assert-Equals -Expected "Warning" + $actual_warn_event.Message | 
Assert-Equals -Expected "undefined win module - [WARNING] warning" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + warnings = @("warning") + deprecations = @( + @{msg = "message"; version = "2.7"; collection_name = $null}, + @{msg = "message w collection"; version = "2.8"; collection_name = "ansible.builtin"} + ) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "Deprecate and warn with date" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Deprecate("message", [DateTime]"2020-01-01") + $actual_deprecate_event_1 = Get-EventLog -LogName Application -Source Ansible -Newest 1 + $m.Deprecate("message w collection", [DateTime]"2020-01-02", "ansible.builtin") + $actual_deprecate_event_2 = Get-EventLog -LogName Application -Source Ansible -Newest 1 + $m.Warn("warning") + $actual_warn_event = Get-EventLog -LogName Application -Source Ansible -Newest 1 + + $actual_deprecate_event_1.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message 2020-01-01" + $actual_deprecate_event_2.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message w collection 2020-01-02" + $actual_warn_event.EntryType | Assert-Equals -Expected "Warning" + $actual_warn_event.Message | Assert-Equals -Expected "undefined win module - [WARNING] warning" + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + warnings = @("warning") + deprecations = @( + @{msg = "message"; date = "2020-01-01"; collection_name = $null}, + @{msg = "message w collection"; date = "2020-01-02"; collection_name = "ansible.builtin"} + ) + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "FailJson with message" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + $failed = $false + try { + $m.FailJson("fail message") + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + failed = $true + msg = "fail message" + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "FailJson with Exception" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + try { + [System.IO.Path]::GetFullPath($null) + } catch { + $excp = $_.Exception + } + + $failed = $false + try { + $m.FailJson("fail message", $excp) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + failed = 
$true + msg = "fail message" + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "FailJson with ErrorRecord" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + try { + Get-Item -LiteralPath $null + } catch { + $error_record = $_ + } + + $failed = $false + try { + $m.FailJson("fail message", $error_record) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + failed = $true + msg = "fail message" + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "FailJson with Exception and verbosity 3" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_verbosity = 3 + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + try { + [System.IO.Path]::GetFullPath($null) + } catch { + $excp = $_.Exception + } + + $failed = $false + try { + $m.FailJson("fail message", $excp) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}} + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected "fail message" + $actual.exception.Contains('System.Management.Automation.MethodInvocationException: Exception calling "GetFullPath" with "1" argument(s)') | Assert-Equals -Expected $true + } + + "FailJson with ErrorRecord and verbosity 3" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_verbosity = 3 + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + try { + Get-Item -LiteralPath $null + } catch { + $error_record = $_ + } + + $failed = $false + try { + $m.FailJson("fail message", $error_record) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}} + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected "fail message" + $actual.exception.Contains("Cannot bind argument to parameter 'LiteralPath' because it is null") | Assert-Equals -Expected $true + $actual.exception.Contains("+ Get-Item -LiteralPath `$null") | Assert-Equals -Expected $true + $actual.exception.Contains("ScriptStackTrace:") | Assert-Equals -Expected $true + } + + "Diff entry without diff set" = { + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Diff.before = @{a = "a"} + $m.Diff.after = @{b = "b"} + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $failed + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + } + $actual | 
Assert-DictionaryEquals -Expected $expected + } + + "Diff entry with diff set" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_diff = $true + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + $m.Diff.before = @{a = "a"} + $m.Diff.after = @{b = "b"} + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected = @{ + changed = $false + invocation = @{ + module_args = @{} + } + diff = @{ + before = @{a = "a"} + after = @{b = "b"} + } + } + $actual | Assert-DictionaryEquals -Expected $expected + } + + "ParseBool tests" = { + $mapping = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[Object], [Bool]]' + $mapping.Add("y", $true) + $mapping.Add("Y", $true) + $mapping.Add("yes", $true) + $mapping.Add("Yes", $true) + $mapping.Add("on", $true) + $mapping.Add("On", $true) + $mapping.Add("1", $true) + $mapping.Add(1, $true) + $mapping.Add("true", $true) + $mapping.Add("True", $true) + $mapping.Add("t", $true) + $mapping.Add("T", $true) + $mapping.Add("1.0", $true) + $mapping.Add(1.0, $true) + $mapping.Add($true, $true) + $mapping.Add("n", $false) + $mapping.Add("N", $false) + $mapping.Add("no", $false) + $mapping.Add("No", $false) + $mapping.Add("off", $false) + $mapping.Add("Off", $false) + $mapping.Add("0", $false) + $mapping.Add(0, $false) + $mapping.Add("false", $false) + $mapping.Add("False", $false) + $mapping.Add("f", $false) + $mapping.Add("F", $false) + $mapping.Add("0.0", $false) + $mapping.Add(0.0, $false) + $mapping.Add($false, $false) + + foreach ($map in $mapping.GetEnumerator()) { + $expected = $map.Value + $actual = [Ansible.Basic.AnsibleModule]::ParseBool($map.Key) + $actual | Assert-Equals -Expected $expected + $actual.GetType().FullName | Assert-Equals -Expected "System.Boolean" + } + + $fail_bools = @( + "falsey", + "abc", + 2, + "2", + -1 + ) + foreach ($fail_bool in $fail_bools) { + $failed = $false + try { + [Ansible.Basic.AnsibleModule]::ParseBool($fail_bool) + } catch { + $failed = $true + $_.Exception.Message.Contains("The value '$fail_bool' is not a valid boolean") | Assert-Equals -Expected $true + } + $failed | Assert-Equals -Expected $true + } + } + + "Unknown internal key" = { + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_invalid = "invalid" + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + + $expected = @{ + invocation = @{ + module_args = @{ + _ansible_invalid = "invalid" + } + } + changed = $false + failed = $true + msg = "Unsupported parameters for (undefined win module) module: _ansible_invalid. 
Supported parameters include: " + } + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + $actual | Assert-DictionaryEquals -Expected $expected + } + $failed | Assert-Equals -Expected $true + } + + "Module tmpdir with present remote tmp" = { + $current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + $dir_security = New-Object -TypeName System.Security.AccessControl.DirectorySecurity + $dir_security.SetOwner($current_user) + $dir_security.SetAccessRuleProtection($true, $false) + $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @( + $current_user, [System.Security.AccessControl.FileSystemRights]::FullControl, + [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit", + [System.Security.AccessControl.PropagationFlags]::None, [System.Security.AccessControl.AccessControlType]::Allow + ) + $dir_security.AddAccessRule($ace) + $expected_sd = $dir_security.GetSecurityDescriptorSddlForm("Access, Owner") + + $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)" + New-Item -Path $remote_tmp -ItemType Directory > $null + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_remote_tmp = $remote_tmp.ToString() + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + + $actual_tmpdir = $m.Tmpdir + $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent + $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf + + $parent_tmpdir | Assert-Equals -Expected $remote_tmp + $tmpdir_name.StartSwith("ansible-moduletmp-") | Assert-Equals -Expected $true + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + $children = [System.IO.Directory]::EnumerateDirectories($remote_tmp) + $children.Count | Assert-Equals -Expected 1 + $actual_tmpdir_sd = (Get-Acl -Path $actual_tmpdir).GetSecurityDescriptorSddlForm("Access, Owner") + $actual_tmpdir_sd | Assert-Equals -Expected $expected_sd + + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $false + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + $output.warnings.Count | Assert-Equals -Expected 0 + } + + "Module tmpdir with missing remote_tmp" = { + $current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + $dir_security = New-Object -TypeName System.Security.AccessControl.DirectorySecurity + $dir_security.SetOwner($current_user) + $dir_security.SetAccessRuleProtection($true, $false) + $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @( + $current_user, [System.Security.AccessControl.FileSystemRights]::FullControl, + [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit", + [System.Security.AccessControl.PropagationFlags]::None, [System.Security.AccessControl.AccessControlType]::Allow + ) + $dir_security.AddAccessRule($ace) + $expected_sd = $dir_security.GetSecurityDescriptorSddlForm("Access, Owner") + + $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)" + Set-Variable -Name complex_args -Scope Global -Value @{ + 
_ansible_remote_tmp = $remote_tmp.ToString() + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $false + + $actual_tmpdir = $m.Tmpdir + $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent + $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf + + $parent_tmpdir | Assert-Equals -Expected $remote_tmp + $tmpdir_name.StartsWith("ansible-moduletmp-") | Assert-Equals -Expected $true + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + $children = [System.IO.Directory]::EnumerateDirectories($remote_tmp) + $children.Count | Assert-Equals -Expected 1 + $actual_remote_sd = (Get-Acl -Path $remote_tmp).GetSecurityDescriptorSddlForm("Access, Owner") + $actual_tmpdir_sd = (Get-Acl -Path $actual_tmpdir).GetSecurityDescriptorSddlForm("Access, Owner") + $actual_remote_sd | Assert-Equals -Expected $expected_sd + $actual_tmpdir_sd | Assert-Equals -Expected $expected_sd + + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $false + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + $output.warnings.Count | Assert-Equals -Expected 1 + $nt_account = $current_user.Translate([System.Security.Principal.NTAccount]) + $actual_warning = "Module remote_tmp $remote_tmp did not exist and was created with FullControl to $nt_account, " + $actual_warning += "this may cause issues when running as another user. 
To avoid this, " + $actual_warning += "create the remote_tmp dir with the correct permissions manually" + $actual_warning | Assert-Equals -Expected $output.warnings[0] + } + + "Module tmp, keep remote files" = { + $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)" + New-Item -Path $remote_tmp -ItemType Directory > $null + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_remote_tmp = $remote_tmp.ToString() + _ansible_keep_remote_files = $true + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{}) + + $actual_tmpdir = $m.Tmpdir + $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent + $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf + + $parent_tmpdir | Assert-Equals -Expected $remote_tmp + $tmpdir_name.StartSwith("ansible-moduletmp-") | Assert-Equals -Expected $true + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true + (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true + $output.warnings.Count | Assert-Equals -Expected 0 + Remove-Item -LiteralPath $actual_tmpdir -Force -Recurse + } + + "Invalid argument spec key" = { + $spec = @{ + invalid = $true + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: argument spec entry contains an invalid key 'invalid', valid keys: apply_defaults, " + $expected_msg += "aliases, choices, default, deprecated_aliases, elements, mutually_exclusive, no_log, options, " + $expected_msg += "removed_in_version, removed_at_date, removed_from_collection, required, required_by, required_if, " + $expected_msg += "required_one_of, required_together, supports_check_mode, type" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid argument spec key - nested" = { + $spec = @{ + options = @{ + option_key = @{ + options = @{ + sub_option_key = @{ + invalid = $true + } + } + } + } + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: argument spec entry contains an invalid key 'invalid', valid keys: apply_defaults, " + $expected_msg += "aliases, choices, default, deprecated_aliases, elements, mutually_exclusive, no_log, options, " + $expected_msg += "removed_in_version, removed_at_date, removed_from_collection, required, required_by, required_if, " + $expected_msg += "required_one_of, required_together, supports_check_mode, type - 
found in option_key -> sub_option_key" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid argument spec value type" = { + $spec = @{ + apply_defaults = "abc" + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: argument spec for 'apply_defaults' did not match expected " + $expected_msg += "type System.Boolean: actual type System.String" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid argument spec option type" = { + $spec = @{ + options = @{ + option_key = @{ + type = "invalid type" + } + } + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: type 'invalid type' is unsupported - found in option_key. " + $expected_msg += "Valid types are: bool, dict, float, int, json, list, path, raw, sid, str" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid argument spec option element type" = { + $spec = @{ + options = @{ + option_key = @{ + type = "list" + elements = "invalid type" + } + } + } + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: elements 'invalid type' is unsupported - found in option_key. 
" + $expected_msg += "Valid types are: bool, dict, float, int, json, list, path, raw, sid, str" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid deprecated aliases entry - no version and date" = { + $spec = @{ + options = @{ + option_key = @{ + type = "str" + aliases = ,"alias_name" + deprecated_aliases = @( + @{name = "alias_name"} + ) + } + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: One of version or date is required in a deprecated_aliases entry" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid deprecated aliases entry - no name (nested)" = { + $spec = @{ + options = @{ + option_key = @{ + type = "dict" + options = @{ + sub_option_key = @{ + type = "str" + aliases = ,"alias_name" + deprecated_aliases = @( + @{version = "2.10"} + ) + } + } + } + } + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = @{ + sub_option_key = "a" + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.ArgumentException] { + $failed = $true + $expected_msg = "name is required in a deprecated_aliases entry - found in option_key" + $_.Exception.Message | Assert-Equals -Expected $expected_msg + } + $failed | Assert-Equals -Expected $true + } + + "Invalid deprecated aliases entry - both version and date" = { + $spec = @{ + options = @{ + option_key = @{ + type = "str" + aliases = ,"alias_name" + deprecated_aliases = @( + @{ + name = "alias_name" + date = [DateTime]"2020-03-10" + version = "2.11" + } + ) + } + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: Only one of version or date is allowed in a deprecated_aliases entry" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Invalid deprecated aliases entry - wrong date type" = { + $spec = @{ + options = @{ + option_key = @{ + type = "str" + aliases = ,"alias_name" + deprecated_aliases = @( + @{ + name = "alias_name" + date = "2020-03-10" + } + ) + } + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = 
"internal error: A deprecated_aliases date must be a DateTime object" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Spec required and default set at the same time" = { + $spec = @{ + options = @{ + option_key = @{ + required = $true + default = "default value" + } + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "internal error: required and default are mutually exclusive for option_key" + + $actual.Keys.Count | Assert-Equals -Expected 3 + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true + } + + "Unsupported options" = { + $spec = @{ + options = @{ + option_key = @{ + type = "str" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "abc" + invalid_key = "def" + another_key = "ghi" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "Unsupported parameters for (undefined win module) module: another_key, invalid_key. 
" + $expected_msg += "Supported parameters include: option_key" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Check mode and module doesn't support check mode" = { + $spec = @{ + options = @{ + option_key = @{ + type = "str" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_check_mode = $true + option_key = "abc" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "remote module (undefined win module) does not support check mode" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.skipped | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "abc"}} + } + + "Check mode with suboption without supports_check_mode" = { + $spec = @{ + options = @{ + sub_options = @{ + # This tests the situation where a sub key doesn't set supports_check_mode, the logic in + # Ansible.Basic automatically sets that to $false and we want it to ignore it for a nested check + type = "dict" + options = @{ + sub_option = @{ type = "str"; default = "value" } + } + } + } + supports_check_mode = $true + } + Set-Variable -Name complex_args -Scope Global -Value @{ + _ansible_check_mode = $true + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $m.CheckMode | Assert-Equals -Expected $true + } + + "Type conversion error" = { + $spec = @{ + options = @{ + option_key = @{ + type = "int" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "a" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "argument for option_key is of type System.String and we were unable to convert to int: " + $expected_msg += "Input string was not in a correct format." 
+ + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Type conversion error - delegate" = { + $spec = @{ + options = @{ + option_key = @{ + type = "dict" + options = @{ + sub_option_key = @{ + type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) } + } + } + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = @{ + sub_option_key = "a" + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "argument for sub_option_key is of type System.String and we were unable to convert to delegate: " + $expected_msg += "Exception calling `"Parse`" with `"1`" argument(s): `"Input string was not in a correct format.`" " + $expected_msg += "found in option_key" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Numeric choices" = { + $spec = @{ + options = @{ + option_key = @{ + choices = 1, 2, 3 + type = "int" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "2" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $output.Keys.Count | Assert-Equals -Expected 2 + $output.changed | Assert-Equals -Expected $false + $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = 2}} + } + + "Case insensitive choice" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "abc", "def" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "ABC" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $expected_warning = "value of option_key was a case insensitive match of one of: abc, def. " + $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. 
" + $expected_warning += "Case insensitive matches were: ABC" + + $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "ABC"}} + # We have disabled the warnings for now + #$output.warnings.Count | Assert-Equals -Expected 1 + #$output.warnings[0] | Assert-Equals -Expected $expected_warning + } + + "Case insensitive choice no_log" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "abc", "def" + no_log = $true + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "ABC" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $expected_warning = "value of option_key was a case insensitive match of one of: abc, def. " + $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. " + $expected_warning += "Case insensitive matches were: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + + $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"}} + # We have disabled the warnings for now + #$output.warnings.Count | Assert-Equals -Expected 1 + #$output.warnings[0] | Assert-Equals -Expected $expected_warning + } + + "Case insentitive choice as list" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "abc", "def", "ghi", "JKL" + type = "list" + elements = "str" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "AbC", "ghi", "jkl" + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $expected_warning = "value of option_key was a case insensitive match of one or more of: abc, def, ghi, JKL. " + $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. " + $expected_warning += "Case insensitive matches were: AbC, jkl" + + $output.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + # We have disabled the warnings for now + #$output.warnings.Count | Assert-Equals -Expected 1 + #$output.warnings[0] | Assert-Equals -Expected $expected_warning + } + + "Invalid choice" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "a", "b" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "c" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "value of option_key must be one of: a, b. 
Got no match for: c" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Invalid choice with no_log" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "a", "b" + no_log = $true + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "abc" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "value of option_key must be one of: a, b. Got no match for: ********" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"}} + } + + "Invalid choice in list" = { + $spec = @{ + options = @{ + option_key = @{ + choices = "a", "b" + type = "list" + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = "a", "c" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "value of option_key must be one or more of: a, b. 
Got no match for: c" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Mutually exclusive options" = { + $spec = @{ + options = @{ + option1 = @{} + option2 = @{} + } + mutually_exclusive = @(,@("option1", "option2")) + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "a" + option2 = "b" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "parameters are mutually exclusive: option1, option2" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Missing required argument" = { + $spec = @{ + options = @{ + option1 = @{} + option2 = @{required = $true} + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "a" + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $expected_msg = "missing required arguments: option2" + + $actual.Keys.Count | Assert-Equals -Expected 4 + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected $expected_msg + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Missing required argument subspec - no value defined" = { + $spec = @{ + options = @{ + option_key = @{ + type = "dict" + options = @{ + sub_option_key = @{ + required = $true + } + } + } + } + } + + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.Keys.Count | Assert-Equals -Expected 2 + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Missing required argument subspec" = { + $spec = @{ + options = @{ + option_key = @{ + type = "dict" + options = @{ + sub_option_key = @{ + required = $true + } + another_key = @{} + } + } + } + } + Set-Variable -Name complex_args -Scope Global -Value @{ + option_key = @{ + another_key = "abc" + } + } + + $failed = $false + try { + $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = 
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "missing required arguments: sub_option_key found in option_key"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required together not set" = {
+        $spec = @{
+            options = @{
+                option1 = @{}
+                option2 = @{}
+            }
+            required_together = @(,@("option1", "option2"))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            option1 = "abc"
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "parameters are required together: option1, option2"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required together not set - subspec" = {
+        $spec = @{
+            options = @{
+                option_key = @{
+                    type = "dict"
+                    options = @{
+                        option1 = @{}
+                        option2 = @{}
+                    }
+                    required_together = @(,@("option1", "option2"))
+                }
+                another_option = @{}
+            }
+            required_together = @(,@("option_key", "another_option"))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            option_key = @{
+                option1 = "abc"
+            }
+            another_option = "def"
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "parameters are required together: option1, option2 found in option_key"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required one of not set" = {
+        $spec = @{
+            options = @{
+                option1 = @{}
+                option2 = @{}
+                option3 = @{}
+            }
+            required_one_of = @(@("option1", "option2"), @("option2", "option3"))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            option1 = "abc"
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "one of the following is required: option2, option3"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required if invalid entries" = {
+        $spec = @{
+            options = @{
+                state = @{choices = "absent", "present"; default = "present"}
+                path = @{type = "path"}
+            }
+            required_if = @(,@("state", "absent"))
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "internal error: invalid required_if value count of 2, expecting 3 or 4 entries"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required if no missing option" = {
+        $spec = @{
+            options = @{
+                state = @{choices = "absent", "present"; default = "present"}
+                name = @{}
+                path = @{type = "path"}
+            }
+            required_if = @(,@("state", "absent", @("name", "path")))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            name = "abc"
+        }
+        $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+        $failed = $false
+        try {
+            $m.ExitJson()
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $actual.Keys.Count | Assert-Equals -Expected 2
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required if missing option" = {
+        $spec = @{
+            options = @{
+                state = @{choices = "absent", "present"; default = "present"}
+                name = @{}
+                path = @{type = "path"}
+            }
+            required_if = @(,@("state", "absent", @("name", "path")))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            state = "absent"
+            name = "abc"
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "state is absent but all of the following are missing: path"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
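+    # A required_if entry holds 3 or 4 elements: the option to watch, the value that triggers
+    # the check, and the options that must then be supplied. The optional fourth element
+    # ($true below) relaxes the check from "all of the following" to "any of the following",
+    # which is the difference between the error messages asserted above and below.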
+    "Required if missing option and required one is set" = {
+        $spec = @{
+            options = @{
+                state = @{choices = "absent", "present"; default = "present"}
+                name = @{}
+                path = @{type = "path"}
+            }
+            required_if = @(,@("state", "absent", @("name", "path"), $true))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            state = "absent"
+        }
+
+        $failed = $false
+        try {
+            $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $expected_msg = "state is absent but any of the following are missing: name, path"
+
+        $actual.Keys.Count | Assert-Equals -Expected 4
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.failed | Assert-Equals -Expected $true
+        $actual.msg | Assert-Equals -Expected $expected_msg
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "Required if missing option but one required set" = {
+        $spec = @{
+            options = @{
+                state = @{choices = "absent", "present"; default = "present"}
+                name = @{}
+                path = @{type = "path"}
+            }
+            required_if = @(,@("state", "absent", @("name", "path"), $true))
+        }
+        Set-Variable -Name complex_args -Scope Global -Value @{
+            state = "absent"
+            name = "abc"
+        }
+        $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+        $failed = $false
+        try {
+            $m.ExitJson()
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $actual.Keys.Count | Assert-Equals -Expected 2
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+    }
+
+    "PS Object in return result" = {
+        $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+        # JavaScriptSerializer struggles with PS objects like PSCustomObject due to circular
+        # references; this test makes sure we can handle these types of objects without bombing
+        $m.Result.output = [PSCustomObject]@{a = "a"; b = "b"}
+        $failed = $false
+        try {
+            $m.ExitJson()
+        } catch [System.Management.Automation.RuntimeException] {
+            $failed = $true
+            $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+            $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+        }
+        $failed | Assert-Equals -Expected $true
+
+        $actual.Keys.Count | Assert-Equals -Expected 3
+        $actual.changed | Assert-Equals -Expected $false
+        $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}}
+        $actual.output | Assert-DictionaryEquals -Expected @{a = "a"; b = "b"}
+    }
+
+    "String json array to object" = {
+        $input_json = '["abc", "def"]'
+        $actual = [Ansible.Basic.AnsibleModule]::FromJson($input_json)
+        $actual -is [Array] | Assert-Equals -Expected $true
+        $actual.Length | Assert-Equals -Expected 2
+        $actual[0] | Assert-Equals -Expected "abc"
+        $actual[1] | Assert-Equals -Expected "def"
+    }
+
+    "String json array of dictionaries to object" = {
+        $input_json = '[{"abc":"def"}]'
+        $actual = [Ansible.Basic.AnsibleModule]::FromJson($input_json)
+        $actual -is [Array] | Assert-Equals -Expected $true
+        $actual.Length | Assert-Equals -Expected 1
+        $actual[0] | Assert-DictionaryEquals -Expected @{"abc" = "def"}
+    }
+
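+    # The remaining tests cover spec fragments: Create() merges each fragment's options into
+    # the base spec, and list-valued keys such as mutually_exclusive and deprecated_aliases
+    # are appended to rather than replaced.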
Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args} + } + + "Fragment spec that with a deprecated alias" = { + $spec = @{ + options = @{ + option1 = @{ + aliases = @("alias1_spec") + type = "str" + deprecated_aliases = @( + @{name = "alias1_spec"; version = "2.0"} + ) + } + option2 = @{ + aliases = @("alias2_spec") + deprecated_aliases = @( + @{name = "alias2_spec"; version = "2.0"; collection_name = "ansible.builtin"} + ) + } + } + } + $fragment1 = @{ + options = @{ + option1 = @{ + aliases = @("alias1") + deprecated_aliases = @() # Makes sure it doesn't overwrite the spec, just adds to it. + } + option2 = @{ + aliases = @("alias2") + deprecated_aliases = @( + @{name = "alias2"; version = "2.0"; collection_name = "foo.bar"} + ) + type = "str" + } + } + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + alias1_spec = "option1" + alias2 = "option2" + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1)) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.deprecations.Count | Assert-Equals -Expected 2 + $actual.deprecations[0] | Assert-DictionaryEquals -Expected @{ + msg = "Alias 'alias1_spec' is deprecated. See the module docs for more information"; version = "2.0"; collection_name = $null + } + $actual.deprecations[1] | Assert-DictionaryEquals -Expected @{ + msg = "Alias 'alias2' is deprecated. 
See the module docs for more information"; version = "2.0"; collection_name = "foo.bar" + } + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{ + module_args = @{ + option1 = "option1" + alias1_spec = "option1" + option2 = "option2" + alias2 = "option2" + } + } + } + + "Fragment spec with mutual args" = { + $spec = @{ + options = @{ + option1 = @{ type = "str" } + option2 = @{ type = "str" } + } + mutually_exclusive = @( + ,@('option1', 'option2') + ) + } + $fragment1 = @{ + options = @{ + fragment1_1 = @{ type = "str" } + fragment1_2 = @{ type = "str" } + } + mutually_exclusive = @( + ,@('fragment1_1', 'fragment1_2') + ) + } + $fragment2 = @{ + options = @{ + fragment2 = @{ type = "str" } + } + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + fragment1_1 = "fragment1_1" + fragment1_2 = "fragment1_2" + fragment2 = "fragment2" + } + + $failed = $false + try { + [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1, $fragment2)) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.changed | Assert-Equals -Expected $false + $actual.failed | Assert-Equals -Expected $true + $actual.msg | Assert-Equals -Expected "parameters are mutually exclusive: fragment1_1, fragment1_2" + $actual.invocation | Assert-DictionaryEquals -Expected @{ module_args = $complex_args } + } + + "Fragment spec with no_log" = { + $spec = @{ + options = @{ + option1 = @{ + aliases = @("alias") + } + } + } + $fragment1 = @{ + options = @{ + option1 = @{ + no_log = $true # Makes sure that a value set in the fragment but not in the spec is respected. 
+ type = "str" + } + } + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + alias = "option1" + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1)) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.changed | Assert-Equals -Expected $false + $actual.invocation | Assert-DictionaryEquals -Expected @{ + module_args = @{ + option1 = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + alias = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + } + } + + "Catch invalid fragment spec format" = { + $spec = @{ + options = @{ + option1 = @{ type = "str" } + } + } + $fragment = @{ + options = @{} + invalid = "will fail" + } + + Set-Variable -Name complex_args -Scope Global -Value @{ + option1 = "option1" + } + + $failed = $false + try { + [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment)) + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 1" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.failed | Assert-Equals -Expected $true + $actual.msg.StartsWith("internal error: argument spec entry contains an invalid key 'invalid', valid keys: ") | Assert-Equals -Expected $true + } + + "Spec with different list types" = { + $spec = @{ + options = @{ + # Single element of the same list type not in a list + option1 = @{ + aliases = "alias1" + deprecated_aliases = @{name="alias1";version="2.0";collection_name="foo.bar"} + } + + # Arrays + option2 = @{ + aliases = ,"alias2" + deprecated_aliases = ,@{name="alias2";version="2.0";collection_name="foo.bar"} + } + + # ArrayList + option3 = @{ + aliases = [System.Collections.ArrayList]@("alias3") + deprecated_aliases = [System.Collections.ArrayList]@(@{name="alias3";version="2.0";collection_name="foo.bar"}) + } + + # Generic.List[Object] + option4 = @{ + aliases = [System.Collections.Generic.List[Object]]@("alias4") + deprecated_aliases = [System.Collections.Generic.List[Object]]@(@{name="alias4";version="2.0";collection_name="foo.bar"}) + } + + # Generic.List[T] + option5 = @{ + aliases = [System.Collections.Generic.List[String]]@("alias5") + deprecated_aliases = [System.Collections.Generic.List[Hashtable]]@() + } + } + } + $spec.options.option5.deprecated_aliases.Add(@{name="alias5";version="2.0";collection_name="foo.bar"}) + + Set-Variable -Name complex_args -Scope Global -Value @{ + alias1 = "option1" + alias2 = "option2" + alias3 = "option3" + alias4 = "option4" + alias5 = "option5" + } + $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec) + + $failed = $false + try { + $m.ExitJson() + } catch [System.Management.Automation.RuntimeException] { + $failed = $true + $_.Exception.Message | Assert-Equals -Expected "exit: 0" + $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output) + } + $failed | Assert-Equals -Expected $true + + $actual.changed | Assert-Equals -Expected $false + $actual.deprecations.Count | Assert-Equals -Expected 5 + foreach ($dep in $actual.deprecations) { + $dep.msg -like "Alias 'alias?' is deprecated. 
See the module docs for more information" | Assert-Equals -Expected $true
+            $dep.version | Assert-Equals -Expected '2.0'
+            $dep.collection_name | Assert-Equals -Expected 'foo.bar'
+        }
+        $actual.invocation | Assert-DictionaryEquals -Expected @{
+            module_args = @{
+                alias1 = "option1"
+                option1 = "option1"
+                alias2 = "option2"
+                option2 = "option2"
+                alias3 = "option3"
+                option3 = "option3"
+                alias4 = "option4"
+                option4 = "option4"
+                alias5 = "option5"
+                option5 = "option5"
+            }
+        }
+    }
+}
+
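+# The tests above rely on ExitJson/FailJson being redirected (set up earlier in this test
+# module) to throw a RuntimeException whose message is "exit: <rc>"; the runner below uses
+# that convention to tell an expected module exit apart from an unrelated test failure.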
+try {
+    foreach ($test_impl in $tests.GetEnumerator()) {
+        # Reset the variables before each test
+        Set-Variable -Name complex_args -Value @{} -Scope Global
+
+        $test = $test_impl.Key
+        &$test_impl.Value
+    }
+    $module.Result.data = "success"
+} catch [System.Management.Automation.RuntimeException] {
+    $module.Result.failed = $true
+    $module.Result.test = $test
+    $module.Result.line = $_.InvocationInfo.ScriptLineNumber
+    $module.Result.method = $_.InvocationInfo.Line.Trim()
+
+    if ($_.Exception.Message.StartsWith("exit: ")) {
+        # The exception was caused by an unexpected Exit call; log that on the output
+        $module.Result.output = (ConvertFrom-Json -InputObject $_.Exception.InnerException.Output)
+        $module.Result.msg = "Uncaught AnsibleModule exit in tests, see output"
+    } else {
+        # Unrelated exception
+        $module.Result.exception = $_.Exception.ToString()
+        $module.Result.msg = "Uncaught exception: $(($_ | Out-String).ToString())"
+    }
+}
+
+Exit-Module
diff --git a/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml
new file mode 100644
index 00000000..010c2d50
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Basic.cs
+  ansible_basic_tests:
+  register: ansible_basic_test
+
+- name: assert test Ansible.Basic.cs
+  assert:
+    that:
+    - ansible_basic_test.data == "success"
diff --git a/test/integration/targets/module_utils_Ansible.Become/aliases b/test/integration/targets/module_utils_Ansible.Become/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Become/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1 b/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1
new file mode 100644
index 00000000..4d1f319b
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1
@@ -0,0 +1,1009 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil Ansible.Become
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+    param(
+        [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+        [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+    )
+
+    $matched = $false
+    if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+        $Actual.Count | Assert-Equals -Expected $Expected.Count
+        for ($i = 0; $i -lt $Actual.Count; $i++) {
+            $actual_value = $Actual[$i]
+            $expected_value = $Expected[$i]
+            Assert-Equals -Actual $actual_value -Expected $expected_value
+        }
+        $matched = $true
+    } else {
+        $matched = $Actual -ceq $Expected
+    }
+
+    if (-not $matched) {
+        if ($Actual -is [PSObject]) {
+            $Actual = $Actual.ToString()
+        }
+
+        $call_stack = (Get-PSCallStack)[1]
+        $module.Result.test = $test
+        $module.Result.actual = $Actual
+        $module.Result.expected = $Expected
+        $module.Result.line = $call_stack.ScriptLineNumber
+        $module.Result.method = $call_stack.Position.Text
+        $module.FailJson("AssertionError: actual != expected")
+    }
+}
+
+# Would be great to move win_whoami out into its own module util and share the
+# code here; for now just rely on a cut-down version
+$test_whoami = {
+    Add-Type -TypeDefinition @'
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible
+{
+    internal class NativeHelpers
+    {
+        [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+        public struct LSA_UNICODE_STRING
+        {
+            public UInt16 Length;
+            public UInt16 MaximumLength;
+            public IntPtr Buffer;
+
+            public override string ToString()
+            {
+                return Marshal.PtrToStringUni(Buffer, Length / sizeof(char));
+            }
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct LUID
+        {
+            public UInt32 LowPart;
+            public Int32 HighPart;
+
+            public static explicit operator UInt64(LUID l)
+            {
+                return (UInt64)((UInt64)l.HighPart << 32) | (UInt64)l.LowPart;
+            }
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct SECURITY_LOGON_SESSION_DATA
+        {
+            public UInt32 Size;
+            public LUID LogonId;
+            public LSA_UNICODE_STRING UserName;
+            public LSA_UNICODE_STRING LogonDomain;
+            public LSA_UNICODE_STRING AuthenticationPackage;
+            public SECURITY_LOGON_TYPE LogonType;
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct SID_AND_ATTRIBUTES
+        {
+            public IntPtr Sid;
+            public int Attributes;
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct TOKEN_MANDATORY_LABEL
+        {
+            public SID_AND_ATTRIBUTES Label;
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct TOKEN_SOURCE
+        {
+            [MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)] public char[] SourceName;
+            public LUID SourceIdentifier;
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct TOKEN_STATISTICS
+        {
+            public LUID TokenId;
+            public LUID AuthenticationId;
+        }
+
+        [StructLayout(LayoutKind.Sequential)]
+        public struct TOKEN_USER
+        {
+            public SID_AND_ATTRIBUTES User;
+        }
+
+        public enum SECURITY_LOGON_TYPE
+        {
+            System = 0,  // Used only by the System account
+            Interactive = 2,
+            Network,
+            Batch,
+            Service,
+            Proxy,
+            Unlock,
+            NetworkCleartext,
+            NewCredentials,
+            RemoteInteractive,
+            CachedInteractive,
+            CachedRemoteInteractive,
+            CachedUnlock
+        }
+
+        public enum TokenInformationClass
+        {
+            TokenUser = 1,
+            TokenSource = 7,
+            TokenStatistics = 10,
+            TokenIntegrityLevel = 25,
+        }
+    }
+
+    internal class NativeMethods
+    {
+        [DllImport("kernel32.dll", SetLastError = true)]
+        public static extern bool CloseHandle(
+            IntPtr hObject);
+
+        [DllImport("kernel32.dll")]
+        public static extern SafeNativeHandle GetCurrentProcess();
+
+        [DllImport("userenv.dll", SetLastError = true)]
+        public static extern bool GetProfileType(
+            out UInt32 dwFlags);
+
+        [DllImport("advapi32.dll", SetLastError = true)]
+        public static extern bool GetTokenInformation(
+            SafeNativeHandle TokenHandle,
+            NativeHelpers.TokenInformationClass TokenInformationClass,
+            SafeMemoryBuffer TokenInformation,
+            UInt32 TokenInformationLength,
+            out UInt32 ReturnLength);
+
+        [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+        public static extern bool LookupAccountSid(
+            string lpSystemName,
+            IntPtr Sid,
+            StringBuilder lpName,
+            ref UInt32 cchName,
+            StringBuilder
ReferencedDomainName, + ref UInt32 cchReferencedDomainName, + out UInt32 peUse); + + [DllImport("secur32.dll", SetLastError = true)] + public static extern UInt32 LsaEnumerateLogonSessions( + out UInt32 LogonSessionCount, + out SafeLsaMemoryBuffer LogonSessionList); + + [DllImport("secur32.dll", SetLastError = true)] + public static extern UInt32 LsaFreeReturnBuffer( + IntPtr Buffer); + + [DllImport("secur32.dll", SetLastError = true)] + public static extern UInt32 LsaGetLogonSessionData( + IntPtr LogonId, + out SafeLsaMemoryBuffer ppLogonSessionData); + + [DllImport("advapi32.dll")] + public static extern UInt32 LsaNtStatusToWinError( + UInt32 Status); + + [DllImport("advapi32.dll", SetLastError = true)] + public static extern bool OpenProcessToken( + SafeNativeHandle ProcessHandle, + TokenAccessLevels DesiredAccess, + out SafeNativeHandle TokenHandle); + } + + internal class SafeLsaMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid + { + public SafeLsaMemoryBuffer() : base(true) { } + + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() + { + UInt32 res = NativeMethods.LsaFreeReturnBuffer(handle); + return res == 0; + } + } + + internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid + { + public SafeMemoryBuffer() : base(true) { } + public SafeMemoryBuffer(int cb) : base(true) + { + base.SetHandle(Marshal.AllocHGlobal(cb)); + } + public SafeMemoryBuffer(IntPtr handle) : base(true) + { + base.SetHandle(handle); + } + + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() + { + Marshal.FreeHGlobal(handle); + return true; + } + } + + internal class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid + { + public SafeNativeHandle() : base(true) { } + public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; } + + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() + { + return NativeMethods.CloseHandle(handle); + } + } + + public class Win32Exception : System.ComponentModel.Win32Exception + { + private string _msg; + + public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { } + public Win32Exception(int errorCode, string message) : base(errorCode) + { + _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode); + } + + public override string Message { get { return _msg; } } + public static explicit operator Win32Exception(string message) { return new Win32Exception(message); } + } + + public class Logon + { + public string AuthenticationPackage { get; internal set; } + public string LogonType { get; internal set; } + public string MandatoryLabelName { get; internal set; } + public SecurityIdentifier MandatoryLabelSid { get; internal set; } + public bool ProfileLoaded { get; internal set; } + public string SourceName { get; internal set; } + public string UserName { get; internal set; } + public SecurityIdentifier UserSid { get; internal set; } + + public Logon() + { + using (SafeNativeHandle process = NativeMethods.GetCurrentProcess()) + { + TokenAccessLevels dwAccess = TokenAccessLevels.Query | TokenAccessLevels.QuerySource; + + SafeNativeHandle hToken; + NativeMethods.OpenProcessToken(process, dwAccess, out hToken); + using (hToken) + { + SetLogonSessionData(hToken); + SetTokenMandatoryLabel(hToken); + SetTokenSource(hToken); + SetTokenUser(hToken); + } + } + SetProfileLoaded(); + } + + private void 
SetLogonSessionData(SafeNativeHandle hToken) + { + NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenStatistics; + UInt32 returnLength; + NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength); + + UInt64 tokenLuidId; + using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength)) + { + if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength)) + throw new Win32Exception("GetTokenInformation(TokenStatistics) failed"); + + NativeHelpers.TOKEN_STATISTICS stats = (NativeHelpers.TOKEN_STATISTICS)Marshal.PtrToStructure( + infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_STATISTICS)); + tokenLuidId = (UInt64)stats.AuthenticationId; + } + + UInt32 sessionCount; + SafeLsaMemoryBuffer sessionPtr; + UInt32 res = NativeMethods.LsaEnumerateLogonSessions(out sessionCount, out sessionPtr); + if (res != 0) + throw new Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res), "LsaEnumerateLogonSession() failed"); + using (sessionPtr) + { + IntPtr currentSession = sessionPtr.DangerousGetHandle(); + for (UInt32 i = 0; i < sessionCount; i++) + { + SafeLsaMemoryBuffer sessionDataPtr; + res = NativeMethods.LsaGetLogonSessionData(currentSession, out sessionDataPtr); + if (res != 0) + { + currentSession = IntPtr.Add(currentSession, Marshal.SizeOf(typeof(NativeHelpers.LUID))); + continue; + } + using (sessionDataPtr) + { + NativeHelpers.SECURITY_LOGON_SESSION_DATA sessionData = (NativeHelpers.SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure( + sessionDataPtr.DangerousGetHandle(), typeof(NativeHelpers.SECURITY_LOGON_SESSION_DATA)); + UInt64 sessionId = (UInt64)sessionData.LogonId; + if (sessionId == tokenLuidId) + { + AuthenticationPackage = sessionData.AuthenticationPackage.ToString(); + LogonType = sessionData.LogonType.ToString(); + break; + } + } + + currentSession = IntPtr.Add(currentSession, Marshal.SizeOf(typeof(NativeHelpers.LUID))); + } + } + } + + private void SetTokenMandatoryLabel(SafeNativeHandle hToken) + { + NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenIntegrityLevel; + UInt32 returnLength; + NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength); + using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength)) + { + if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength)) + throw new Win32Exception("GetTokenInformation(TokenIntegrityLevel) failed"); + NativeHelpers.TOKEN_MANDATORY_LABEL label = (NativeHelpers.TOKEN_MANDATORY_LABEL)Marshal.PtrToStructure( + infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_MANDATORY_LABEL)); + MandatoryLabelName = LookupSidName(label.Label.Sid); + MandatoryLabelSid = new SecurityIdentifier(label.Label.Sid); + } + } + + private void SetTokenSource(SafeNativeHandle hToken) + { + NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenSource; + UInt32 returnLength; + NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength); + using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength)) + { + if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength)) + throw new Win32Exception("GetTokenInformation(TokenSource) failed"); + NativeHelpers.TOKEN_SOURCE source = (NativeHelpers.TOKEN_SOURCE)Marshal.PtrToStructure( 
+                    infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_SOURCE));
+                SourceName = new string(source.SourceName).Replace('\0', ' ').TrimEnd();
+            }
+        }
+
+        private void SetTokenUser(SafeNativeHandle hToken)
+        {
+            NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenUser;
+            UInt32 returnLength;
+            NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength);
+            using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength))
+            {
+                if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength))
+                    throw new Win32Exception("GetTokenInformation(TokenUser) failed");
+                NativeHelpers.TOKEN_USER user = (NativeHelpers.TOKEN_USER)Marshal.PtrToStructure(
+                    infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_USER));
+                UserName = LookupSidName(user.User.Sid);
+                UserSid = new SecurityIdentifier(user.User.Sid);
+            }
+        }
+
+        private void SetProfileLoaded()
+        {
+            UInt32 flags;
+            ProfileLoaded = NativeMethods.GetProfileType(out flags);
+        }
+
+        private static string LookupSidName(IntPtr pSid)
+        {
+            StringBuilder name = new StringBuilder(0);
+            StringBuilder domain = new StringBuilder(0);
+            UInt32 nameLength = 0;
+            UInt32 domainLength = 0;
+            UInt32 peUse;
+            NativeMethods.LookupAccountSid(null, pSid, name, ref nameLength, domain, ref domainLength, out peUse);
+            name.EnsureCapacity((int)nameLength);
+            domain.EnsureCapacity((int)domainLength);
+
+            if (!NativeMethods.LookupAccountSid(null, pSid, name, ref nameLength, domain, ref domainLength, out peUse))
+                throw new Win32Exception("LookupAccountSid() failed");
+
+            return String.Format("{0}\\{1}", domain.ToString(), name.ToString());
+        }
+    }
+}
+'@
+    $logon = New-Object -TypeName Ansible.Logon
+    ConvertTo-Json -InputObject $logon
+}.ToString()
+
+$current_user_raw = [Ansible.Process.ProcessUtil]::CreateProcess($null, "powershell.exe -NoProfile -", $null, $null, $test_whoami + "`r`n")
+$current_user = ConvertFrom-Json -InputObject $current_user_raw.StandardOut
+
+$adsi = [ADSI]"WinNT://$env:COMPUTERNAME"
+
+$standard_user = "become_standard"
+$admin_user = "become_admin"
+$become_pass = "password123!$([System.IO.Path]::GetRandomFileName())"
+$medium_integrity_sid = "S-1-16-8192"
+$high_integrity_sid = "S-1-16-12288"
+$system_integrity_sid = "S-1-16-16384"
+
+$tests = @{
+    "Runas standard user" = {
+        $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass,
+            "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+        $actual.StandardError | Assert-Equals -Expected ""
+        $actual.ExitCode | Assert-Equals -Expected 0
+
+        $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+        $stdout.LogonType | Assert-Equals -Expected "Interactive"
+        $stdout.ProfileLoaded | Assert-Equals -Expected $true
+        $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+        $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+    }
+
+    "Runas admin user" = {
+        $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass,
+            "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+        $actual.StandardError | Assert-Equals -Expected ""
+        $actual.ExitCode | Assert-Equals -Expected 0
+        $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+        $stdout.LogonType | Assert-Equals -Expected "Interactive"
+        $stdout.ProfileLoaded | Assert-Equals -Expected $true
+        $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
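+        # A become'd admin should receive an elevated token: $high_integrity_sid (S-1-16-12288)
+        # is the high mandatory integrity label, vs medium (S-1-16-8192) for the standard user above.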
$stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + } + + "Runas SYSTEM" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, + "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "System" + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-18" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid + + $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\System", $null, "whoami.exe") + $with_domain.StandardOut | Assert-Equals -Expected "nt authority\system`r`n" + } + + "Runas LocalService" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("LocalService", $null, + "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Service" + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-19" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid + + $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\LocalService", $null, "whoami.exe") + $with_domain.StandardOut | Assert-Equals -Expected "nt authority\local service`r`n" + } + + "Runas NetworkService" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NetworkService", $null, + "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Service" + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-20" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid + + $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\NetworkService", $null, "whoami.exe") + $with_domain.StandardOut | Assert-Equals -Expected "nt authority\network service`r`n" + } + + "Runas without working dir set" = { + $expected = "$env:SystemRoot\system32`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe $pwd.Path', $null, $null, "") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas with working dir set" = { + $expected = "$env:SystemRoot`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe $pwd.Path', $env:SystemRoot, $null, "") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas without environment set" = { + $expected = "Windows_NT`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe $env:TEST; $env:OS', 
$null, $null, "") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas with environment set" = { + $env_vars = @{ + TEST = "tesTing" + TEST2 = "Testing 2" + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null, + 'cmd.exe /c set', $null, $env_vars, "") + ("TEST=tesTing" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + ("TEST2=Testing 2" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + ("OS=Windows_NT" -cnotin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas with string stdin" = { + $expected = "input value`r`n`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, "input value") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas with string stdin and newline" = { + $expected = "input value`r`n`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, "input value`r`n") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Runas with byte stdin" = { + $expected = "input value`r`n" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null, + 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value")) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "Missing executable" = { + $failed = $false + try { + [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, "fake.exe") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Win32Exception" + $expected = 'Exception calling "CreateProcessAsUser" with "3" argument(s): "CreateProcessWithTokenW() failed ' + $expected += '(The system cannot find the file specified, Win32ErrorCode 2)"' + $_.Exception.Message | Assert-Equals -Expected $expected + } + $failed | Assert-Equals -Expected $true + } + + "CreateProcessAsUser with lpApplicationName" = { + $expected = "abc`r`n" + $full_path = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe" + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $full_path, + "Write-Output 'abc'", $null, $null, "") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $full_path, + "powershell.exe Write-Output 'abc'", $null, $null, "") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcessAsUser with 
stderr" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $null, + "powershell.exe [System.Console]::Error.WriteLine('hi')", $null, $null, "") + $actual.StandardOut | Assert-Equals -Expected "" + $actual.StandardError | Assert-Equals -Expected "hi`r`n" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcessAsUser with exit code" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $null, + "powershell.exe exit 10", $null, $null, "") + $actual.StandardOut | Assert-Equals -Expected "" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 10 + } + + "Local account with computer name" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("$env:COMPUTERNAME\$standard_user", $become_pass, + "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Interactive" + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + } + + "Local account with computer as period" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser(".\$standard_user", $become_pass, + "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Interactive" + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + } + + "Local account with invalid password" = { + $failed = $false + try { + [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, "incorrect", "powershell.exe Write-Output abc") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Win32Exception" + # Server 2008 has a slightly different error msg, just assert we get the error 1326 + ($_.Exception.Message.Contains("Win32ErrorCode 1326")) | Assert-Equals -Expected $true + } + $failed | Assert-Equals -Expected $true + } + + "Invalid account" = { + $failed = $false + try { + [Ansible.Become.BecomeUtil]::CreateProcessAsUser("incorrect", "incorrect", "powershell.exe Write-Output abc") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "System.Security.Principal.IdentityNotMappedException" + $expected = 'Exception calling "CreateProcessAsUser" with "3" argument(s): "Some or all ' + $expected += 'identity references could not be translated."' + $_.Exception.Message | Assert-Equals -Expected $expected + } + $failed | Assert-Equals -Expected $true + } + + "Interactive logon with standard" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile", + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject 
$actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Interactive" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Batch logon with standard" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile", + "Batch", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Batch" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Network logon with standard" = { + # Server 2008 will not work with become to Network or Network Credentials + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile", + "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Network" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Network with cleartext logon with standard" = { + # Server 2008 will not work with become to Network or Network Cleartext + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile", + "NetworkCleartext", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "NetworkCleartext" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Logon without password with standard" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, [NullString]::Value, "WithProfile", + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + # Too unstable, there might be another process still lingering which causes become to steal instead of using + # S4U. 
Just don't check the type and source to verify we can become without a password + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + # $stdout.LogonType | Assert-Equals -Expected "Batch" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + # $stdout.SourceName | Assert-Equals -Expected "ansible" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Logon without password and network type with standard" = { + # Server 2008 will not work with become to Network or Network Cleartext + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, [NullString]::Value, "WithProfile", + "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + # Too unstable, there might be another process still lingering which causes become to steal instead of using + # S4U. Just don't check the type and source to verify we can become without a password + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + # $stdout.LogonType | Assert-Equals -Expected "Network" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + # $stdout.SourceName | Assert-Equals -Expected "ansible" + $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid + } + + "Interactive logon with admin" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile", + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Interactive" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Batch logon with admin" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile", + "Batch", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Batch" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Network logon with admin" = { + # Server 2008 will not work with become to Network or Network Credentials + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile", + "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject 
$actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Network" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Network with cleartext logon with admin" = { + # Server 2008 will not work with become to Network or Network Credentials + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile", + "NetworkCleartext", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "NetworkCleartext" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Fail to logon with null or empty password" = { + $failed = $false + try { + # Having $null or an empty string means we are trying to become a user with a blank password and not + # become without setting the password. This is confusing as $null gets converted to "" and we need to + # use [NullString]::Value instead if we want that behaviour. This just tests to see that an empty + # string won't go the S4U route. + [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $null, "WithProfile", + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Win32Exception" + # Server 2008 has a slightly different error msg, just assert we get the error 1326 + ($_.Exception.Message.Contains("Win32ErrorCode 1326")) | Assert-Equals -Expected $true + } + $failed | Assert-Equals -Expected $true + } + + "Logon without password with admin" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, [NullString]::Value, "WithProfile", + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + # Too unstable, there might be another process still lingering which causes become to steal instead of using + # S4U. 
Just don't check the type and source to verify we can become without a password + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + # $stdout.LogonType | Assert-Equals -Expected "Batch" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + # $stdout.SourceName | Assert-Equals -Expected "ansible" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Logon without password and network type with admin" = { + # become network doesn't work on Server 2008 + if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") { + continue + } + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, [NullString]::Value, "WithProfile", + "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + # Too unstable, there might be another process still lingering which causes become to steal instead of using + # S4U. Just don't check the type and source to verify we can become without a password + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + # $stdout.LogonType | Assert-Equals -Expected "Network" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $true + # $stdout.SourceName | Assert-Equals -Expected "ansible" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Logon without profile with admin" = { + # Server 2008 and 2008 R2 does not support running without the profile being set + if ([System.Environment]::OSVersion.Version -lt [Version]"6.2") { + continue + } + + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, + "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "Interactive" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid + $stdout.ProfileLoaded | Assert-Equals -Expected $false + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid + } + + "Logon with network credentials and no profile" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("fakeuser", "fakepassword", "NetcredentialsOnly", + "NewCredentials", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "NewCredentials" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $current_user.MandatoryLabelSid.Value + + # while we didn't set WithProfile, the new process is based on the current process + $stdout.ProfileLoaded | Assert-Equals -Expected $current_user.ProfileLoaded + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $current_user.UserSid.Value + } + + "Logon with network credentials and with profile" = { + $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("fakeuser", "fakepassword", "NetcredentialsOnly, WithProfile", + "NewCredentials", $null, "powershell.exe -NoProfile 
-", $tmp_dir, $null, $test_whoami + "`r`n") + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $stdout = ConvertFrom-Json -InputObject $actual.StandardOut + $stdout.LogonType | Assert-Equals -Expected "NewCredentials" + $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $current_user.MandatoryLabelSid.Value + $stdout.ProfileLoaded | Assert-Equals -Expected $current_user.ProfileLoaded + $stdout.SourceName | Assert-Equals -Expected "Advapi" + $stdout.UserSid.Value | Assert-Equals -Expected $current_user.UserSid.Value + } +} + +try { + $tmp_dir = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath ([System.IO.Path]::GetRandomFileName()) + New-Item -Path $tmp_dir -ItemType Directory > $null + $acl = Get-Acl -Path $tmp_dir + $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @( + New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList ([System.Security.Principal.WellKnownSidType]::WorldSid, $null) + [System.Security.AccessControl.FileSystemRights]::FullControl, + [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit", + [System.Security.AccessControl.PropagationFlags]::None, + [System.Security.AccessControl.AccessControlType]::Allow + ) + $acl.AddAccessRule($ace) + Set-Acl -Path $tmp_dir -AclObject $acl + + $tmp_script = Join-Path -Path $tmp_dir -ChildPath "whoami.ps1" + Set-Content -LiteralPath $tmp_script -Value $test_whoami + + foreach ($user in $standard_user, $admin_user) { + $user_obj = $adsi.Children | Where-Object { $_.SchemaClassName -eq "User" -and $_.Name -eq $user } + if ($null -eq $user_obj) { + $user_obj = $adsi.Create("User", $user) + $user_obj.SetPassword($become_pass) + $user_obj.SetInfo() + } else { + $user_obj.SetPassword($become_pass) + } + $user_obj.RefreshCache() + + if ($user -eq $standard_user) { + $standard_user_sid = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($user_obj.ObjectSid.Value, 0)).Value + $group = [System.Security.Principal.WellKnownSidType]::BuiltinUsersSid + } else { + $admin_user_sid = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($user_obj.ObjectSid.Value, 0)).Value + $group = [System.Security.Principal.WellKnownSidType]::BuiltinAdministratorsSid + } + $group = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList $group, $null).Value + [string[]]$current_groups = $user_obj.Groups() | ForEach-Object { + New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($_.GetType().InvokeMember("objectSID", "GetProperty", $null, $_, $null), 0) + } + if ($current_groups -notcontains $group) { + $group_obj = $adsi.Children | Where-Object { + if ($_.SchemaClassName -eq "Group") { + $group_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($_.objectSID.Value, 0) + $group_sid -eq $group + } + } + $group_obj.Add($user_obj.Path) + } + } + foreach ($test_impl in $tests.GetEnumerator()) { + $test = $test_impl.Key + &$test_impl.Value + } +} finally { + Remove-Item -LiteralPath $tmp_dir -Force -Recurse + foreach ($user in $standard_user, $admin_user) { + $user_obj = $adsi.Children | Where-Object { $_.SchemaClassName -eq "User" -and $_.Name -eq $user } + $adsi.Delete("User", $user_obj.Name.Value) + } +} + + +$module.Result.data = "success" +$module.ExitJson() + diff --git a/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml 
b/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml new file mode 100644 index 00000000..deb228b5 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml @@ -0,0 +1,28 @@ +--- +# Users don't have this right by default; enable it temporarily for the tests +- name: ensure the Users group has the SeBatchLogonRight + win_user_right: + name: SeBatchLogonRight + users: + - Users + action: add + register: batch_user_add + +- block: + - name: test Ansible.Become.cs + ansible_become_tests: + register: ansible_become_tests + + always: + - name: remove SeBatchLogonRight from users if added in test + win_user_right: + name: SeBatchLogonRight + users: + - Users + action: remove + when: batch_user_add is changed + +- name: assert test Ansible.Become.cs + assert: + that: + - ansible_become_tests.data == "success" diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1 new file mode 100644 index 00000000..d89f99b7 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1 @@ -0,0 +1,299 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.AddType + +$ErrorActionPreference = "Stop" + +$result = @{ + changed = $false +} + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + $call_stack = (Get-PSCallStack)[1] + $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)" + Fail-Json -obj $result -message $error_msg + } +} + +$code = @' +using System; + +namespace Namespace1 +{ + public class Class1 + { + public static string GetString(bool error) + { + if (error) + throw new Exception("error"); + return "Hello World"; + } + } +} +'@ +$res = Add-CSharpType -References $code +Assert-Equals -actual $res -expected $null + +$actual = [Namespace1.Class1]::GetString($false) +Assert-Equals $actual -expected "Hello World" + +try { + [Namespace1.Class1]::GetString($true) +} catch { + Assert-Equals ($_.Exception.ToString().Contains("at Namespace1.Class1.GetString(Boolean error)`r`n")) -expected $true +} + +$code_debug = @' +using System; + +namespace Namespace2 +{ + public class Class2 + { + public static string GetString(bool error) + { + if (error) + throw new Exception("error"); + return "Hello World"; + } + } +} +'@ +$res = Add-CSharpType -References $code_debug -IncludeDebugInfo +Assert-Equals -actual $res -expected $null + +$actual = [Namespace2.Class2]::GetString($false) +Assert-Equals $actual -expected "Hello World" + +try { + [Namespace2.Class2]::GetString($true) +} catch { + $tmp_path = [System.IO.Path]::GetFullPath($env:TMP).ToLower() + Assert-Equals ($_.Exception.ToString().ToLower().Contains("at namespace2.class2.getstring(boolean error) in $tmp_path")) -expected $true + Assert-Equals ($_.Exception.ToString().Contains(".cs:line 10")) -expected $true +} + +$code_tmp = @' +using System; + +namespace Namespace3 +{ + public class 
Class3 + { + public static string GetString(bool error) + { + if (error) + throw new Exception("error"); + return "Hello World"; + } + } +} +'@ +$tmp_path = $env:USERPROFILE +$res = Add-CSharpType -References $code_tmp -IncludeDebugInfo -TempPath $tmp_path -PassThru +Assert-Equals -actual $res.GetType().Name -expected "RuntimeAssembly" +Assert-Equals -actual $res.Location -expected "" +Assert-Equals -actual $res.GetTypes().Length -expected 1 +Assert-Equals -actual $res.GetTypes()[0].Name -expected "Class3" + +$actual = [Namespace3.Class3]::GetString($false) +Assert-Equals $actual -expected "Hello World" + +try { + [Namespace3.Class3]::GetString($true) +} catch { + Assert-Equals ($_.Exception.ToString().ToLower().Contains("at namespace3.class3.getstring(boolean error) in $($tmp_path.ToLower())")) -expected $true + Assert-Equals ($_.Exception.ToString().Contains(".cs:line 10")) -expected $true +} + +$warning_code = @' +using System; + +namespace Namespace4 +{ + public class Class4 + { + public static string GetString(bool test) + { + if (test) + { + string a = ""; + } + + return "Hello World"; + } + } +} +'@ +$failed = $false +try { + Add-CSharpType -References $warning_code +} catch { + $failed = $true + Assert-Equals -actual ($_.Exception.Message.Contains("error CS0219: Warning as Error: The variable 'a' is assigned but its value is never used")) -expected $true +} +Assert-Equals -actual $failed -expected $true + +Add-CSharpType -References $warning_code -IgnoreWarnings +$actual = [Namespace4.Class4]::GetString($true) +Assert-Equals -actual $actual -expected "Hello World" + +$reference_1 = @' +using System; +using System.Web.Script.Serialization; + +//AssemblyReference -Name System.Web.Extensions.dll + +namespace Namespace5 +{ + public class Class5 + { + public static string GetString() + { + return "Hello World"; + } + } +} +'@ + +$reference_2 = @' +using System; +using Namespace5; +using System.Management.Automation; +using System.Collections; +using System.Collections.Generic; + +namespace Namespace6 +{ + public class Class6 + { + public static string GetString() + { + Hashtable hash = new Hashtable(); + hash["test"] = "abc"; + return Class5.GetString(); + } + } +} +'@ + +Add-CSharpType -References $reference_1, $reference_2 +$actual = [Namespace6.Class6]::GetString() +Assert-Equals -actual $actual -expected "Hello World" + +$ignored_warning = @' +using System; + +//NoWarn -Name CS0219 + +namespace Namespace7 +{ + public class Class7 + { + public static string GetString() + { + string a = ""; + return "abc"; + } + } +} +'@ +Add-CSharpType -References $ignored_warning +$actual = [Namespace7.Class7]::GetString() +Assert-Equals -actual $actual -expected "abc" + +$defined_symbol = @' +using System; + +namespace Namespace8 +{ + public class Class8 + { + public static string GetString() + { +#if SYMBOL1 + string a = "symbol"; +#else + string a = "no symbol"; +#endif + return a; + } + } +} +'@ +Add-CSharpType -References $defined_symbol -CompileSymbols "SYMBOL1" +$actual = [Namespace8.Class8]::GetString() +Assert-Equals -actual $actual -expected "symbol" + +$type_accelerator = @' +using System; + +//TypeAccelerator -Name AnsibleType -TypeName Class9 + +namespace Namespace9 +{ + public class Class9 + { + public static string GetString() + { + return "a"; + } + } +} +'@ +Add-CSharpType -Reference $type_accelerator +$actual = [AnsibleType]::GetString() +Assert-Equals -actual $actual -expected "a" + +$missing_type_class = @' +using System; + +//TypeAccelerator -Name AnsibleTypeMissing -TypeName 
MissingClass + +namespace Namespace10 +{ + public class Class10 + { + public static string GetString() + { + return "b"; + } + } +} +'@ +$failed = $false +try { + Add-CSharpType -Reference $missing_type_class +} catch { + $failed = $true + Assert-Equals -actual $_.Exception.Message -expected "Failed to find compiled class 'MissingClass' for custom TypeAccelerator." +} +Assert-Equals -actual $failed -expected $true + +$arch_class = @' +using System; + +namespace Namespace11 +{ + public class Class11 + { + public static int GetIntPtrSize() + { +#if X86 + return 4; +#elif AMD64 + return 8; +#else + return 0; +#endif + } + } +} +'@ +Add-CSharpType -Reference $arch_class +Assert-Equals -actual ([Namespace11.Class11]::GetIntPtrSize()) -expected ([System.IntPtr]::Size) + +$result.res = "success" +Exit-Json -obj $result diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml new file mode 100644 index 00000000..4c4810be --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: call module with AddType tests + add_type_test: + register: add_type_test + +- name: assert call module with AddType tests + assert: + that: + - not add_type_test is failed + - add_type_test.res == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1 new file mode 100644 index 00000000..d7bd4bb3 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1 @@ -0,0 +1,93 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.ArgvParser + +$ErrorActionPreference = 'Continue' + +$params = Parse-Args $args +$exe = Get-AnsibleParam -obj $params -name "exe" -type "path" -failifempty $true + +Add-Type -TypeDefinition @' +using System.IO; +using System.Threading; + +namespace Ansible.Command +{ + public static class NativeUtil + { + public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr) + { + var sowait = new EventWaitHandle(false, EventResetMode.ManualReset); + var sewait = new EventWaitHandle(false, EventResetMode.ManualReset); + string so = null, se = null; + ThreadPool.QueueUserWorkItem((s)=> + { + so = stdoutStream.ReadToEnd(); + sowait.Set(); + }); + ThreadPool.QueueUserWorkItem((s) => + { + se = stderrStream.ReadToEnd(); + sewait.Set(); + }); + foreach(var wh in new WaitHandle[] { sowait, sewait }) + wh.WaitOne(); + stdout = so; + stderr = se; + } + } +} +'@ + +Function Invoke-Process($executable, $arguments) { + $proc = New-Object System.Diagnostics.Process + $psi = $proc.StartInfo + $psi.FileName = $executable + $psi.Arguments = $arguments + $psi.RedirectStandardOutput = $true + $psi.RedirectStandardError = $true + $psi.UseShellExecute = $false + + $proc.Start() > $null # will always return $true for non 
shell-exec cases + $stdout = $stderr = [string] $null + + [Ansible.Command.NativeUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) > $null + $proc.WaitForExit() > $null + $actual_args = $stdout.Substring(0, $stdout.Length - 2) -split "`r`n" + + return $actual_args +} + +$tests = @( + @('abc', 'd', 'e'), + @('a\\b', 'de fg', 'h'), + @('a\"b', 'c', 'd'), + @('a\\b c', 'd', 'e'), + @('C:\Program Files\file\', 'arg with " quote'), + @('ADDLOCAL="a,b,c"', '/s', 'C:\\Double\\Backslash') +) + +# holds failure details returned via Fail-Json if an assertion below fails +$result = @{} + +foreach ($expected in $tests) { + $joined_string = Argv-ToString -arguments $expected + # We can't use CommandLineToArgvW to test this out as it seems to mangle + # \, might be something to do with unicode but not sure... + $actual = Invoke-Process -executable $exe -arguments $joined_string + + if ($expected.Count -ne $actual.Count) { + $result.actual = $actual -join "`n" + $result.expected = $expected -join "`n" + Fail-Json -obj $result -message "Actual arg count: $($actual.Count) != Expected arg count: $($expected.Count)" + } + for ($i = 0; $i -lt $expected.Count; $i++) { + $expected_arg = $expected[$i] + $actual_arg = $actual[$i] + if ($expected_arg -cne $actual_arg) { + $result.actual = $actual -join "`n" + $result.expected = $expected -join "`n" + Fail-Json -obj $result -message "Actual arg: '$actual_arg' != Expected arg: '$expected_arg'" + } + } +} + +Exit-Json @{ data = 'success' } diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml new file mode 100644 index 00000000..fd0dc543 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- setup_win_printargv diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml new file mode 100644 index 00000000..b39155e0 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: call module with ArgvParser tests + argv_parser_test: + exe: '{{ win_printargv_path }}' + register: argv_test + +- assert: + that: + - argv_test.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1 new file mode 100644 index 00000000..15527560 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1 @@ -0,0 +1,89 @@ +#!powershell + +# Copyright: (c) 2019, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.Backup + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, 
[Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.FailJson("AssertionError: actual != expected") + } +} + +$tmp_dir = $module.Tmpdir + +$tests = @{ + "Test backup file with missing file" = { + $actual = Backup-File -path (Join-Path -Path $tmp_dir -ChildPath "missing") + $actual | Assert-Equals -Expected $null + } + + "Test backup file in check mode" = { + $orig_file = Join-Path -Path $tmp_dir -ChildPath "file-check.txt" + Set-Content -LiteralPath $orig_file -Value "abc" + $actual = Backup-File -path $orig_file -WhatIf + + (Test-Path -LiteralPath $actual) | Assert-Equals -Expected $false + + $parent_dir = Split-Path -LiteralPath $actual + $backup_file = Split-Path -Path $actual -Leaf + $parent_dir | Assert-Equals -Expected $tmp_dir + ($backup_file -match "^file-check\.txt\.$pid\.\d{8}-\d{6}\.bak$") | Assert-Equals -Expected $true + } + + "Test backup file" = { + $content = "abc" + $orig_file = Join-Path -Path $tmp_dir -ChildPath "file.txt" + Set-Content -LiteralPath $orig_file -Value $content + $actual = Backup-File -path $orig_file + + (Test-Path -LiteralPath $actual) | Assert-Equals -Expected $true + + $parent_dir = Split-Path -LiteralPath $actual + $backup_file = Split-Path -Path $actual -Leaf + $parent_dir | Assert-Equals -Expected $tmp_dir + ($backup_file -match "^file\.txt\.$pid\.\d{8}-\d{6}\.bak$") | Assert-Equals -Expected $true + (Get-Content -LiteralPath $actual -Raw) | Assert-Equals -Expected "$content`r`n" + } +} + +foreach ($test_impl in $tests.GetEnumerator()) { + $test = $test_impl.Key + &$test_impl.Value +} + +$module.Result.res = 'success' + +$module.ExitJson() diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml new file mode 100644 index 00000000..cb979ebc --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: call module with BackupFile tests + backup_file_test: + register: backup_file_test + +- name: assert call module with BackupFile tests + assert: + that: + - not backup_file_test is failed + - backup_file_test.res == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1 
b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1 new file mode 100644 index 00000000..d3dc9d7b --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1 @@ -0,0 +1,74 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.CamelConversion + +$ErrorActionPreference = 'Stop' + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected" + } +} + +$input_dict = @{ + alllower = 'alllower' + ALLUPPER = 'allupper' + camelCase = 'camel_case' + mixedCase_withCamel = 'mixed_case_with_camel' + TwoWords = 'two_words' + AllUpperAtEND = 'all_upper_at_end' + AllUpperButPLURALs = 'all_upper_but_plurals' + TargetGroupARNs = 'target_group_arns' + HTTPEndpoints = 'http_endpoints' + PLURALs = 'plurals' + listDict = @( + @{ entry1 = 'entry1'; entryTwo = 'entry_two' }, + 'stringTwo', + 0 + ) + INNERHashTable = @{ + ID = 'id' + IEnumerable = 'i_enumerable' + } + emptyList = @() + singleList = @("a") +} + +$output_dict = Convert-DictToSnakeCase -dict $input_dict +foreach ($entry in $output_dict.GetEnumerator()) { + $key = $entry.Name + $value = $entry.Value + + if ($value -is [Hashtable]) { + Assert-Equals -actual $key -expected "inner_hash_table" + foreach ($inner_hash in $value.GetEnumerator()) { + Assert-Equals -actual $inner_hash.Name -expected $inner_hash.Value + } + } elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) { + if ($key -eq "list_dict") { + foreach ($inner_list in $value) { + if ($inner_list -is [Hashtable]) { + foreach ($inner_list_hash in $inner_list.GetEnumerator()) { + Assert-Equals -actual $inner_list_hash.Name -expected $inner_list_hash.Value + } + } elseif ($inner_list -is [String]) { + # list values that are plain strings are not keys, so the value is kept as-is + Assert-Equals -actual $inner_list -expected "stringTwo" + } else { + Assert-Equals -actual $inner_list -expected 0 + } + } + } elseif ($key -eq "empty_list") { + Assert-Equals -actual $value.Count -expected 0 + } elseif ($key -eq "single_list") { + Assert-Equals -actual $value.Count -expected 1 + } else { + Fail-Json -obj @{} -message "invalid key found for list $key" + } + } else { + Assert-Equals -actual $key -expected $value + } +} + +Exit-Json @{ data = 'success' } diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml new file mode 100644 index 00000000..f28ea30d --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: call module with camel conversion tests + camel_conversion_test: + register: camel_conversion + +- assert: + that: + - camel_conversion.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1 
b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1 new file mode 100644 index 00000000..6e644fe2 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1 @@ -0,0 +1,135 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.CommandUtil + +$ErrorActionPreference = 'Stop' + +$params = Parse-Args $args +$exe = Get-AnsibleParam -obj $params -name "exe" -type "path" -failifempty $true + +$result = @{ + changed = $false +} + +$exe_directory = Split-Path -Path $exe -Parent +$exe_filename = Split-Path -Path $exe -Leaf +$test_name = $null + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + Fail-Json -obj $result -message "Test $test_name failed`nActual: '$actual' != Expected: '$expected'" + } +} + +$test_name = "full exe path" +$actual = Run-Command -command "`"$exe`" arg1 arg2 `"arg 3`"" +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "arg1`r`narg2`r`narg 3`r`n" +Assert-Equals -actual $actual.stderr -expected "" +Assert-Equals -actual $actual.executable -expected $exe + +$test_name = "exe in special char dir" +$tmp_dir = Join-Path -Path $env:TEMP -ChildPath "ansible .ÅÑŚÌβŁÈ [$!@^&test(;)]" +try { + New-Item -Path $tmp_dir -ItemType Directory > $null + $exe_special = Join-Path $tmp_dir -ChildPath "PrintArgv.exe" + Copy-Item -LiteralPath $exe -Destination $exe_special + $actual = Run-Command -command "`"$exe_special`" arg1 arg2 `"arg 3`"" +} finally { + Remove-Item -LiteralPath $tmp_dir -Force -Recurse +} +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "arg1`r`narg2`r`narg 3`r`n" +Assert-Equals -actual $actual.stderr -expected "" +Assert-Equals -actual $actual.executable -expected $exe_special + +$test_name = "invalid exe path" +try { + $actual = Run-Command -command "C:\fakepath\$exe_filename arg1" + Fail-Json -obj $result -message "Test $test_name failed`nCommand should have thrown an exception" +} catch { + Assert-Equals -actual $_.Exception.Message -expected "Exception calling `"SearchPath`" with `"1`" argument(s): `"Could not find file 'C:\fakepath\$exe_filename'.`"" +} + +$test_name = "exe in current folder" +$actual = Run-Command -command "$exe_filename arg1" -working_directory $exe_directory +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "arg1`r`n" +Assert-Equals -actual $actual.stderr -expected "" +Assert-Equals -actual $actual.executable -expected $exe + +$test_name = "no working directory set" +$actual = Run-Command -command "cmd.exe /c cd" +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "$($pwd.Path)`r`n" +Assert-Equals -actual $actual.stderr -expected "" +Assert-Equals -actual $actual.executable.ToUpper() -expected "$env:SystemRoot\System32\cmd.exe".ToUpper() + +$test_name = "working directory override" +$actual = Run-Command -command "cmd.exe /c cd" -working_directory $env:SystemRoot +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "$env:SystemRoot`r`n" +Assert-Equals -actual $actual.stderr -expected "" +Assert-Equals -actual $actual.executable.ToUpper() -expected "$env:SystemRoot\System32\cmd.exe".ToUpper() + +$test_name = "working directory invalid path" +try { + $actual = Run-Command -command "doesn't matter" -working_directory "invalid path here" + Fail-Json -obj 
$result -message "Test $test_name failed`nCommand should have thrown an exception" +} catch { + Assert-Equals -actual $_.Exception.Message -expected "invalid working directory path 'invalid path here'" +} + +$test_name = "invalid arguments" +$actual = Run-Command -command "ipconfig.exe /asdf" +Assert-Equals -actual $actual.rc -expected 1 + +$test_name = "test stdout and stderr streams" +$actual = Run-Command -command "cmd.exe /c echo stdout && echo stderr 1>&2" +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "stdout `r`n" +Assert-Equals -actual $actual.stderr -expected "stderr `r`n" + +$test_name = "Test UTF8 output from stdout stream" +$actual = Run-Command -command "powershell.exe -ExecutionPolicy ByPass -Command `"Write-Host '💩'`"" +Assert-Equals -actual $actual.rc -expected 0 +Assert-Equals -actual $actual.stdout -expected "💩`n" +Assert-Equals -actual $actual.stderr -expected "" + +$test_name = "test default environment variable" +Set-Item -LiteralPath env:TESTENV -Value "test" +$actual = Run-Command -command "cmd.exe /c set" +$env_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV=test" } +if ($null -eq $env_present) { + Fail-Json -obj $result -message "Test $test_name failed`nenvironment variable TESTENV not found in stdout`n$($actual.stdout)" +} + +$test_name = "test custom environment variable1" +$actual = Run-Command -command "cmd.exe /c set" -environment @{ TESTENV2 = "testing" } +$env_not_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV=test" } +$env_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV2=testing" } +if ($null -ne $env_not_present) { + Fail-Json -obj $result -message "Test $test_name failed`nenvironment variabel TESTENV found in stdout when it should be`n$($actual.stdout)" +} +if ($null -eq $env_present) { + Fail-json -obj $result -message "Test $test_name failed`nenvironment variable TESTENV2 not found in stdout`n$($actual.stdout)" +} + +$test_name = "input test" +$wrapper = @" +begin { + `$string = "" +} process { + `$current_input = [string]`$input + `$string += `$current_input +} end { + Write-Host `$string +} +"@ +$encoded_wrapper = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($wrapper)) +$actual = Run-Command -command "powershell.exe -ExecutionPolicy ByPass -EncodedCommand $encoded_wrapper" -stdin "Ansible" +Assert-Equals -actual $actual.stdout -expected "Ansible`n" + +$result.data = "success" +Exit-Json -obj $result diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml new file mode 100644 index 00000000..fd0dc543 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- setup_win_printargv diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml new file mode 100644 index 00000000..3001518b --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: call module with CommandUtil tests + command_util_test: + exe: '{{ win_printargv_path }}' + register: command_util + +- assert: + that: + - command_util.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases 
b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1 new file mode 100644 index 00000000..ae3e68ec --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1 @@ -0,0 +1,108 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.FileUtil + +$ErrorActionPreference = "Stop" + +$result = @{ + changed = $false +} + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + $call_stack = (Get-PSCallStack)[1] + $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)" + Fail-Json -obj $result -message $error_msg + } +} + +Function Get-PagefilePath() { + $pagefile = $null + $cs = Get-CimInstance -ClassName Win32_ComputerSystem + if ($cs.AutomaticManagedPagefile) { + $pagefile = "$($env:SystemRoot.Substring(0, 1)):\pagefile.sys" + } else { + $pf = Get-CimInstance -ClassName Win32_PageFileSetting + if ($null -ne $pf) { + $pagefile = $pf[0].Name + } + } + return $pagefile +} + +$pagefile = Get-PagefilePath +if ($pagefile) { + # Test-AnsiblePath Hidden system file + $actual = Test-AnsiblePath -Path $pagefile + Assert-Equals -actual $actual -expected $true + + # Get-AnsibleItem file + $actual = Get-AnsibleItem -Path $pagefile + Assert-Equals -actual $actual.FullName -expected $pagefile + Assert-Equals -actual $actual.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -expected $false + Assert-Equals -actual $actual.Exists -expected $true +} + +# Test-AnsiblePath File that doesn't exist +$actual = Test-AnsiblePath -Path C:\fakefile +Assert-Equals -actual $actual -expected $false + +# Test-AnsiblePath Directory that doesn't exist +$actual = Test-AnsiblePath -Path C:\fakedirectory +Assert-Equals -actual $actual -expected $false + +# Test-AnsiblePath file in non-existent directory +$actual = Test-AnsiblePath -Path C:\fakedirectory\fakefile.txt +Assert-Equals -actual $actual -expected $false + +# Test-AnsiblePath Normal directory +$actual = Test-AnsiblePath -Path C:\Windows +Assert-Equals -actual $actual -expected $true + +# Test-AnsiblePath Normal file +$actual = Test-AnsiblePath -Path C:\Windows\System32\kernel32.dll +Assert-Equals -actual $actual -expected $true + +# Test-AnsiblePath fails with wildcard +$failed = $false +try { + Test-AnsiblePath -Path C:\Windows\*.exe +} catch { + $failed = $true + Assert-Equals -actual $_.Exception.Message -expected "Exception calling `"GetAttributes`" with `"1`" argument(s): `"Illegal characters in path.`"" +} +Assert-Equals -actual $failed -expected $true + +# Test-AnsiblePath on a non-file PS provider object +$actual = Test-AnsiblePath -Path Cert:\LocalMachine\My +Assert-Equals -actual $actual -expected $true + +# Test-AnsiblePath on environment variable +$actual = Test-AnsiblePath -Path env:SystemDrive +Assert-Equals -actual $actual -expected $true + +# Test-AnsiblePath on environment variable that does not exist +$actual = Test-AnsiblePath -Path env:FakeEnvValue +Assert-Equals -actual $actual -expected $false
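+ +# For illustration only: Test-AnsiblePath checks the file attributes directly rather than opening the +# item through the provider, which is why the hidden system pagefile above still reports $true while +# plain Test-Path can return $false for such files, e.g. (assuming the host has a pagefile): +# Test-AnsiblePath -Path "$env:SystemDrive\pagefile.sys" # $true even while the OS holds the file open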
+ +# Get-AnsibleItem doesn't exist with -ErrorAction SilentlyContinue param +$actual = Get-AnsibleItem -Path C:\fakefile -ErrorAction SilentlyContinue +Assert-Equals -actual $actual -expected $null + +# Get-AnsibleItem directory +$actual = Get-AnsibleItem -Path C:\Windows +Assert-Equals -actual $actual.FullName -expected C:\Windows +Assert-Equals -actual $actual.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -expected $true +Assert-Equals -actual $actual.Exists -expected $true + +# ensure Get-AnsibleItem doesn't fail in a try/catch and -ErrorAction SilentlyContinue - stops a trap from trapping it +try { + $actual = Get-AnsibleItem -Path C:\fakepath -ErrorAction SilentlyContinue +} catch { + Fail-Json -obj $result -message "this should not fire" +} +Assert-Equals -actual $actual -expected $null + +$result.data = "success" +Exit-Json -obj $result diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml new file mode 100644 index 00000000..a636d32e --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: call module with FileUtil tests + file_util_test: + register: file_util_test + +- assert: + that: + - file_util_test.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1 new file mode 100644 index 00000000..06ef17b4 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1 @@ -0,0 +1,12 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +$params = Parse-Args $args +$value = Get-AnsibleParam -Obj $params -Name value -Type list + +if ($value -isnot [array]) { + Fail-Json -obj @{} -message "value was not a list but was $($value.GetType().FullName)" +} + +Exit-Json @{ count = $value.Count } diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1 new file mode 100644 index 00000000..55cad70f --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1 @@ -0,0 +1,9 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +$params = Parse-Args $args + +$path = Get-AnsibleParam -Obj $params -Name path -Type path + +Exit-Json @{ path=$path } diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml new file mode 100644 index 00000000..0bd10558 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml @@ -0,0 +1,41 @@ +# NB: these tests are just a placeholder until we have pester unit tests. +# They are being run as part of the Windows smoke tests. Please do not significantly +# increase the size of these tests, as the smoke tests need to remain fast. 
+# Any significant additions should be made to the (as yet nonexistent) PS module_utils unit tests. +--- +- name: find a nonexistent drive letter + raw: foreach($c in [char[]]([char]'D'..[char]'Z')) { If (-not $(Get-PSDrive $c -ErrorAction SilentlyContinue)) { return $c } } + register: bogus_driveletter + +- assert: + that: bogus_driveletter.stdout_lines[0] | length == 1 + +- name: test path shape validation + testpath: + path: "{{ item.path }}" + failed_when: path_shapes is failed != (item.should_fail | default(false)) + register: path_shapes + with_items: + - path: C:\Windows + - path: HKLM:\Software + - path: '{{ bogus_driveletter.stdout_lines[0] }}:\goodpath' + - path: '{{ bogus_driveletter.stdout_lines[0] }}:\badpath*%@:\blar' + should_fail: true + +- name: test list parameters + testlist: + value: '{{item.value}}' + register: list_tests + failed_when: list_tests is failed or list_tests.count != item.count + with_items: + - value: [] + count: 0 + - value: + - 1 + - 2 + count: 2 + - value: + - 1 + count: 1 + - value: "1, 2" + count: 2 diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1 new file mode 100644 index 00000000..1decfe4f --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1 @@ -0,0 +1,170 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.LinkUtil +#Requires -Module Ansible.ModuleUtils.CommandUtil + +$ErrorActionPreference = 'Stop' + +$path = Join-Path -Path ([System.IO.Path]::GetFullPath($env:TEMP)) -ChildPath '.ansible .ÅÑŚÌβŁÈ [$!@^&test(;)]' + +$folder_target = "$path\folder" +$file_target = "$path\file" +$symlink_file_path = "$path\file-symlink" +$symlink_folder_path = "$path\folder-symlink" +$hardlink_path = "$path\hardlink" +$hardlink_path_2 = "$path\hardlink2" +$junction_point_path = "$path\junction" + +if (Test-Path -LiteralPath $path) { + # Remove-Item struggles with broken symlinks, rely on trusty rmdir instead + Run-Command -command "cmd.exe /c rmdir /S /Q `"$path`"" > $null +} +New-Item -Path $path -ItemType Directory | Out-Null +New-Item -Path $folder_target -ItemType Directory | Out-Null +New-Item -Path $file_target -ItemType File | Out-Null +Set-Content -LiteralPath $file_target -Value "a" + +Function Assert-Equals($actual, $expected) { + if ($actual -ne $expected) { + Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected" + } +} + +Function Assert-True($expression, $message) { + if ($expression -ne $true) { + Fail-Json @{} $message + } +} + +# the link utils are not loaded automatically, this needs to be done manually +Load-LinkUtils + +# path is not a link +$no_link_result = Get-Link -link_path $path +Assert-True -expression ($null -eq $no_link_result) -message "did not return null result for a non link"
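+ +# For reference: the Get-Link results asserted below expose Type (SymbolicLink, JunctionPoint or HardLink), +# the raw SubstituteName/PrintName reparse data, the resolved TargetPath/AbsolutePath and, for hard links +# only, the HardTargets list, e.g. (illustrative, once the links below have been created): +# (Get-Link -link_path "$path\file-symlink").Type # "SymbolicLink"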
target was a directory" +} catch { + Assert-Equals -actual $_.Exception.Message -expected "cannot set the target for a hard link to a directory" +} + +# fail to create a junction point pointed to a file +try { + New-Link -link_path "$path\junction-fail" -link_target $file_target -link_type "junction" + Assert-True -expression $false -message "creation of junction point should have failed if target was a file" +} catch { + Assert-Equals -actual $_.Exception.Message -expected "cannot set the target for a junction point to a file" +} + +# fail to create a symbolic link with non-existent target +try { + New-Link -link_path "$path\symlink-fail" -link_target "$path\fake-folder" -link_type "link" + Assert-True -expression $false -message "creation of symbolic link should have failed if target did not exist" +} catch { + Assert-Equals -actual $_.Exception.Message -expected "link_target '$path\fake-folder' does not exist, cannot create link" +} + +# create recursive symlink +Run-Command -command "cmd.exe /c mklink /D symlink-rel folder" -working_directory $path | Out-Null +$rel_link_result = Get-Link -link_path "$path\symlink-rel" +Assert-Equals -actual $rel_link_result.Type -expected "SymbolicLink" +Assert-Equals -actual $rel_link_result.SubstituteName -expected "folder" +Assert-Equals -actual $rel_link_result.PrintName -expected "folder" +Assert-Equals -actual $rel_link_result.TargetPath -expected "folder" +Assert-Equals -actual $rel_link_result.AbsolutePath -expected $folder_target +Assert-Equals -actual $rel_link_result.HardTargets -expected $null + +# create a symbolic file test +New-Link -link_path $symlink_file_path -link_target $file_target -link_type "link" +$file_link_result = Get-Link -link_path $symlink_file_path +Assert-Equals -actual $file_link_result.Type -expected "SymbolicLink" +Assert-Equals -actual $file_link_result.SubstituteName -expected "\??\$file_target" +Assert-Equals -actual $file_link_result.PrintName -expected $file_target +Assert-Equals -actual $file_link_result.TargetPath -expected $file_target +Assert-Equals -actual $file_link_result.AbsolutePath -expected $file_target +Assert-Equals -actual $file_link_result.HardTargets -expected $null + +# create a symbolic link folder test +New-Link -link_path $symlink_folder_path -link_target $folder_target -link_type "link" +$folder_link_result = Get-Link -link_path $symlink_folder_path +Assert-Equals -actual $folder_link_result.Type -expected "SymbolicLink" +Assert-Equals -actual $folder_link_result.SubstituteName -expected "\??\$folder_target" +Assert-Equals -actual $folder_link_result.PrintName -expected $folder_target +Assert-Equals -actual $folder_link_result.TargetPath -expected $folder_target +Assert-Equals -actual $folder_link_result.AbsolutePath -expected $folder_target +Assert-Equals -actual $folder_link_result.HardTargets -expected $null + +# create a junction point test +New-Link -link_path $junction_point_path -link_target $folder_target -link_type "junction" +$junction_point_result = Get-Link -link_path $junction_point_path +Assert-Equals -actual $junction_point_result.Type -expected "JunctionPoint" +Assert-Equals -actual $junction_point_result.SubstituteName -expected "\??\$folder_target" +Assert-Equals -actual $junction_point_result.PrintName -expected $folder_target +Assert-Equals -actual $junction_point_result.TargetPath -expected $folder_target +Assert-Equals -actual $junction_point_result.AbsolutePath -expected $folder_target +Assert-Equals -actual $junction_point_result.HardTargets -expected $null + +# create 
+# create a hard link test +New-Link -link_path $hardlink_path -link_target $file_target -link_type "hard" +$hardlink_result = Get-Link -link_path $hardlink_path +Assert-Equals -actual $hardlink_result.Type -expected "HardLink" +Assert-Equals -actual $hardlink_result.SubstituteName -expected $null +Assert-Equals -actual $hardlink_result.PrintName -expected $null +Assert-Equals -actual $hardlink_result.TargetPath -expected $null +Assert-Equals -actual $hardlink_result.AbsolutePath -expected $null +if ($hardlink_result.HardTargets[0] -ne $hardlink_path -and $hardlink_result.HardTargets[1] -ne $hardlink_path) { + Assert-True -expression $false -message "file $hardlink_path is not a target of the hard link" +} +if ($hardlink_result.HardTargets[0] -ne $file_target -and $hardlink_result.HardTargets[1] -ne $file_target) { + Assert-True -expression $false -message "file $file_target is not a target of the hard link" +} +Assert-Equals -actual (Get-Content -LiteralPath $hardlink_path -Raw) -expected (Get-Content -LiteralPath $file_target -Raw) + +# create a new hard link and verify targets go to 3 +New-Link -link_path $hardlink_path_2 -link_target $file_target -link_type "hard" +$hardlink_result_2 = Get-Link -link_path $hardlink_path +Assert-True -expression ($hardlink_result_2.HardTargets.Count -eq 3) -message "did not return 3 targets for the hard link, actual $($hardlink_result_2.HardTargets.Count)" + +# check if broken symbolic link still works +Remove-Item -LiteralPath $folder_target -Force | Out-Null +$broken_link_result = Get-Link -link_path $symlink_folder_path +Assert-Equals -actual $broken_link_result.Type -expected "SymbolicLink" +Assert-Equals -actual $broken_link_result.SubstituteName -expected "\??\$folder_target" +Assert-Equals -actual $broken_link_result.PrintName -expected $folder_target +Assert-Equals -actual $broken_link_result.TargetPath -expected $folder_target +Assert-Equals -actual $broken_link_result.AbsolutePath -expected $folder_target +Assert-Equals -actual $broken_link_result.HardTargets -expected $null + +# check if broken junction point still works +$broken_junction_result = Get-Link -link_path $junction_point_path +Assert-Equals -actual $broken_junction_result.Type -expected "JunctionPoint" +Assert-Equals -actual $broken_junction_result.SubstituteName -expected "\??\$folder_target" +Assert-Equals -actual $broken_junction_result.PrintName -expected $folder_target +Assert-Equals -actual $broken_junction_result.TargetPath -expected $folder_target +Assert-Equals -actual $broken_junction_result.AbsolutePath -expected $folder_target +Assert-Equals -actual $broken_junction_result.HardTargets -expected $null + +# delete file symbolic link +Remove-Link -link_path $symlink_file_path +Assert-True -expression (-not (Test-Path -LiteralPath $symlink_file_path)) -message "failed to delete file symbolic link" + +# delete folder symbolic link +Remove-Link -link_path $symlink_folder_path +Assert-True -expression (-not (Test-Path -LiteralPath $symlink_folder_path)) -message "failed to delete folder symbolic link" + +# delete junction point +Remove-Link -link_path $junction_point_path +Assert-True -expression (-not (Test-Path -LiteralPath $junction_point_path)) -message "failed to delete junction point" + +# delete hard link +Remove-Link -link_path $hardlink_path +Assert-True -expression (-not (Test-Path -LiteralPath $hardlink_path)) -message "failed to delete hard link" + +# cleanup after tests +Run-Command -command "cmd.exe /c rmdir /S /Q `"$path`"" > $null + +Exit-Json @{ data = "success" } diff 
--git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml new file mode 100644 index 00000000..f121ad4a --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: call module with symbolic link tests + symbolic_link_test: + register: symbolic_link + +- assert: + that: + - symbolic_link.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1 new file mode 100644 index 00000000..e1ca25da --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1 @@ -0,0 +1,112 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.PrivilegeUtil + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + $call_stack = (Get-PSCallStack)[1] + $module.Result.actual = $actual + $module.Result.expected = $expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.FailJson("AssertionError: actual != expected") + } +} + +# taken from https://docs.microsoft.com/en-us/windows/desktop/SecAuthZ/privilege-constants +$total_privileges = @( + "SeAssignPrimaryTokenPrivilege", + "SeAuditPrivilege", + "SeBackupPrivilege", + "SeChangeNotifyPrivilege", + "SeCreateGlobalPrivilege", + "SeCreatePagefilePrivilege", + "SeCreatePermanentPrivilege", + "SeCreateSymbolicLinkPrivilege", + "SeCreateTokenPrivilege", + "SeDebugPrivilege", + "SeEnableDelegationPrivilege", + "SeImpersonatePrivilege", + "SeIncreaseBasePriorityPrivilege", + "SeIncreaseQuotaPrivilege", + "SeIncreaseWorkingSetPrivilege", + "SeLoadDriverPrivilege", + "SeLockMemoryPrivilege", + "SeMachineAccountPrivilege", + "SeManageVolumePrivilege", + "SeProfileSingleProcessPrivilege", + "SeRelabelPrivilege", + "SeRemoteShutdownPrivilege", + "SeRestorePrivilege", + "SeSecurityPrivilege", + "SeShutdownPrivilege", + "SeSyncAgentPrivilege", + "SeSystemEnvironmentPrivilege", + "SeSystemProfilePrivilege", + "SeSystemtimePrivilege", + "SeTakeOwnershipPrivilege", + "SeTcbPrivilege", + "SeTimeZonePrivilege", + "SeTrustedCredManAccessPrivilege", + "SeUndockPrivilege" +) + +$raw_privilege_output = &whoami /priv | Where-Object { $_.StartsWith("Se") } +$actual_privileges = @{} +foreach ($raw_privilege in $raw_privilege_output) { + $split = $raw_privilege.TrimEnd() -split " " + $actual_privileges."$($split[0])" = ($split[-1] -eq "Enabled") +} +$process = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess() + +### Test PS cmdlets ### +# test ps Get-AnsiblePrivilege +foreach ($privilege in $total_privileges) { + $expected = $null + if ($actual_privileges.ContainsKey($privilege)) { + $expected = $actual_privileges.$privilege + } + $actual = Get-AnsiblePrivilege -Name 
$privilege + Assert-Equals -actual $actual -expected $expected +} + +# test C# GetAllPrivilegeInfo +$actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) +Assert-Equals -actual $actual.GetType().Name -expected 'Dictionary`2' +Assert-Equals -actual $actual.Count -expected $actual_privileges.Count +foreach ($privilege in $total_privileges) { + if ($actual_privileges.ContainsKey($privilege)) { + $actual_value = $actual.$privilege + if ($actual_privileges.$privilege) { + Assert-Equals -actual $actual_value.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled) -expected $true + } else { + Assert-Equals -actual $actual_value.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled) -expected $false + } + } +} + +# test Set-AnsiblePrivilege +Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false # ensure we start with a disabled privilege + +Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $true -WhatIf +$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege +Assert-Equals -actual $actual -expected $false + +Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $true +$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege +Assert-Equals -actual $actual -expected $true + +Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false -WhatIf +$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege +Assert-Equals -actual $actual -expected $true + +Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false +$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege +Assert-Equals -actual $actual -expected $false + +$module.Result.data = "success" +$module.ExitJson() + diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml new file mode 100644 index 00000000..5f54480e --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: call module with PrivilegeUtil tests + privilege_util_test: + register: privilege_util_test + +- assert: + that: + - privilege_util_test.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1 new file mode 100644 index 00000000..eb376c81 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1 @@ -0,0 +1,93 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.SID + +$params = Parse-Args $args +$sid_account = Get-AnsibleParam -obj $params -name "sid_account" -type "str" -failifempty $true + +Function Assert-Equals($actual, $expected) { + if ($actual -ne $expected) { + Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected" + } +} + +Function Get-ComputerSID() { + # find any local user and trim off the final RID + $luser_sid = (Get-CimInstance Win32_UserAccount -Filter "Domain='$env:COMPUTERNAME'")[0].SID + + return $luser_sid -replace '(S-1-5-21-\d+-\d+-\d+)-\d+', '$1' +} + +$local_sid = Get-ComputerSID + +# most 
machines should have a -500 Administrator account, but it may have been renamed. Look it up by SID +$default_admin = Get-CimInstance Win32_UserAccount -Filter "SID='$local_sid-500'" + +# this group is called Administrators by default on English Windows, but could be named something else. Look it up by SID +$default_admin_group = Get-CimInstance Win32_Group -Filter "SID='S-1-5-32-544'" + +if (@($default_admin).Length -ne 1) { + Fail-Json @{} "could not find a local admin account with SID ending in -500" +} + +### Set this to the NETBIOS name of the domain you wish to test, not set for shippable ### +$test_domain = $null + +$tests = @( + # Local Users + @{ sid = "S-1-1-0"; full_name = "Everyone"; names = @("Everyone") }, + @{ sid = "S-1-5-18"; full_name = "NT AUTHORITY\SYSTEM"; names = @("NT AUTHORITY\SYSTEM", "SYSTEM") }, + @{ sid = "S-1-5-20"; full_name = "NT AUTHORITY\NETWORK SERVICE"; names = @("NT AUTHORITY\NETWORK SERVICE", "NETWORK SERVICE") }, + @{ sid = "$($default_admin.SID)"; full_name = "$($default_admin.FullName)"; names = @("$env:COMPUTERNAME\$($default_admin.Name)", "$($default_admin.Name)", ".\$($default_admin.Name)") }, + + # Local Groups + @{ sid = "$($default_admin_group.SID)"; full_name = "BUILTIN\$($default_admin_group.Name)"; names = @("BUILTIN\$($default_admin_group.Name)", "$($default_admin_group.Name)", ".\$($default_admin_group.Name)") } +) + +# Add domain tests if the domain name has been set +if ($null -ne $test_domain) { + Import-Module ActiveDirectory + $domain_info = Get-ADDomain -Identity $test_domain + $domain_sid = $domain_info.DomainSID + $domain_netbios = $domain_info.NetBIOSName + $domain_upn = $domain_info.Forest + + $tests += @{ + sid = "$domain_sid-512" + full_name = "$domain_netbios\Domain Admins" + names = @("$domain_netbios\Domain Admins", "Domain Admins@$domain_upn", "Domain Admins") + } + + $tests += @{ + sid = "$domain_sid-500" + full_name = "$domain_netbios\Administrator" + names = @("$domain_netbios\Administrator", "Administrator@$domain_upn") + } +} + +foreach ($test in $tests) { + $actual_account_name = Convert-FromSID -sid $test.sid + # renamed admins may have an empty FullName; skip comparison in that case + if ($test.full_name) { + Assert-Equals -actual $actual_account_name -expected $test.full_name + } + + foreach ($test_name in $test.names) { + $actual_sid = Convert-ToSID -account_name $test_name + Assert-Equals -actual $actual_sid -expected $test.sid + } +} + +# the account to SID test is run outside of the normal run as we can't test it +# in the normal test suite +# Calling Convert-ToSID with a string like a SID should return that SID back +$actual = Convert-ToSID -account_name $sid_account +Assert-Equals -actual $actual -expected $sid_account + +# Calling Convert-ToSID with a string prefixed with .\ should return the SID +# for the local user with that name and not the SID that was passed in +$actual = Convert-ToSID -account_name ".\$sid_account" +Assert-Equals -actual ($actual -ne $sid_account) -expected $true + +Exit-Json @{ data = "success" } diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml new file mode 100644 index 00000000..acbae50a --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- block: + - name: create test user with well-known SID as the name + win_user: + name: S-1-0-0 + password: AbcDef123!@# + state: present + + - name: call module with SID 
tests + sid_utils_test: + sid_account: S-1-0-0 + register: sid_test + + always: + - name: remove test SID user + win_user: + name: S-1-0-0 + state: absent + +- assert: + that: + - sid_test.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases new file mode 100644 index 00000000..b5ad7ca9 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases @@ -0,0 +1,4 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest +needs/httptester diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1 new file mode 100644 index 00000000..a483698c --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1 @@ -0,0 +1,467 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.WebRequest + +$spec = @{ + options = @{ + httpbin_host = @{ type = 'str'; required = $true } + } +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$httpbin_host = $module.Params.httpbin_host + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array] -or $Actual -is [System.Collections.IList]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actualValue = $Actual[$i] + $expectedValue = $Expected[$i] + Assert-Equals -Actual $actualValue -Expected $expectedValue + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + + $module.FailJson("AssertionError: actual != expected") + } +} + +Function Convert-StreamToString { + [CmdletBinding()] + param ( + [Parameter(Mandatory=$true)] + [System.IO.Stream] + $Stream + ) + + $ms = New-Object -TypeName System.IO.MemoryStream + try { + $Stream.CopyTo($ms) + [System.Text.Encoding]::UTF8.GetString($ms.ToArray()) + } finally { + $ms.Dispose() + } +} + +$tests = [Ordered]@{ + 'GET request over http' = { + $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/get" + + $r.Method | Assert-Equals -Expected 'GET' + $r.Timeout | Assert-Equals -Expected 30000 + $r.UseDefaultCredentials | Assert-Equals -Expected $false + $r.Credentials | Assert-Equals -Expected $null + $r.ClientCertificates.Count | Assert-Equals -Expected 0 + $r.Proxy.Credentials | Assert-Equals -Expected $null + $r.UserAgent | Assert-Equals -Expected 'ansible-httpget' + + $actual = Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + Convert-StreamToString -Stream $Stream + } | ConvertFrom-Json + + $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget' + $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host + 
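+ # Invoke-WithWebRequest should also record the response metadata on the module result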
+ $module.Result.msg | Assert-Equals -Expected 'OK' + $module.Result.status_code | Assert-Equals -Expected 200 + $module.Result.ContainsKey('elapsed') | Assert-Equals -Expected $true + } + + 'GET request over https' = { + # url is an alias for the -Uri parameter. + $r = Get-AnsibleWebRequest -url "https://$httpbin_host/get" + + $r.Method | Assert-Equals -Expected 'GET' + $r.Timeout | Assert-Equals -Expected 30000 + $r.UseDefaultCredentials | Assert-Equals -Expected $false + $r.Credentials | Assert-Equals -Expected $null + $r.ClientCertificates.Count | Assert-Equals -Expected 0 + $r.Proxy.Credentials | Assert-Equals -Expected $null + $r.UserAgent | Assert-Equals -Expected 'ansible-httpget' + + $actual = Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + Convert-StreamToString -Stream $Stream + } | ConvertFrom-Json + + $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget' + $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host + } + + 'POST request' = { + $getParams = @{ + Headers = @{ + 'Content-Type' = 'application/json' + } + Method = 'POST' + Uri = "https://$httpbin_host/post" + } + $r = Get-AnsibleWebRequest @getParams + + $r.Method | Assert-Equals -Expected 'POST' + $r.Timeout | Assert-Equals -Expected 30000 + $r.UseDefaultCredentials | Assert-Equals -Expected $false + $r.Credentials | Assert-Equals -Expected $null + $r.ClientCertificates.Count | Assert-Equals -Expected 0 + $r.Proxy.Credentials | Assert-Equals -Expected $null + $r.ContentType | Assert-Equals -Expected 'application/json' + $r.UserAgent | Assert-Equals -Expected 'ansible-httpget' + + $body = New-Object -TypeName System.IO.MemoryStream -ArgumentList @(, + ([System.Text.Encoding]::UTF8.GetBytes('{"foo":"bar"}')) + ) + $actual = Invoke-WithWebRequest -Module $module -Request $r -Body $body -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + Convert-StreamToString -Stream $Stream + } | ConvertFrom-Json + + $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget' + $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host + $actual.data | Assert-Equals -Expected '{"foo":"bar"}' + } + + 'Safe redirection of GET' = { + $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/redirect/2" + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get" + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'Safe redirection of HEAD' = { + $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/redirect/2" -Method HEAD + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get" + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'Safe redirection of PUT' = { + $params = @{ + Method = 'PUT' + Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri + $Response.StatusCode | Assert-Equals -Expected 
302 + } + } + + 'None redirection of GET' = { + $params = @{ + FollowRedirects = 'None' + Uri = "http://$httpbin_host/redirect/2" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri + $Response.StatusCode | Assert-Equals -Expected 302 + } + } + + 'None redirection of HEAD' = { + $params = @{ + follow_redirects = 'None' + method = 'HEAD' + Uri = "http://$httpbin_host/redirect/2" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri + $Response.StatusCode | Assert-Equals -Expected 302 + } + } + + 'None redirection of PUT' = { + $params = @{ + FollowRedirects = 'None' + Method = 'PUT' + Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri + $Response.StatusCode | Assert-Equals -Expected 302 + } + } + + 'All redirection of GET' = { + $params = @{ + FollowRedirects = 'All' + Uri = "http://$httpbin_host/redirect/2" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get" + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'All redirection of HEAD' = { + $params = @{ + follow_redirects = 'All' + method = 'HEAD' + Uri = "http://$httpbin_host/redirect/2" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get" + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'All redirection of PUT' = { + $params = @{ + FollowRedirects = 'All' + Method = 'PUT' + Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/put" + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'Exceeds maximum redirection - ignored' = { + $params = @{ + MaximumRedirection = 4 + Uri = "https://$httpbin_host/redirect/5" + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/relative-redirect/1" + $Response.StatusCode | Assert-Equals -Expected 302 + } + } + + 'Exceeds maximum redirection - exception' = { + $params = @{ + MaximumRedirection = 1 + Uri = "https://$httpbin_host/redirect/2" + } + $r = Get-AnsibleWebRequest @params + + $failed = $false + try { + $null = Invoke-WithWebRequest -Module $module -Request $r -Script {} + } catch { + $_.Exception.GetType().Name | Assert-Equals -Expected 'WebException' + 
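# .NET reports exceeding the redirect limit as a WebException rather than returning the last response +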
$_.Exception.Message | Assert-Equals -Expected 'Too many automatic redirections were attempted.' + $failed = $true + } + $failed | Assert-Equals -Expected $true + } + + 'Basic auth as Credential' = { + $params = @{ + Url = "http://$httpbin_host/basic-auth/username/password" + UrlUsername = 'username' + UrlPassword = 'password' + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'Basic auth as Header' = { + $params = @{ + Url = "http://$httpbin_host/basic-auth/username/password" + url_username = 'username' + url_password = 'password' + ForceBasicAuth = $true + } + $r = Get-AnsibleWebRequest @params + + Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + } + } + + 'Send request with headers' = { + $params = @{ + Headers = @{ + 'Content-Length' = 0 + testingheader = 'testing_header' + TestHeader = 'test-header' + 'User-Agent' = 'test-agent' + } + Url = "https://$httpbin_host/get" + } + $r = Get-AnsibleWebRequest @params + + $actual = Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.StatusCode | Assert-Equals -Expected 200 + Convert-StreamToString -Stream $Stream + } | ConvertFrom-Json + + $actual.headers.'Testheader' | Assert-Equals -Expected 'test-header' + $actual.headers.'testingheader' | Assert-Equals -Expected 'testing_header' + $actual.Headers.'User-Agent' | Assert-Equals -Expected 'test-agent' + } + + 'Request with timeout' = { + $params = @{ + Uri = "https://$httpbin_host/delay/5" + Timeout = 1 + } + $r = Get-AnsibleWebRequest @params + + $failed = $false + try { + $null = Invoke-WithWebRequest -Module $module -Request $r -Script {} + } catch { + $failed = $true + $_.Exception.GetType().Name | Assert-Equals -Expected WebException + $_.Exception.Message | Assert-Equals -Expected 'The operation has timed out' + } + $failed | Assert-Equals -Expected $true + } + + 'Request with file URI' = { + $filePath = Join-Path $module.Tmpdir -ChildPath 'test.txt' + Set-Content -LiteralPath $filePath -Value 'test' + + $r = Get-AnsibleWebRequest -Uri $filePath + + $actual = Invoke-WithWebRequest -Module $module -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ContentLength | Assert-Equals -Expected 6 + Convert-StreamToString -Stream $Stream + } + $actual | Assert-Equals -Expected "test`r`n" + $module.Result.msg | Assert-Equals -Expected "OK" + $module.Result.status_code | Assert-Equals -Expected 200 + } + + 'Web request based on module options' = { + Set-Variable complex_args -Scope Global -Value @{ + url = "https://$httpbin_host/redirect/2" + method = 'GET' + follow_redirects = 'safe' + headers = @{ + 'User-Agent' = 'other-agent' + } + http_agent = 'actual-agent' + maximum_redirection = 2 + timeout = 10 + validate_certs = $false + } + $spec = @{ + options = @{ + url = @{ type = 'str'; required = $true } + test = @{ type = 'str'; choices = 'abc', 'def'} + } + mutually_exclusive = @(,@('url', 'test')) + } + + $testModule = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @(Get-AnsibleWebRequestSpec)) + $r = Get-AnsibleWebRequest -Url $testModule.Params.url -Module $testModule + + $actual = 
Invoke-WithWebRequest -Module $testModule -Request $r -Script { + Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream) + + $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/get" + Convert-StreamToString -Stream $Stream + } | ConvertFrom-Json + $actual.headers.'User-Agent' | Assert-Equals -Expected 'actual-agent' + } + + 'Web request with default proxy' = { + $params = @{ + Uri = "https://$httpbin_host/get" + } + $r = Get-AnsibleWebRequest @params + + $null -ne $r.Proxy | Assert-Equals -Expected $true + } + + 'Web request with no proxy' = { + $params = @{ + Uri = "https://$httpbin_host/get" + UseProxy = $false + } + $r = Get-AnsibleWebRequest @params + + $null -eq $r.Proxy | Assert-Equals -Expected $true + } +} + +# reset the global module arguments before each test so options from a previous test do not leak through +foreach ($testImpl in $tests.GetEnumerator()) { + Set-Variable -Name complex_args -Scope Global -Value @{} + $test = $testImpl.Key + &$testImpl.Value +} + +$module.Result.data = "success" +$module.ExitJson() diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml new file mode 100644 index 00000000..829d0a78 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- prepare_http_tests diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml new file mode 100644 index 00000000..57d8138a --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: test Ansible.ModuleUtils.WebRequest + web_request_test: + httpbin_host: '{{ httpbin_host }}' + register: web_request + +- name: assert test Ansible.ModuleUtils.WebRequest succeeded + assert: + that: + - web_request.data == 'success' diff --git a/test/integration/targets/module_utils_Ansible.Privilege/aliases b/test/integration/targets/module_utils_Ansible.Privilege/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Privilege/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1 b/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1 new file mode 100644 index 00000000..7c76036a --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1 @@ -0,0 +1,324 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -CSharpUtil Ansible.Privilege + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + 
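+ # on a failed comparison, record the test name and call site in the result to make debugging easier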
+ if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.FailJson("AssertionError: actual != expected") + } +} + +Function Assert-DictionaryEquals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + $actual_keys = $Actual.Keys + $expected_keys = $Expected.Keys + + $actual_keys.Count | Assert-Equals -Expected $expected_keys.Count + foreach ($actual_entry in $Actual.GetEnumerator()) { + $actual_key = $actual_entry.Key + ($actual_key -cin $expected_keys) | Assert-Equals -Expected $true + $actual_value = $actual_entry.Value + $expected_value = $Expected.$actual_key + + if ($actual_value -is [System.Collections.IDictionary]) { + $actual_value | Assert-DictionaryEquals -Expected $expected_value + } elseif ($actual_value -is [System.Collections.ArrayList]) { + for ($i = 0; $i -lt $actual_value.Count; $i++) { + $actual_entry = $actual_value[$i] + $expected_entry = $expected_value[$i] + if ($actual_entry -is [System.Collections.IDictionary]) { + $actual_entry | Assert-DictionaryEquals -Expected $expected_entry + } else { + Assert-Equals -Actual $actual_entry -Expected $expected_entry + } + } + } else { + Assert-Equals -Actual $actual_value -Expected $expected_value + } + } + foreach ($expected_key in $expected_keys) { + ($expected_key -cin $actual_keys) | Assert-Equals -Expected $true + } +} + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.FailJson("AssertionError: actual != expected") + } +} + +Function Assert-DictionaryEquals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + $actual_keys = $Actual.Keys + $expected_keys = $Expected.Keys + + $actual_keys.Count | Assert-Equals -Expected $expected_keys.Count + foreach ($actual_entry in $Actual.GetEnumerator()) { + $actual_key = $actual_entry.Key + ($actual_key -cin $expected_keys) | Assert-Equals -Expected $true + $actual_value = $actual_entry.Value + $expected_value = $Expected.$actual_key + + if ($actual_value -is [System.Collections.IDictionary]) { + $actual_value | Assert-DictionaryEquals -Expected $expected_value + } elseif ($actual_value -is [System.Collections.ArrayList]) { + for ($i = 0; $i -lt $actual_value.Count; $i++) { + $actual_entry = $actual_value[$i] + $expected_entry = 
$expected_value[$i] + if ($actual_entry -is [System.Collections.IDictionary]) { + $actual_entry | Assert-DictionaryEquals -Expected $expected_entry + } else { + Assert-Equals -Actual $actual_entry -Expected $expected_entry + } + } + } else { + Assert-Equals -Actual $actual_value -Expected $expected_value + } + } + foreach ($expected_key in $expected_keys) { + ($expected_key -cin $actual_keys) | Assert-Equals -Expected $true + } +} + +$process = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess() + +$tests = @{ + "Check valid privilege name" = { + $actual = [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName("SeTcbPrivilege") + $actual | Assert-Equals -Expected $true + } + + "Check invalid privilege name" = { + $actual = [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName("SeFake") + $actual | Assert-Equals -Expected $false + } + + "Disable a privilege" = { + # Ensure the privilege is enabled at the start + [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege") > $null + + $actual = [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") + $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2' + $actual.Count | Assert-Equals -Expected 1 + $actual.SeTimeZonePrivilege | Assert-Equals -Expected $true + + # Disable again + $actual = [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") + $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2' + $actual.Count | Assert-Equals -Expected 0 + } + + "Enable a privilege" = { + # Ensure the privilege is disabled at the start + [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") > $null + + $actual = [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege") + $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2' + $actual.Count | Assert-Equals -Expected 1 + $actual.SeTimeZonePrivilege | Assert-Equals -Expected $false + + # Enable again + $actual = [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege") + $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2' + $actual.Count | Assert-Equals -Expected 0 + } + + "Disable and revert privileges" = { + $current_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + + $previous_state = [Ansible.Privilege.PrivilegeUtil]::DisableAllPrivileges($process) + $previous_state.GetType().Name | Assert-Equals -Expected 'Dictionary`2' + foreach ($previous_state_entry in $previous_state.GetEnumerator()) { + $previous_state_entry.Value | Assert-Equals -Expected $true + } + + # Disable again + $previous_state2 = [Ansible.Privilege.PrivilegeUtil]::DisableAllPrivileges($process) + $previous_state2.Count | Assert-Equals -Expected 0 + + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + foreach ($actual_entry in $actual.GetEnumerator()) { + $actual_entry.Value -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + } + + [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $previous_state) > $null + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual | Assert-DictionaryEquals -Expected $current_state + } + + "Remove a privilege" = { + [Ansible.Privilege.PrivilegeUtil]::RemovePrivilege($process, "SeUndockPrivilege") > $null + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.ContainsKey("SeUndockPrivilege") | Assert-Equals -Expected $false + } + + "Test Enabler"
= { + # Disable privileges at the start + $new_state = @{ + SeTimeZonePrivilege = $false + SeShutdownPrivilege = $false + SeIncreaseWorkingSetPrivilege = $false + } + [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $new_state) > $null + $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $check_state.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $check_state.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + + # Check that strict = false ignores privileges that are not held but still activates the ones we want + $enabler = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeShutdownPrivilege", "SeTcbPrivilege" + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $actual.ContainsKey("SeTcbPrivilege") | Assert-Equals -Expected $false + + # Now verify a no-op enabler will not revert back to disabled + $enabler2 = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeShutdownPrivilege", "SeTcbPrivilege" + $enabler2.Dispose() + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + + # Verify that when disposing the object the privileges are reverted + $enabler.Dispose() + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + } + + "Test Enabler strict" = { + # Disable privileges at the start + $new_state = @{ + SeTimeZonePrivilege = $false + SeShutdownPrivilege = $false + SeIncreaseWorkingSetPrivilege = $false + } + [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $new_state) > $null + $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $check_state.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $check_state.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + + # Check that strict = true activates the requested privileges when all of them are held + $enabler = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeShutdownPrivilege" + $actual = 
[Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + + # Now verify a no-op enabler will not revert back to disabled + $enabler2 = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeShutdownPrivilege" + $enabler2.Dispose() + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled) + + # Verify that when disposing the object the privileges are reverted + $enabler.Dispose() + $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + } + + "Test Enabler invalid privilege" = { + $failed = $false + try { + New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeFake" + } catch { + $failed = $true + $_.Exception.InnerException.Message | Assert-Equals -Expected "Failed to enable privilege(s) SeTimeZonePrivilege, SeFake (A specified privilege does not exist, Win32ErrorCode 1313)" + } + $failed | Assert-Equals -Expected $true + } + + "Test Enabler strict failure" = { + # Start disabled + [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") > $null + $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process) + $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0 + + $failed = $false + try { + New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeTcbPrivilege" + } catch { + $failed = $true + $_.Exception.InnerException.Message | Assert-Equals -Expected "Failed to enable privilege(s) SeTimeZonePrivilege, SeTcbPrivilege (Not all privileges or groups referenced are assigned to the caller, Win32ErrorCode 1300)" + } + $failed | Assert-Equals -Expected $true + } +} + +foreach ($test_impl in $tests.GetEnumerator()) { + $test = $test_impl.Key + &$test_impl.Value +} + +$module.Result.data = "success" +$module.ExitJson() + diff --git a/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml new file mode 100644 index 00000000..888394d4 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: test Ansible.Privilege.cs + ansible_privilege_tests: + register: ansible_privilege_test + +- name: assert test Ansible.Privilege.cs + assert: + that: + - ansible_privilege_test.data == "success" diff --git a/test/integration/targets/module_utils_Ansible.Process/aliases
b/test/integration/targets/module_utils_Ansible.Process/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Process/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1 b/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1 new file mode 100644 index 00000000..d906dfc5 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1 @@ -0,0 +1,236 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -CSharpUtil Ansible.Process + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actual_value = $Actual[$i] + $expected_value = $Expected[$i] + Assert-Equals -Actual $actual_value -Expected $expected_value + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + $module.FailJson("AssertionError: actual != expected") + } +} + +$tests = @{ + "ParseCommandLine empty string" = { + $expected = @((Get-Process -Id $pid).Path) + $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("") + Assert-Equals -Actual $actual -Expected $expected + } + + "ParseCommandLine single argument" = { + $expected = @("powershell.exe") + $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("powershell.exe") + Assert-Equals -Actual $actual -Expected $expected + } + + "ParseCommandLine multiple arguments" = { + $expected = @("powershell.exe", "-File", "C:\temp\script.ps1") + $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("powershell.exe -File C:\temp\script.ps1") + Assert-Equals -Actual $actual -Expected $expected + } + + "ParseCommandLine complex arguments" = { + $expected = @('abc', 'd', 'ef gh', 'i\j', 'k"l', 'm\n op', 'ADDLOCAL=qr, s', 'tuv\', 'w''x', 'yz') + $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine('abc d "ef gh" i\j k\"l m\\"n op" ADDLOCAL="qr, s" tuv\ w''x yz') + Assert-Equals -Actual $actual -Expected $expected + } + + "SearchPath normal" = { + $expected = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe" + $actual = [Ansible.Process.ProcessUtil]::SearchPath("powershell.exe") + $actual | Assert-Equals -Expected $expected + } + + "SearchPath missing" = { + $failed = $false + try { + [Ansible.Process.ProcessUtil]::SearchPath("fake.exe") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "System.IO.FileNotFoundException" + $expected = 'Exception calling "SearchPath" with "1" argument(s): "Could not find file ''fake.exe''."' + $_.Exception.Message | Assert-Equals -Expected $expected + } + $failed | Assert-Equals -Expected $true + } + + "CreateProcess basic" = { + 
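# with no other arguments the util should resolve whoami.exe through the normal CreateProcess search order and capture its output +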
$actual = [Ansible.Process.ProcessUtil]::CreateProcess("whoami.exe") + $actual.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Result" + $actual.StandardOut | Assert-Equals -Expected "$(&whoami.exe)`r`n" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess stderr" = { + $actual = [Ansible.Process.ProcessUtil]::CreateProcess("powershell.exe [System.Console]::Error.WriteLine('hi')") + $actual.StandardOut | Assert-Equals -Expected "" + $actual.StandardError | Assert-Equals -Expected "hi`r`n" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess exit code" = { + $actual = [Ansible.Process.ProcessUtil]::CreateProcess("powershell.exe exit 10") + $actual.StandardOut | Assert-Equals -Expected "" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 10 + } + + "CreateProcess bad executable" = { + $failed = $false + try { + [Ansible.Process.ProcessUtil]::CreateProcess("fake.exe") + } catch { + $failed = $true + $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Win32Exception" + $expected = 'Exception calling "CreateProcess" with "1" argument(s): "CreateProcessW() failed ' + $expected += '(The system cannot find the file specified, Win32ErrorCode 2)"' + $_.Exception.Message | Assert-Equals -Expected $expected + } + $failed | Assert-Equals -Expected $true + } + + "CreateProcess with unicode" = { + $actual = [Ansible.Process.ProcessUtil]::CreateProcess("cmd.exe /c echo 💩 café") + $actual.StandardOut | Assert-Equals -Expected "💩 café`r`n" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, "cmd.exe /c echo 💩 café", $null, $null) + $actual.StandardOut | Assert-Equals -Expected "💩 café`r`n" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess without working dir" = { + $expected = $pwd.Path + "`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $pwd.Path', $null, $null) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with working dir" = { + $expected = "C:\Windows`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $pwd.Path', "C:\Windows", $null) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess without environment" = { + $expected = "$($env:USERNAME)`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $env:TEST; $env:USERNAME', $null, $null) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with environment" = { + $env_vars = @{ + TEST = "tesTing" + TEST2 = "Testing 2" + } + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'cmd.exe /c set', $null, $env_vars) + ("TEST=tesTing" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + ("TEST2=Testing 2" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + ("USERNAME=$($env:USERNAME)" -cnotin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true + 
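# the supplied environment block replaces the inherited one, which is why USERNAME is checked as absent above +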
$actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with string stdin" = { + $expected = "input value`r`n`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()', + $null, $null, "input value") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with string stdin and newline" = { + $expected = "input value`r`n`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()', + $null, $null, "input value`r`n") + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with byte stdin" = { + $expected = "input value`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()', + $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value")) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with byte stdin and newline" = { + $expected = "input value`r`n`r`n" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()', + $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value`r`n")) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with lpApplicationName" = { + $expected = "abc`r`n" + $full_path = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe" + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($full_path, "Write-Output 'abc'", $null, $null) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($full_path, "powershell.exe Write-Output 'abc'", $null, $null) + $actual.StandardOut | Assert-Equals -Expected $expected + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } + + "CreateProcess with unicode and us-ascii encoding" = { + $poop = [System.Char]::ConvertFromUtf32(0xE05A) # Coverage breaks due to script parsing encoding issues with unicode chars, just use the code point instead + $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, "cmd.exe /c echo $poop café", $null, $null, '', 'us-ascii') + $actual.StandardOut | Assert-Equals -Expected "??? 
caf??`r`n" + $actual.StandardError | Assert-Equals -Expected "" + $actual.ExitCode | Assert-Equals -Expected 0 + } +} + +foreach ($test_impl in $tests.GetEnumerator()) { + $test = $test_impl.Key + &$test_impl.Value +} + +$module.Result.data = "success" +$module.ExitJson() diff --git a/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml new file mode 100644 index 00000000..13a5c16e --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: test Ansible.Process.cs + ansible_process_tests: + register: ansible_process_tests + +- name: assert test Ansible.Process.cs + assert: + that: + - ansible_process_tests.data == "success" diff --git a/test/integration/targets/module_utils_Ansible.Service/aliases b/test/integration/targets/module_utils_Ansible.Service/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Service/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1 b/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1 new file mode 100644 index 00000000..6c8f729b --- /dev/null +++ b/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1 @@ -0,0 +1,937 @@ +#!powershell + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -CSharpUtil Ansible.Service +#Requires -Module Ansible.ModuleUtils.ArgvParser +#Requires -Module Ansible.ModuleUtils.CommandUtil + +$module = [Ansible.Basic.AnsibleModule]::Create($args, @{}) + +$path = "$env:SystemRoot\System32\svchost.exe" + +Function Assert-Equals { + param( + [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual, + [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected + ) + + $matched = $false + if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array] -or $Actual -is [System.Collections.IList]) { + $Actual.Count | Assert-Equals -Expected $Expected.Count + for ($i = 0; $i -lt $Actual.Count; $i++) { + $actualValue = $Actual[$i] + $expectedValue = $Expected[$i] + Assert-Equals -Actual $actualValue -Expected $expectedValue + } + $matched = $true + } else { + $matched = $Actual -ceq $Expected + } + + if (-not $matched) { + if ($Actual -is [PSObject]) { + $Actual = $Actual.ToString() + } + + $call_stack = (Get-PSCallStack)[1] + $module.Result.test = $test + $module.Result.actual = $Actual + $module.Result.expected = $Expected + $module.Result.line = $call_stack.ScriptLineNumber + $module.Result.method = $call_stack.Position.Text + + $module.FailJson("AssertionError: actual != expected") + } +} + +Function Invoke-Sc { + [CmdletBinding()] + param ( + [Parameter(Mandatory=$true)] + [String] + $Action, + + [Parameter(Mandatory=$true)] + [String] + $Name, + + [Object] + $Arguments + ) + + $commandArgs = [System.Collections.Generic.List[String]]@("sc.exe", $Action, $Name) + if ($null -ne $Arguments) { + if ($Arguments -is [System.Collections.IDictionary]) { + foreach ($arg in $Arguments.GetEnumerator()) { + $commandArgs.Add("$($arg.Key)=") + $commandArgs.Add($arg.Value) + } + } else { + foreach ($arg in $Arguments) { + $commandArgs.Add($arg) + } + } + } + + $command = Argv-ToString -arguments $commandArgs + + $res = Run-Command -command $command + if ($res.rc -ne 0) { + $module.Result.rc = $res.rc + 
$module.Result.stdout = $res.stdout + $module.Result.stderr = $res.stderr + $module.FailJson("Failed to invoke sc with: $command") + } + + $info = @{ Name = $Name } + + if ($Action -eq 'qtriggerinfo') { + # qtriggerinfo is in a different format which requires some manual parsing from the norm. + $info.Triggers = [System.Collections.Generic.List[PSObject]]@() + } + + $currentKey = $null + $qtriggerSection = @{} + $res.stdout -split "`r`n" | Foreach-Object -Process { + $line = $_.Trim() + + if ($Action -eq 'qtriggerinfo' -and $line -in @('START SERVICE', 'STOP SERVICE')) { + if ($qtriggerSection.Count -gt 0) { + $info.Triggers.Add([PSCustomObject]$qtriggerSection) + $qtriggerSection = @{} + } + + $qtriggerSection = @{ + Action = $line + } + } + + if (-not $line -or (-not $line.Contains(':') -and $null -eq $currentKey)) { + return + } + + $lineSplit = $line.Split(':', 2) + if ($lineSplit.Length -eq 2) { + $k = $lineSplit[0].Trim() + if (-not $k) { + $k = $currentKey + } + + $v = $lineSplit[1].Trim() + } else { + $k = $currentKey + $v = $line + } + + if ($qtriggerSection.Count -gt 0) { + if ($k -eq 'DATA') { + $qtriggerSection.Data.Add($v) + } else { + $qtriggerSection.Type = $k + $qtriggerSection.SubType = $v + $qtriggerSection.Data = [System.Collections.Generic.List[String]]@() + } + } else { + if ($info.ContainsKey($k)) { + if ($info[$k] -isnot [System.Collections.Generic.List[String]]) { + $info[$k] = [System.Collections.Generic.List[String]]@($info[$k]) + } + $info[$k].Add($v) + } else { + $currentKey = $k + $info[$k] = $v + } + } + } + + if ($qtriggerSection.Count -gt 0) { + $info.Triggers.Add([PSCustomObject]$qtriggerSection) + } + + [PSCustomObject]$info +} + +$tests = [Ordered]@{ + "Props on service created by New-Service" = { + $actual = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + + $actual.ServiceName | Assert-Equals -Expected $serviceName + $actual.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess) + $actual.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart) + $actual.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Normal) + $actual.Path | Assert-Equals -Expected ('"{0}"' -f $path) + $actual.LoadOrderGroup | Assert-Equals -Expected "" + $actual.DependentOn.Count | Assert-Equals -Expected 0 + $actual.Account | Assert-Equals -Expected ( + [System.Security.Principal.SecurityIdentifier]'S-1-5-18').Translate([System.Security.Principal.NTAccount] + ) + $actual.DisplayName | Assert-Equals -Expected $serviceName + $actual.Description | Assert-Equals -Expected $null + $actual.FailureActions.ResetPeriod | Assert-Equals -Expected 0 + $actual.FailureActions.RebootMsg | Assert-Equals -Expected $null + $actual.FailureActions.Command | Assert-Equals -Expected $null + $actual.FailureActions.Actions.Count | Assert-Equals -Expected 0 + $actual.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false + $actual.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None) + $actual.RequiredPrivileges.Count | Assert-Equals -Expected 0 + # Cannot test default values as it differs per OS version + $null -ne $actual.PreShutdownTimeout | Assert-Equals -Expected $true + $actual.Triggers.Count | Assert-Equals -Expected 0 + $actual.PreferredNode | Assert-Equals -Expected $null + if ([Environment]::OSVersion.Version -ge [Version]'6.3') { + $actual.LaunchProtection | Assert-Equals -Expected ([Ansible.Service.LaunchProtection]::None) + } else { + 
$actual.LaunchProtection | Assert-Equals -Expected $null + } + $actual.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped) + $actual.Win32ExitCode | Assert-Equals -Expected 1077 # ERROR_SERVICE_NEVER_STARTED + $actual.ServiceExitCode | Assert-Equals -Expected 0 + $actual.Checkpoint | Assert-Equals -Expected 0 + $actual.WaitHint | Assert-Equals -Expected 0 + $actual.ProcessId | Assert-Equals -Expected 0 + $actual.ServiceFlags | Assert-Equals -Expected ([Ansible.Service.ServiceFlags]::None) + $actual.DependedBy.Count | Assert-Equals 0 + } + + "Service creation through util" = { + $testName = "$($serviceName)_2" + $actual = [Ansible.Service.Service]::Create($testName, '"{0}"' -f $path) + + try { + $cmdletService = Get-Service -Name $testName -ErrorAction SilentlyContinue + $null -ne $cmdletService | Assert-Equals -Expected $true + + $actual.ServiceName | Assert-Equals -Expected $testName + $actual.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess) + $actual.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart) + $actual.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Normal) + $actual.Path | Assert-Equals -Expected ('"{0}"' -f $path) + $actual.LoadOrderGroup | Assert-Equals -Expected "" + $actual.DependentOn.Count | Assert-Equals -Expected 0 + $actual.Account | Assert-Equals -Expected ( + [System.Security.Principal.SecurityIdentifier]'S-1-5-18').Translate([System.Security.Principal.NTAccount] + ) + $actual.DisplayName | Assert-Equals -Expected $testName + $actual.Description | Assert-Equals -Expected $null + $actual.FailureActions.ResetPeriod | Assert-Equals -Expected 0 + $actual.FailureActions.RebootMsg | Assert-Equals -Expected $null + $actual.FailureActions.Command | Assert-Equals -Expected $null + $actual.FailureActions.Actions.Count | Assert-Equals -Expected 0 + $actual.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false + $actual.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None) + $actual.RequiredPrivileges.Count | Assert-Equals -Expected 0 + $null -ne $actual.PreShutdownTimeout | Assert-Equals -Expected $true + $actual.Triggers.Count | Assert-Equals -Expected 0 + $actual.PreferredNode | Assert-Equals -Expected $null + if ([Environment]::OSVersion.Version -ge [Version]'6.3') { + $actual.LaunchProtection | Assert-Equals -Expected ([Ansible.Service.LaunchProtection]::None) + } else { + $actual.LaunchProtection | Assert-Equals -Expected $null + } + $actual.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped) + $actual.Win32ExitCode | Assert-Equals -Expected 1077 # ERROR_SERVICE_NEVER_STARTED + $actual.ServiceExitCode | Assert-Equals -Expected 0 + $actual.Checkpoint | Assert-Equals -Expected 0 + $actual.WaitHint | Assert-Equals -Expected 0 + $actual.ProcessId | Assert-Equals -Expected 0 + $actual.ServiceFlags | Assert-Equals -Expected ([Ansible.Service.ServiceFlags]::None) + $actual.DependedBy.Count | Assert-Equals 0 + } finally { + $actual.Delete() + } + } + + "Fail to open non-existing service" = { + $failed = $false + try { + $null = New-Object -TypeName Ansible.Service.Service -ArgumentList 'fake_service' + } catch [Ansible.Service.ServiceManagerException] { + # 1060 == ERROR_SERVICE_DOES_NOT_EXIST + $_.Exception.Message -like '*Win32ErrorCode 1060 - 0x00000424*' | Assert-Equals -Expected $true + $failed = $true + } + + $failed | Assert-Equals -Expected $true + } + + "Open with specific access rights" = { + 
$service = New-Object -TypeName Ansible.Service.Service -ArgumentList @( + $serviceName, [Ansible.Service.ServiceRights]'QueryConfig, QueryStatus' + ) + + # QueryStatus can get the status + $service.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped) + + # Should fail to change the config because we did not request that right + $failed = $false + try { + $service.Path = 'fail' + } catch [Ansible.Service.ServiceManagerException] { + # 5 == ERROR_ACCESS_DENIED + $_.Exception.Message -like '*Win32ErrorCode 5 - 0x00000005*' | Assert-Equals -Expected $true + $failed = $true + } + + $failed | Assert-Equals -Expected $true + + } + + "Modify ServiceType" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.ServiceType = [Ansible.Service.ServiceType]::Win32ShareProcess + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32ShareProcess) + $actual.TYPE | Assert-Equals -Expected "20 WIN32_SHARE_PROCESS" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{type="own"} + $service.Refresh() + $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess) + } + + "Create desktop interactive service" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.ServiceType = [Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess' + + $actual = Invoke-Sc -Action qc -Name $serviceName + $actual.TYPE | Assert-Equals -Expected "110 WIN32_OWN_PROCESS (interactive)" + $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess') + + # Change back from interactive process + $service.ServiceType = [Ansible.Service.ServiceType]::Win32OwnProcess + + $actual = Invoke-Sc -Action qc -Name $serviceName + $actual.TYPE | Assert-Equals -Expected "10 WIN32_OWN_PROCESS" + $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess) + + $service.Account = [System.Security.Principal.SecurityIdentifier]'S-1-5-20' + + $failed = $false + try { + $service.ServiceType = [Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess' + } catch [Ansible.Service.ServiceManagerException] { + $failed = $true + $_.Exception.NativeErrorCode | Assert-Equals -Expected 87 # ERROR_INVALID_PARAMETER + } + $failed | Assert-Equals -Expected $true + + $actual = Invoke-Sc -Action qc -Name $serviceName + $actual.TYPE | Assert-Equals -Expected "10 WIN32_OWN_PROCESS" + } + + "Modify StartType" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.StartType = [Ansible.Service.ServiceStartType]::Disabled + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::Disabled) + $actual.START_TYPE | Assert-Equals -Expected "4 DISABLED" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{start="demand"} + $service.Refresh() + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart) + } + + "Modify StartType auto delayed" = { + # Delayed start is a modifier of the AutoStart type that is stored in a separate config entry. This + # makes sure the util handles transitions between the various start types correctly.
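+ # (the separate entry is the SERVICE_CONFIG_DELAYED_AUTO_START_INFO setting of ChangeServiceConfig2)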
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.StartType = [Ansible.Service.ServiceStartType]::Disabled # Start from Disabled + + # Disabled -> Auto Start Delayed + $service.StartType = [Ansible.Service.ServiceStartType]::AutoStartDelayed + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStartDelayed) + $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START (DELAYED)" + + # Auto Start Delayed -> Auto Start + $service.StartType = [Ansible.Service.ServiceStartType]::AutoStart + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStart) + $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START" + + # Auto Start -> Auto Start Delayed + $service.StartType = [Ansible.Service.ServiceStartType]::AutoStartDelayed + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStartDelayed) + $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START (DELAYED)" + + # Auto Start Delayed -> Manual + $service.StartType = [Ansible.Service.ServiceStartType]::DemandStart + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart) + $actual.START_TYPE | Assert-Equals -Expected "3 DEMAND_START" + } + + "Modify ErrorControl" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.ErrorControl = [Ansible.Service.ErrorControl]::Severe + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Severe) + $actual.ERROR_CONTROL | Assert-Equals -Expected "2 SEVERE" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{error="ignore"} + $service.Refresh() + $service.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Ignore) + } + + "Modify Path" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.Path = "Fake path" + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.Path | Assert-Equals -Expected "Fake path" + $actual.BINARY_PATH_NAME | Assert-Equals -Expected "Fake path" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{binpath="other fake path"} + $service.Refresh() + $service.Path | Assert-Equals -Expected "other fake path" + } + + "Modify LoadOrderGroup" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.LoadOrderGroup = "my group" + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.LoadOrderGroup | Assert-Equals -Expected "my group" + $actual.LOAD_ORDER_GROUP | Assert-Equals -Expected "my group" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{group=""} + $service.Refresh() + $service.LoadOrderGroup | Assert-Equals -Expected "" + } + + "Modify DependentOn" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.DependentOn = @("HTTP", "WinRM") + + $actual = Invoke-Sc -Action qc -Name $serviceName + @(,$service.DependentOn) | Assert-Equals -Expected @("HTTP", "WinRM") + @(,$actual.DEPENDENCIES) | Assert-Equals -Expected @("HTTP", "WinRM") + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{depend=""} + $service.Refresh() + 
$service.DependentOn.Count | Assert-Equals -Expected 0
+    }
+
+    "Modify Account - service account" = {
+        $systemSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-18'
+        $systemName = $systemSid.Translate([System.Security.Principal.NTAccount])
+        $localSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-19'
+        $localName = $localSid.Translate([System.Security.Principal.NTAccount])
+        $networkSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-20'
+        $networkName = $networkSid.Translate([System.Security.Principal.NTAccount])
+
+        $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+        $service.Account = $networkSid
+
+        $actual = Invoke-Sc -Action qc -Name $serviceName
+        $service.Account | Assert-Equals -Expected $networkName
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected $networkName.Value
+
+        $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{obj=$localName.Value}
+        $service.Refresh()
+        $service.Account | Assert-Equals -Expected $localName
+
+        $service.Account = $systemSid
+        $actual = Invoke-Sc -Action qc -Name $serviceName
+        $service.Account | Assert-Equals -Expected $systemName
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected "LocalSystem"
+    }
+
+    "Modify Account - user" = {
+        $currentSid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+
+        $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+        $service.Account = $currentSid
+        $service.Password = 'password'
+
+        $actual = Invoke-Sc -Action qc -Name $serviceName
+
+        # When running tests in CI this seems to become .\Administrator
+        if ($service.Account.Value.StartsWith('.\')) {
+            $username = $service.Account.Value.Substring(2, $service.Account.Value.Length - 2)
+            $actualSid = ([System.Security.Principal.NTAccount]"$env:COMPUTERNAME\$username").Translate(
+                [System.Security.Principal.SecurityIdentifier]
+            )
+        } else {
+            $actualSid = $service.Account.Translate([System.Security.Principal.SecurityIdentifier])
+        }
+        $actualSid.Value | Assert-Equals -Expected $currentSid.Value
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected $service.Account.Value
+
+        # Go back to SYSTEM from account
+        $systemSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-18'
+        $service.Account = $systemSid
+
+        $actual = Invoke-Sc -Action qc -Name $serviceName
+        $service.Account | Assert-Equals -Expected $systemSid.Translate([System.Security.Principal.NTAccount])
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected "LocalSystem"
+    }
+
+    "Modify Account - virtual account" = {
+        $account = [System.Security.Principal.NTAccount]"NT SERVICE\$serviceName"
+
+        $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+        $service.Account = $account
+
+        $actual = Invoke-Sc -Action qc -Name $serviceName
+        $service.Account | Assert-Equals -Expected $account
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected $account.Value
+    }
+
+    "Modify Account - gMSA" = {
+        # This cannot be tested in CI; it is only run during manual testing.
+        return
+
+        $gmsaName = [System.Security.Principal.NTAccount]'gMSA$@DOMAIN.LOCAL' # Make sure this is UPN.
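+        # A gMSA can be spelled two ways, and both must be accepted (the password is
+        # irrelevant for gMSAs); 'DOMAIN.LOCAL' above is a placeholder domain:
+        #   UPN format:      gMSA$@DOMAIN.LOCAL
+        #   NETLOGON format: DOMAIN\gMSA$
+        # The steps below round-trip through the SID so both spellings are exercised.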
+ $gmsaSid = $gmsaName.Translate([System.Security.Principal.SecurityIdentifier]) + $gmsaNetlogon = $gmsaSid.Translate([System.Security.Principal.NTAccount]) + + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.Account = $gmsaName + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.Account | Assert-Equals -Expected $gmsaName + $actual.SERVICE_START_NAME | Assert-Equals -Expected $gmsaName + + # Go from gMSA to account and back to verify the Password doesn't matter. + $currentUser = [System.Security.Principal.WindowsIdentity]::GetCurrent().User + $service.Account = $currentUser + $service.Password = 'fake password' + $service.Password = 'fake password2' + + # Now test in the Netlogon format. + $service.Account = $gmsaSid + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.Account | Assert-Equals -Expected $gmsaNetlogon + $actual.SERVICE_START_NAME | Assert-Equals -Expected $gmsaNetlogon.Value + } + + "Modify DisplayName" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.DisplayName = "Custom Service Name" + + $actual = Invoke-Sc -Action qc -Name $serviceName + $service.DisplayName | Assert-Equals -Expected "Custom Service Name" + $actual.DISPLAY_NAME | Assert-Equals -Expected "Custom Service Name" + + $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{displayname="New Service Name"} + $service.Refresh() + $service.DisplayName | Assert-Equals -Expected "New Service Name" + } + + "Modify Description" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.Description = "My custom service description" + + $actual = Invoke-Sc -Action qdescription -Name $serviceName + $service.Description | Assert-Equals -Expected "My custom service description" + $actual.DESCRIPTION | Assert-Equals -Expected "My custom service description" + + $null = Invoke-Sc -Action description -Name $serviceName -Arguments @(,"new description") + $service.Description | Assert-Equals -Expected "new description" + + $service.Description = $null + + $actual = Invoke-Sc -Action qdescription -Name $serviceName + $service.Description | Assert-Equals -Expected $null + $actual.DESCRIPTION | Assert-Equals -Expected "" + } + + "Modify FailureActions" = { + $newAction = [Ansible.Service.FailureActions]@{ + ResetPeriod = 86400 + RebootMsg = 'Reboot msg' + Command = 'Command line' + Actions = @( + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 1000}, + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 2000}, + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::Restart; Delay = 1000}, + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::Reboot; Delay = 1000} + ) + } + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.FailureActions = $newAction + + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'Reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line' + $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4 + $actual.FAILURE_ACTIONS[0] | Assert-Equals -Expected "RUN PROCESS -- Delay = 1000 milliseconds." + $actual.FAILURE_ACTIONS[1] | Assert-Equals -Expected "RUN PROCESS -- Delay = 2000 milliseconds." 
+ $actual.FAILURE_ACTIONS[2] | Assert-Equals -Expected "RESTART -- Delay = 1000 milliseconds." + $actual.FAILURE_ACTIONS[3] | Assert-Equals -Expected "REBOOT -- Delay = 1000 milliseconds." + $service.FailureActions.Actions.Count | Assert-Equals -Expected 4 + + # Test that we can change individual settings and it doesn't change all + $service.FailureActions = [Ansible.Service.FailureActions]@{ResetPeriod = 172800} + + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'Reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line' + $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4 + $service.FailureActions.Actions.Count | Assert-Equals -Expected 4 + + $service.FailureActions = [Ansible.Service.FailureActions]@{RebootMsg = "New reboot msg"} + + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line' + $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4 + $service.FailureActions.Actions.Count | Assert-Equals -Expected 4 + + $service.FailureActions = [Ansible.Service.FailureActions]@{Command = "New command line"} + + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line' + $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4 + $service.FailureActions.Actions.Count | Assert-Equals -Expected 4 + + # Test setting both ResetPeriod and Actions together + $service.FailureActions = [Ansible.Service.FailureActions]@{ + ResetPeriod = 86400 + Actions = @( + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 5000}, + [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::None; Delay = 0} + ) + } + + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line' + # sc.exe does not show the None action it just ends the list, so we verify from get_FailureActions + $actual.FAILURE_ACTIONS | Assert-Equals -Expected "RUN PROCESS -- Delay = 5000 milliseconds." + $service.FailureActions.Actions.Count | Assert-Equals -Expected 2 + $service.FailureActions.Actions[1].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::None) + + # Test setting just Actions without ResetPeriod + $service.FailureActions = [Ansible.Service.FailureActions]@{ + Actions = [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 10000} + } + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400 + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line' + $actual.FAILURE_ACTIONS | Assert-Equals -Expected "RUN PROCESS -- Delay = 10000 milliseconds." 
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 1 + + # Test removing all actions + $service.FailureActions = [Ansible.Service.FailureActions]@{ + Actions = @() + } + $actual = Invoke-Sc -Action qfailure -Name $serviceName + $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 0 # ChangeServiceConfig2W resets this back to 0. + $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg' + $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line' + $actual.PSObject.Properties.Name.Contains('FAILURE_ACTIONS') | Assert-Equals -Expected $false + $service.FailureActions.Actions.Count | Assert-Equals -Expected 0 + + # Test that we are reading the right values + $null = Invoke-Sc -Action failure -Name $serviceName -Arguments @{ + reset = 172800 + reboot = "sc reboot msg" + command = "sc command line" + actions = "run/5000/reboot/800" + } + + $actual = $service.FailureActions + $actual.ResetPeriod | Assert-Equals -Expected 172800 + $actual.RebootMsg | Assert-Equals -Expected "sc reboot msg" + $actual.Command | Assert-Equals -Expected "sc command line" + $actual.Actions.Count | Assert-Equals -Expected 2 + $actual.Actions[0].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::RunCommand) + $actual.Actions[0].Delay | Assert-Equals -Expected 5000 + $actual.Actions[1].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::Reboot) + $actual.Actions[1].Delay | Assert-Equals -Expected 800 + } + + "Modify FailureActionsOnNonCrashFailures" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.FailureActionsOnNonCrashFailures = $true + + $actual = Invoke-Sc -Action qfailureflag -Name $serviceName + $service.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $true + $actual.FAILURE_ACTIONS_ON_NONCRASH_FAILURES | Assert-Equals -Expected "TRUE" + + $null = Invoke-Sc -Action failureflag -Name $serviceName -Arguments @(,0) + $service.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false + } + + "Modify ServiceSidInfo" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.ServiceSidInfo = [Ansible.Service.ServiceSidInfo]::None + + $actual = Invoke-Sc -Action qsidtype -Name $serviceName + $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None) + $actual.SERVICE_SID_TYPE | Assert-Equals -Expected 'NONE' + + $null = Invoke-Sc -Action sidtype -Name $serviceName -Arguments @(,'unrestricted') + $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::Unrestricted) + + $service.ServiceSidInfo = [Ansible.Service.ServiceSidInfo]::Restricted + + $actual = Invoke-Sc -Action qsidtype -Name $serviceName + $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::Restricted) + $actual.SERVICE_SID_TYPE | Assert-Equals -Expected 'RESTRICTED' + } + + "Modify RequiredPrivileges" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.RequiredPrivileges = @("SeBackupPrivilege", "SeTcbPrivilege") + + $actual = Invoke-Sc -Action qprivs -Name $serviceName + ,$service.RequiredPrivileges | Assert-Equals -Expected @("SeBackupPrivilege", "SeTcbPrivilege") + ,$actual.PRIVILEGES | Assert-Equals -Expected @("SeBackupPrivilege", "SeTcbPrivilege") + + # Ensure setting to $null is the same as an empty array + $service.RequiredPrivileges = $null + + $actual = Invoke-Sc -Action qprivs -Name $serviceName + ,$service.RequiredPrivileges | 
Assert-Equals -Expected @() + ,$actual.PRIVILEGES | Assert-Equals -Expected @() + + $service.RequiredPrivileges = @("SeBackupPrivilege", "SeTcbPrivilege") + $service.RequiredPrivileges = @() + + $actual = Invoke-Sc -Action qprivs -Name $serviceName + ,$service.RequiredPrivileges | Assert-Equals -Expected @() + ,$actual.PRIVILEGES | Assert-Equals -Expected @() + + $null = Invoke-Sc -Action privs -Name $serviceName -Arguments @(,"SeCreateTokenPrivilege/SeRestorePrivilege") + ,$service.RequiredPrivileges | Assert-Equals -Expected @("SeCreateTokenPrivilege", "SeRestorePrivilege") + } + + "Modify PreShutdownTimeout" = { + $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName + $service.PreShutdownTimeout = 60000 + + # sc.exe doesn't seem to have a query argument for this, just get it from the registry + $actual = ( + Get-ItemProperty -LiteralPath "HKLM:\SYSTEM\CurrentControlSet\Services\$serviceName" -Name PreshutdownTimeout + ).PreshutdownTimeout + $actual | Assert-Equals -Expected 60000 + } + + "Modify Triggers" = { + $service = [Ansible.Service.Service]$serviceName + $service.Triggers = @( + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::DomainJoin + Action = [Ansible.Service.TriggerAction]::ServiceStop + SubType = [Guid][Ansible.Service.Trigger]::DOMAIN_JOIN_GUID + }, + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::NetworkEndpoint + Action = [Ansible.Service.TriggerAction]::ServiceStart + SubType = [Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID + DataItems = [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::String + Data = 'my named pipe' + } + }, + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::NetworkEndpoint + Action = [Ansible.Service.TriggerAction]::ServiceStart + SubType = [Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID + DataItems = [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::String + Data = 'my named pipe 2' + } + }, + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::Custom + Action = [Ansible.Service.TriggerAction]::ServiceStart + SubType = [Guid]'9bf04e57-05dc-4914-9ed9-84bf992db88c' + DataItems = @( + [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::Binary + Data = [byte[]]@(1, 2, 3, 4) + }, + [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::Binary + Data = [byte[]]@(5, 6, 7, 8, 9) + } + ) + } + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::Custom + Action = [Ansible.Service.TriggerAction]::ServiceStart + SubType = [Guid]'9fbcfc7e-7581-4d46-913b-53bb15c80c51' + DataItems = @( + [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::String + Data = 'entry 1' + }, + [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::String + Data = 'entry 2' + } + ) + }, + [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::FirewallPortEvent + Action = [Ansible.Service.TriggerAction]::ServiceStop + SubType = [Guid][Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID + DataItems = [Ansible.Service.TriggerItem]@{ + Type = [Ansible.Service.TriggerDataType]::String + Data = [System.Collections.Generic.List[String]]@("1234", "tcp", "imagepath", "servicename") + } + } + ) + + $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName + + $actual.Triggers.Count | Assert-Equals -Expected 6 + $actual.Triggers[0].Type | Assert-Equals -Expected 'DOMAIN JOINED STATUS' + 
$actual.Triggers[0].Action | Assert-Equals -Expected 'STOP SERVICE' + $actual.Triggers[0].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::DOMAIN_JOIN_GUID) [DOMAIN JOINED]" + $actual.Triggers[0].Data.Count | Assert-Equals -Expected 0 + + $actual.Triggers[1].Type | Assert-Equals -Expected 'NETWORK EVENT' + $actual.Triggers[1].Action | Assert-Equals -Expected 'START SERVICE' + $actual.Triggers[1].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID) [NAMED PIPE EVENT]" + $actual.Triggers[1].Data.Count | Assert-Equals -Expected 1 + $actual.Triggers[1].Data[0] | Assert-Equals -Expected 'my named pipe' + + $actual.Triggers[2].Type | Assert-Equals -Expected 'NETWORK EVENT' + $actual.Triggers[2].Action | Assert-Equals -Expected 'START SERVICE' + $actual.Triggers[2].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID) [NAMED PIPE EVENT]" + $actual.Triggers[2].Data.Count | Assert-Equals -Expected 1 + $actual.Triggers[2].Data[0] | Assert-Equals -Expected 'my named pipe 2' + + $actual.Triggers[3].Type | Assert-Equals -Expected 'CUSTOM' + $actual.Triggers[3].Action | Assert-Equals -Expected 'START SERVICE' + $actual.Triggers[3].SubType | Assert-Equals -Expected '9bf04e57-05dc-4914-9ed9-84bf992db88c [ETW PROVIDER UUID]' + $actual.Triggers[3].Data.Count | Assert-Equals -Expected 2 + $actual.Triggers[3].Data[0] | Assert-Equals -Expected '01 02 03 04' + $actual.Triggers[3].Data[1] | Assert-Equals -Expected '05 06 07 08 09' + + $actual.Triggers[4].Type | Assert-Equals -Expected 'CUSTOM' + $actual.Triggers[4].Action | Assert-Equals -Expected 'START SERVICE' + $actual.Triggers[4].SubType | Assert-Equals -Expected '9fbcfc7e-7581-4d46-913b-53bb15c80c51 [ETW PROVIDER UUID]' + $actual.Triggers[4].Data.Count | Assert-Equals -Expected 2 + $actual.Triggers[4].Data[0] | Assert-Equals -Expected "entry 1" + $actual.Triggers[4].Data[1] | Assert-Equals -Expected "entry 2" + + $actual.Triggers[5].Type | Assert-Equals -Expected 'FIREWALL PORT EVENT' + $actual.Triggers[5].Action | Assert-Equals -Expected 'STOP SERVICE' + $actual.Triggers[5].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID) [PORT CLOSE]" + $actual.Triggers[5].Data.Count | Assert-Equals -Expected 1 + $actual.Triggers[5].Data[0] | Assert-Equals -Expected '1234;tcp;imagepath;servicename' + + # Remove trigger with $null + $service.Triggers = $null + + $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName + $actual.Triggers.Count | Assert-Equals -Expected 0 + + # Add a single trigger + $service.Triggers = [Ansible.Service.Trigger]@{ + Type = [Ansible.Service.TriggerType]::GroupPolicy + Action = [Ansible.Service.TriggerAction]::ServiceStart + SubType = [Guid][Ansible.Service.Trigger]::MACHINE_POLICY_PRESENT_GUID + } + + $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName + $actual.Triggers.Count | Assert-Equals -Expected 1 + $actual.Triggers[0].Type | Assert-Equals -Expected 'GROUP POLICY' + $actual.Triggers[0].Action | Assert-Equals -Expected 'START SERVICE' + $actual.Triggers[0].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::MACHINE_POLICY_PRESENT_GUID) [MACHINE POLICY PRESENT]" + $actual.Triggers[0].Data.Count | Assert-Equals -Expected 0 + + # Remove trigger with empty list + $service.Triggers = @() + + $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName + $actual.Triggers.Count | Assert-Equals -Expected 0 + + # Add triggers through sc and check we get the values correctly + $null = Invoke-Sc -Action 
triggerinfo -Name $serviceName -Arguments @(
+            'start/namedpipe/abc',
+            'start/namedpipe/def',
+            'start/custom/d4497e12-ac36-4823-af61-92db0dbd4a76/11223344/aabbccdd',
+            'start/strcustom/435a1742-22c5-4234-9db3-e32dafde695c/11223344/aabbccdd',
+            'stop/portclose/1234;tcp;imagepath;servicename',
+            'stop/networkoff'
+        )
+
+        $actual = $service.Triggers
+        $actual.Count | Assert-Equals -Expected 6
+
+        $actual[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::NetworkEndpoint)
+        $actual[0].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[0].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID)
+        $actual[0].DataItems.Count | Assert-Equals -Expected 1
+        $actual[0].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+        $actual[0].DataItems[0].Data | Assert-Equals -Expected 'abc'
+
+        $actual[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::NetworkEndpoint)
+        $actual[1].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[1].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID)
+        $actual[1].DataItems.Count | Assert-Equals -Expected 1
+        $actual[1].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+        $actual[1].DataItems[0].Data | Assert-Equals -Expected 'def'
+
+        $actual[2].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::Custom)
+        $actual[2].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[2].SubType | Assert-Equals -Expected ([Guid]'d4497e12-ac36-4823-af61-92db0dbd4a76')
+        $actual[2].DataItems.Count | Assert-Equals -Expected 2
+        $actual[2].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::Binary)
+        ,$actual[2].DataItems[0].Data | Assert-Equals -Expected ([byte[]]@(17, 34, 51, 68))
+        $actual[2].DataItems[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::Binary)
+        ,$actual[2].DataItems[1].Data | Assert-Equals -Expected ([byte[]]@(170, 187, 204, 221))
+
+        $actual[3].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::Custom)
+        $actual[3].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[3].SubType | Assert-Equals -Expected ([Guid]'435a1742-22c5-4234-9db3-e32dafde695c')
+        $actual[3].DataItems.Count | Assert-Equals -Expected 2
+        $actual[3].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+        $actual[3].DataItems[0].Data | Assert-Equals -Expected '11223344'
+        $actual[3].DataItems[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+        $actual[3].DataItems[1].Data | Assert-Equals -Expected 'aabbccdd'
+
+        $actual[4].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::FirewallPortEvent)
+        $actual[4].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStop)
+        $actual[4].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID)
+        $actual[4].DataItems.Count | Assert-Equals -Expected 1
+        $actual[4].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+        ,$actual[4].DataItems[0].Data | Assert-Equals -Expected @('1234', 'tcp', 'imagepath', 'servicename')
+
+        $actual[5].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::IpAddressAvailability)
+        $actual[5].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStop)
+        $actual[5].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NETWORK_MANAGER_LAST_IP_ADDRESS_REMOVAL_GUID)
+        $actual[5].DataItems.Count | Assert-Equals -Expected 0
+    }
+
+    # Cannot test PreferredNode as we can't guarantee CI is set up with NUMA support.
+    # Cannot test LaunchProtection as once set we cannot remove it unless rebooting.
+}
+
+# setup and teardown should favour native tools to create and delete the service and not the util we are testing.
+foreach ($testImpl in $tests.GetEnumerator()) {
+    $serviceName = "ansible_$([System.IO.Path]::GetRandomFileName())"
+    $null = New-Service -Name $serviceName -BinaryPathName ('"{0}"' -f $path) -StartupType Manual
+
+    try {
+        $test = $testImpl.Key
+        &$testImpl.Value
+    } finally {
+        $null = Invoke-Sc -Action delete -Name $serviceName
+    }
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml
new file mode 100644
index 00000000..78f91e1e
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Service.cs
+  ansible_service_tests:
+  register: ansible_service_test
+
+- name: assert test Ansible.Service.cs
+  assert:
+    that:
+      - ansible_service_test.data == "success"
diff --git a/test/integration/targets/network_cli/aliases b/test/integration/targets/network_cli/aliases
new file mode 100644
index 00000000..6a739c96
--- /dev/null
+++ b/test/integration/targets/network_cli/aliases
@@ -0,0 +1,3 @@
+# Keeping incidental for efficiency, to avoid spinning up another VM
+shippable/vyos/incidental
+network/vyos
diff --git a/test/integration/targets/network_cli/passworded_user.yml b/test/integration/targets/network_cli/passworded_user.yml
new file mode 100644
index 00000000..5538684c
--- /dev/null
+++ b/test/integration/targets/network_cli/passworded_user.yml
@@ -0,0 +1,14 @@
+- hosts: vyos
+  gather_facts: false
+
+  tasks:
+    - name: Run whoami
+      vyos.vyos.vyos_command:
+        commands:
+          - whoami
+      register: whoami
+
+    - assert:
+        that:
+          - whoami is successful
+          - whoami.stdout_lines[0][0] == 'atester'
diff --git a/test/integration/targets/network_cli/runme.sh b/test/integration/targets/network_cli/runme.sh
new file mode 100755
index 00000000..156674fe
--- /dev/null
+++ b/test/integration/targets/network_cli/runme.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -eux
+export ANSIBLE_ROLES_PATH=../
+
+function cleanup {
+    ansible-playbook teardown.yml -i "$INVENTORY_PATH" "$@"
+}
+
+trap cleanup EXIT
+
+ansible-playbook setup.yml -i "$INVENTORY_PATH" "$@"
+
+# We need a nonempty file to override the key with (an empty file gives a
+# lovely "list index out of range" error)
+foo=$(mktemp)
+echo hello > "$foo"
+
+# We want to ensure that passwords make it to the network connection plugins
+# because they follow a different path than the rest of the codebase.
+# In setup.yml, we create a passworded user, and now we connect as that user
+# to make sure the password we pass here successfully makes it to the plugin.
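+# The junk key file below means a successful login can only have come from the
+# password; if the connection plugin dropped ansible_password, this play fails.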
+ansible-playbook \ + -i "$INVENTORY_PATH" \ + -e ansible_user=atester \ + -e ansible_password=testymctest \ + -e ansible_ssh_private_key_file="$foo" \ + passworded_user.yml diff --git a/test/integration/targets/network_cli/setup.yml b/test/integration/targets/network_cli/setup.yml new file mode 100644 index 00000000..d862406f --- /dev/null +++ b/test/integration/targets/network_cli/setup.yml @@ -0,0 +1,14 @@ +- hosts: vyos + connection: ansible.netcommon.network_cli + become: true + gather_facts: false + + tasks: + - name: Create user with password + register: result + vyos.vyos.vyos_config: + lines: + - set system login user atester full-name "Ansible Tester" + - set system login user atester authentication plaintext-password testymctest + - set system login user jsmith level admin + - delete service ssh disable-password-authentication diff --git a/test/integration/targets/network_cli/teardown.yml b/test/integration/targets/network_cli/teardown.yml new file mode 100644 index 00000000..c47f3e89 --- /dev/null +++ b/test/integration/targets/network_cli/teardown.yml @@ -0,0 +1,14 @@ +- hosts: vyos + connection: ansible.netcommon.network_cli + become: true + gather_facts: false + + tasks: + - name: Get rid of user (undo everything from setup.yml) + register: result + vyos.vyos.vyos_config: + lines: + - delete system login user atester full-name "Ansible Tester" + - delete system login user atester authentication plaintext-password testymctest + - delete system login user jsmith level admin + - set service ssh disable-password-authentication diff --git a/test/integration/targets/no_log/aliases b/test/integration/targets/no_log/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/no_log/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/no_log/dynamic.yml b/test/integration/targets/no_log/dynamic.yml new file mode 100644 index 00000000..4a1123d5 --- /dev/null +++ b/test/integration/targets/no_log/dynamic.yml @@ -0,0 +1,27 @@ +- name: test dynamic no log + hosts: testhost + gather_facts: no + ignore_errors: yes + tasks: + - name: no loop, task fails, dynamic no_log + debug: + msg: "SHOW {{ var_does_not_exist }}" + no_log: "{{ not (unsafe_show_logs|bool) }}" + + - name: loop, task succeeds, dynamic does no_log + debug: + msg: "SHOW {{ item }}" + loop: + - a + - b + - c + no_log: "{{ not (unsafe_show_logs|bool) }}" + + - name: loop, task fails, dynamic no_log + debug: + msg: "SHOW {{ var_does_not_exist }}" + loop: + - a + - b + - c + no_log: "{{ not (unsafe_show_logs|bool) }}" diff --git a/test/integration/targets/no_log/library/module.py b/test/integration/targets/no_log/library/module.py new file mode 100644 index 00000000..d4f3c565 --- /dev/null +++ b/test/integration/targets/no_log/library/module.py @@ -0,0 +1,45 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec={ + 'state': {}, + 'secret': {'no_log': True}, + 'subopt_dict': { + 'type': 'dict', + 'options': { + 'str_sub_opt1': {'no_log': True}, + 'str_sub_opt2': {}, + 'nested_subopt': { + 'type': 'dict', + 'options': { + 'n_subopt1': {'no_log': True}, + } + } + } + }, + 'subopt_list': { + 'type': 'list', + 'elements': 'dict', + 'options': 
{
+                    'subopt1': {'no_log': True},
+                    'subopt2': {},
+                }
+            }
+
+        }
+    )
+    module.exit_json(msg='done')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/integration/targets/no_log/no_log_local.yml b/test/integration/targets/no_log/no_log_local.yml
new file mode 100644
index 00000000..aacf7de2
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_local.yml
@@ -0,0 +1,92 @@
+# TODO: test against real connection plugins to ensure they're not leaking module args
+
+- name: normal play
+  hosts: testhost
+  gather_facts: no
+  tasks:
+    - name: args should be logged in the absence of no_log
+      shell: echo "LOG_ME_TASK_SUCCEEDED"
+
+    - name: failed args should be logged in the absence of no_log
+      shell: echo "LOG_ME_TASK_FAILED"
+      failed_when: true
+      ignore_errors: true
+
+    - name: item args should be logged in the absence of no_log
+      shell: echo {{ item }}
+      with_items: [ "LOG_ME_ITEM", "LOG_ME_SKIPPED", "LOG_ME_ITEM_FAILED" ]
+      when: item != "LOG_ME_SKIPPED"
+      failed_when: item == "LOG_ME_ITEM_FAILED"
+      ignore_errors: true
+
+    - name: args should not be logged when task-level no_log set
+      shell: echo "DO_NOT_LOG_TASK_SUCCEEDED"
+      no_log: true
+
+    - name: failed args should not be logged when task-level no_log set
+      shell: echo "DO_NOT_LOG_TASK_FAILED"
+      no_log: true
+      failed_when: true
+      ignore_errors: true
+
+    - name: skipped task args should be suppressed with no_log
+      shell: echo "DO_NOT_LOG_TASK_SKIPPED"
+      no_log: true
+      when: false
+
+    - name: item args should be suppressed with no_log in every state
+      shell: echo {{ item }}
+      no_log: true
+      with_items: [ "DO_NOT_LOG_ITEM", "DO_NOT_LOG_ITEM_SKIPPED", "DO_NOT_LOG_ITEM_FAILED" ]
+      when: item != "DO_NOT_LOG_ITEM_SKIPPED"
+      failed_when: item == "DO_NOT_LOG_ITEM_FAILED"
+      ignore_errors: yes
+
+    - name: async task args should be suppressed with no_log
+      async: 10
+      poll: 1
+      shell: echo "DO_NOT_LOG_ASYNC_TASK_SUCCEEDED"
+      no_log: true
+
+- name: play-level no_log set
+  hosts: testhost
+  gather_facts: no
+  no_log: true
+  tasks:
+    - name: args should not be logged when play-level no_log set
+      shell: echo "DO_NOT_LOG_PLAY"
+
+    - name: args should not be logged when both play- and task-level no_log set
+      shell: echo "DO_NOT_LOG_TASK_AND_PLAY"
+      no_log: true
+
+    - name: args should be logged when task-level no_log overrides play-level
+      shell: echo "LOG_ME_OVERRIDE"
+      no_log: false
+
+    - name: Add a fake host for next play
+      add_host:
+        hostname: fake
+
+- name: use 'fake' unreachable host to force unreachable error
+  hosts: fake
+  gather_facts: no
+  connection: ssh
+  tasks:
+    - name: 'EXPECTED FAILURE: Fail to run a lineinfile task'
+      vars:
+        logins:
+          - machine: foo
+            login: bar
+            password: DO_NOT_LOG_UNREACHABLE_ITEM
+          - machine: two
+            login: three
+            password: DO_NOT_LOG_UNREACHABLE_ITEM
+      lineinfile:
+        path: /dev/null
+        mode: 0600
+        create: true
+        insertafter: EOF
+        line: "machine {{ item.machine }} login {{ item.login }} password {{ item.password }}"
+      loop: "{{ logins }}"
+      no_log: true
diff --git a/test/integration/targets/no_log/no_log_suboptions.yml b/test/integration/targets/no_log/no_log_suboptions.yml
new file mode 100644
index 00000000..e67ecfe2
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions.yml
@@ -0,0 +1,24 @@
+- name: test no log with suboptions
+  hosts: testhost
+  gather_facts: no
+
+  tasks:
+    - name: Task with suboptions
+      module:
+        secret: GLAMOROUS
+        subopt_dict:
+          str_sub_opt1: AFTERMATH
+          str_sub_opt2: otherstring
+          nested_subopt:
+            n_subopt1: MANPOWER
+
+        subopt_list:
+          - subopt1: UNTAPPED
+            subopt2: thirdstring
+
+          - subopt1: CONCERNED
+
+    - name: Task with suboptions as string
+      module:
+        secret: MARLIN
+        subopt_dict: str_sub_opt1=FLICK
diff --git a/test/integration/targets/no_log/no_log_suboptions_invalid.yml b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
new file mode 100644
index 00000000..933a8a9b
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
@@ -0,0 +1,45 @@
+- name: test no log with suboptions
+  hosts: testhost
+  gather_facts: no
+  ignore_errors: yes
+
+  tasks:
+    - name: Task with suboptions and invalid parameter
+      module:
+        secret: SUPREME
+        invalid: param
+        subopt_dict:
+          str_sub_opt1: IDIOM
+          str_sub_opt2: otherstring
+          nested_subopt:
+            n_subopt1: MOCKUP
+
+        subopt_list:
+          - subopt1: EDUCATED
+            subopt2: thirdstring
+          - subopt1: FOOTREST
+
+    - name: Task with suboptions as string with invalid parameter
+      module:
+        secret: FOOTREST
+        invalid: param
+        subopt_dict: str_sub_opt1=CRAFTY
+
+    - name: Task with suboptions with dict instead of list
+      module:
+        secret: FELINE
+        subopt_dict:
+          str_sub_opt1: CRYSTAL
+          str_sub_opt2: otherstring
+          nested_subopt:
+            n_subopt1: EXPECTANT
+        subopt_list:
+          foo: bar
+
+    - name: Task with suboptions with incorrect data type
+      module:
+        secret: AGROUND
+        subopt_dict: 9068.21361
+        subopt_list:
+          - subopt1: GOLIATH
+          - subopt1: FREEFALL
diff --git a/test/integration/targets/no_log/runme.sh b/test/integration/targets/no_log/runme.sh
new file mode 100755
index 00000000..bb5c048f
--- /dev/null
+++ b/test/integration/targets/no_log/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# This test expects the tally below to come out at 26 loggable lines and 0 non-loggable ones.
+# If either count mismatches it fails; run the ansible-playbook command to debug.
+[ "$(ansible-playbook no_log_local.yml -i ../../inventory -vvvvv "$@" | awk \
+'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "26/0" ]
+
+# deal with corner cases with no log and loops
+# no log enabled, should produce 6 censored messages
+[ "$(ansible-playbook dynamic.yml -i ../../inventory -vvvvv "$@" -e unsafe_show_logs=no|grep -c 'output has been hidden')" = "6" ]
+
+# no log disabled, should produce 0 censored
+[ "$(ansible-playbook dynamic.yml -i ../../inventory -vvvvv "$@" -e unsafe_show_logs=yes|grep -c 'output has been hidden')" = "0" ]
+
+# test no log for sub options
+[ "$(ansible-playbook no_log_suboptions.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(MANPOWER|UNTAPPED|CONCERNED|MARLIN|FLICK)')" = "0" ]
+
+# test invalid data passed to a suboption
+[ "$(ansible-playbook no_log_suboptions_invalid.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(SUPREME|IDIOM|MOCKUP|EDUCATED|FOOTREST|CRAFTY|FELINE|CRYSTAL|EXPECTANT|AGROUND|GOLIATH|FREEFALL)')" = "0" ]
diff --git a/test/integration/targets/noexec/aliases b/test/integration/targets/noexec/aliases
new file mode 100644
index 00000000..66a77c7b
--- /dev/null
+++ b/test/integration/targets/noexec/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/docker
+skip/macos
diff --git a/test/integration/targets/noexec/inventory b/test/integration/targets/noexec/inventory
new file mode 100644
index 00000000..ab9b62c8
--- /dev/null
+++ b/test/integration/targets/noexec/inventory
@@ -0,0 +1 @@
+not_empty # avoid an empty hosts list warning without defining explicit localhost
diff --git a/test/integration/targets/noexec/runme.sh b/test/integration/targets/noexec/runme.sh
new file mode 100755
index 00000000..ff706558
--- /dev/null
+++ b/test/integration/targets/noexec/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -eux
+
+trap 'umount "${OUTPUT_DIR}/ramdisk"' EXIT
+
+mkdir "${OUTPUT_DIR}/ramdisk"
+mount -t tmpfs -o size=32m,noexec,rw tmpfs "${OUTPUT_DIR}/ramdisk"
+ANSIBLE_REMOTE_TMP="${OUTPUT_DIR}/ramdisk" ansible-playbook -i inventory "$@" test-noexec.yml
diff --git a/test/integration/targets/noexec/test-noexec.yml b/test/integration/targets/noexec/test-noexec.yml
new file mode 100644
index 00000000..3c7d756b
--- /dev/null
+++ b/test/integration/targets/noexec/test-noexec.yml
@@ -0,0 +1,8 @@
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - ping:
+
+    - command: sleep 1
+      async: 2
+      poll: 1
diff --git a/test/integration/targets/old_style_cache_plugins/aliases b/test/integration/targets/old_style_cache_plugins/aliases
new file mode 100644
index 00000000..05f65b71
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group3
+skip/osx
+skip/macos
+disabled
diff --git a/test/integration/targets/old_style_cache_plugins/inventory_config b/test/integration/targets/old_style_cache_plugins/inventory_config
new file mode 100644
index 00000000..d87c2a90
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/inventory_config
@@ -0,0 +1 @@
+# inventory config file for consistent source
diff --git a/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py b/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py
new file mode 100644
index 00000000..9879dec9
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py
@@ -0,0 +1,141 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    cache: redis
+    short_description: Use Redis DB for cache
+    description:
+        - This cache uses JSON formatted, per host records saved in Redis.
+    version_added: "1.9"
+    requirements:
+      - redis>=2.4.5 (python lib)
+    options:
+      _uri:
+        description:
+          - A colon separated string of connection information for Redis.
+        required: True
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+        ini:
+          - key: fact_caching_connection
+            section: defaults
+      _prefix:
+        description: User defined prefix to use when creating the DB entries
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+        ini:
+          - key: fact_caching_prefix
+            section: defaults
+      _timeout:
+        default: 86400
+        description: Expiration timeout for the cache plugin data
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+        ini:
+          - key: fact_caching_timeout
+            section: defaults
+        type: integer
+'''
+
+import time
+import json
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.cache import BaseCacheModule
+
+try:
+    from redis import StrictRedis, VERSION
+except ImportError:
+    raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+
+class CacheModule(BaseCacheModule):
+    """
+    A caching module backed by redis.
+    Keys are maintained in a zset with their score being the timestamp
+    when they are inserted. This allows for the usage of 'zremrangebyscore'
+    to expire keys. This mechanism is used instead of a pattern matched
+    'scan' for performance.
+ """ + def __init__(self, *args, **kwargs): + if C.CACHE_PLUGIN_CONNECTION: + connection = C.CACHE_PLUGIN_CONNECTION.split(':') + else: + connection = [] + + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._prefix = C.CACHE_PLUGIN_PREFIX + self._cache = {} + self._db = StrictRedis(*connection) + self._keys_set = 'ansible_cache_keys' + + def _make_key(self, key): + return self._prefix + key + + def get(self, key): + + if key not in self._cache: + value = self._db.get(self._make_key(key)) + # guard against the key not being removed from the zset; + # this could happen in cases where the timeout value is changed + # between invocations + if value is None: + self.delete(key) + raise KeyError + self._cache[key] = json.loads(value) + + return self._cache.get(key) + + def set(self, key, value): + + value2 = json.dumps(value) + if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' + self._db.setex(self._make_key(key), int(self._timeout), value2) + else: + self._db.set(self._make_key(key), value2) + + if VERSION[0] == 2: + self._db.zadd(self._keys_set, time.time(), key) + else: + self._db.zadd(self._keys_set, {key: time.time()}) + self._cache[key] = value + + def _expire_keys(self): + if self._timeout > 0: + expiry_age = time.time() - self._timeout + self._db.zremrangebyscore(self._keys_set, 0, expiry_age) + + def keys(self): + self._expire_keys() + return self._db.zrange(self._keys_set, 0, -1) + + def contains(self, key): + self._expire_keys() + return (self._db.zrank(self._keys_set, key) is not None) + + def delete(self, key): + if key in self._cache: + del self._cache[key] + self._db.delete(self._make_key(key)) + self._db.zrem(self._keys_set, key) + + def flush(self): + for key in self.keys(): + self.delete(key) + + def copy(self): + # TODO: there is probably a better way to do this in redis + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret + + def __getstate__(self): + return dict() + + def __setstate__(self, data): + self.__init__() diff --git a/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py b/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py new file mode 100644 index 00000000..7e591957 --- /dev/null +++ b/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py @@ -0,0 +1,59 @@ +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: test + plugin_type: inventory + short_description: test inventory source + extends_documentation_fragment: + - inventory_cache +''' + +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable + + +class InventoryModule(BaseInventoryPlugin, Cacheable): + + NAME = 'test' + + def populate(self, hosts): + for host in list(hosts.keys()): + self.inventory.add_host(host, group='all') + for hostvar, hostval in hosts[host].items(): + self.inventory.set_variable(host, hostvar, hostval) + + def get_hosts(self): + return {'host1': {'one': 'two'}, 'host2': {'three': 'four'}} + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self.load_cache_plugin() + + cache_key = self.get_cache_key(path) + + # cache may be True or False at this point to indicate if the inventory is being refreshed + # get the user's cache option + cache_setting = self.get_option('cache') + + 
attempt_to_read_cache = cache_setting and cache
+        cache_needs_update = cache_setting and not cache
+
+        # attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
+        if attempt_to_read_cache:
+            try:
+                results = self._cache[cache_key]
+            except KeyError:
+                # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
+                cache_needs_update = True
+
+        if cache_needs_update:
+            results = self.get_hosts()
+
+            # set the cache
+            self._cache[cache_key] = results
+
+        self.populate(results)
diff --git a/test/integration/targets/old_style_cache_plugins/runme.sh b/test/integration/targets/old_style_cache_plugins/runme.sh
new file mode 100755
index 00000000..86d2433b
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/runme.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_TEST_PREFER_VENV=1
+source virtualenv.sh
+
+# Run test if dependencies are installed
+failed_dep_1=$(ansible localhost -m pip -a "name=redis>=2.4.5 state=present" "$@" | tee out.txt | grep -c 'FAILED!' || true)
+cat out.txt
+
+installed_redis=$(ansible localhost -m package -a "name=redis-server state=present" --become "$@" | tee out.txt | grep -c '"changed": true' || true)
+failed_dep_2=$(grep out.txt -ce 'FAILED!' || true)
+cat out.txt
+
+started_redis=$(ansible localhost -m service -a "name=redis-server state=started" --become "$@" | tee out.txt | grep -c '"changed": true' || true)
+failed_dep_3=$(grep out.txt -ce 'FAILED!' || true)
+cat out.txt
+
+CLEANUP_REDIS () { if [ "${installed_redis}" -eq 1 ] ; then ansible localhost -m package -a "name=redis-server state=absent" --become ; fi; }
+STOP_REDIS () { if [ "${installed_redis}" -ne 1 ] && [ "${started_redis}" -eq 1 ] ; then ansible localhost -m service -a "name=redis-server state=stopped" --become ; fi; }
+
+if [ "${failed_dep_1}" -eq 1 ] || [ "${failed_dep_2}" -eq 1 ] || [ "${failed_dep_3}" -eq 1 ] ; then
+    STOP_REDIS
+    CLEANUP_REDIS
+    exit 0
+fi
+
+export ANSIBLE_CACHE_PLUGIN=redis
+export ANSIBLE_CACHE_PLUGIN_CONNECTION=localhost:6379:0
+export ANSIBLE_CACHE_PLUGINS=./plugins/cache
+
+# Use old redis for fact caching
+count=$(ansible-playbook test_fact_gathering.yml -vvv 2>&1 "$@" | tee out.txt | grep -c 'Gathering Facts' || true)
+failed_dep_version=$(grep out.txt -ce "'redis' python module (version 2.4.5 or newer) is required" || true)
+cat out.txt
+if [ "${failed_dep_version}" -eq 1 ] ; then
+    STOP_REDIS
+    CLEANUP_REDIS
+    exit 0
+fi
+if [ "${count}" -ne 1 ] ; then
+    STOP_REDIS
+    CLEANUP_REDIS
+    exit 1
+fi
+
+# Attempt to use old redis for inventory caching; should not work
+export ANSIBLE_INVENTORY_CACHE=True
+export ANSIBLE_INVENTORY_CACHE_PLUGIN=redis
+export ANSIBLE_INVENTORY_ENABLED=test
+export ANSIBLE_INVENTORY_PLUGINS=./plugins/inventory
+
+ansible-inventory -i inventory_config --graph 2>&1 "$@" | tee out.txt | grep 'Cache options were provided but may not reconcile correctly unless set via set_options'
+res=$?
+cat out.txt
+if [ "${res}" -eq 1 ] ; then
+    STOP_REDIS
+    CLEANUP_REDIS
+    exit 1
+fi
+
+# Use new style redis for fact caching
+unset ANSIBLE_CACHE_PLUGINS
+count=$(ansible-playbook test_fact_gathering.yml -vvv "$@" | tee out.txt | grep -c 'Gathering Facts' || true)
+cat out.txt
+if [ "${count}" -ne 1 ] ; then
+    STOP_REDIS
+    CLEANUP_REDIS
+    exit 1
+fi
+
+# Use new redis for inventory caching
+ansible-inventory -i inventory_config --graph "$@" 2>&1 | tee out.txt | grep 'host2'
+res=$?
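+# ('host2' is hard-coded in the test inventory plugin's get_hosts(), so seeing it
+# in the graph proves parse() populated the inventory, from the source or its cache.)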
+cat out.txt + +STOP_REDIS +CLEANUP_REDIS + +exit $res diff --git a/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml b/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml new file mode 100644 index 00000000..5c720b4e --- /dev/null +++ b/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml @@ -0,0 +1,6 @@ +--- +- hosts: localhost + gather_facts: no + +- hosts: localhost + gather_facts: yes diff --git a/test/integration/targets/old_style_modules_posix/aliases b/test/integration/targets/old_style_modules_posix/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/old_style_modules_posix/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/old_style_modules_posix/library/helloworld.sh b/test/integration/targets/old_style_modules_posix/library/helloworld.sh new file mode 100644 index 00000000..c1108a8c --- /dev/null +++ b/test/integration/targets/old_style_modules_posix/library/helloworld.sh @@ -0,0 +1,29 @@ +#!/bin/sh +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +if [ -f "$1" ]; then + . "$1" +else + echo '{"msg": "No argument file provided", "failed": true}' + exit 1 +fi + +salutation=${salutation:=Hello} +name=${name:=World} + +cat << EOF +{"msg": "${salutation}, ${name}!"} +EOF diff --git a/test/integration/targets/old_style_modules_posix/meta/main.yml b/test/integration/targets/old_style_modules_posix/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/old_style_modules_posix/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/old_style_modules_posix/tasks/main.yml b/test/integration/targets/old_style_modules_posix/tasks/main.yml new file mode 100644 index 00000000..a7882171 --- /dev/null +++ b/test/integration/targets/old_style_modules_posix/tasks/main.yml @@ -0,0 +1,44 @@ +- name: Hello, World! + helloworld: + register: hello_world + +- assert: + that: + - 'hello_world.msg == "Hello, World!"' + +- name: Hello, Ansible! + helloworld: + args: + name: Ansible + register: hello_ansible + +- assert: + that: + - 'hello_ansible.msg == "Hello, Ansible!"' + +- name: Goodbye, Ansible! 
+ helloworld: + args: + salutation: Goodbye + name: Ansible + register: goodbye_ansible + +- assert: + that: + - 'goodbye_ansible.msg == "Goodbye, Ansible!"' + +- name: Copy module to remote + copy: + src: "{{ role_path }}/library/helloworld.sh" + dest: "{{ remote_tmp_dir }}/helloworld.sh" + +- name: Execute module directly + command: '/bin/sh {{ remote_tmp_dir }}/helloworld.sh' + register: direct + ignore_errors: true + +- assert: + that: + - direct is failed + - | + direct.stdout == '{"msg": "No argument file provided", "failed": true}' diff --git a/test/integration/targets/omit/48673.yml b/test/integration/targets/omit/48673.yml new file mode 100644 index 00000000..d25c8cf2 --- /dev/null +++ b/test/integration/targets/omit/48673.yml @@ -0,0 +1,4 @@ +- hosts: testhost + serial: "{{ testing_omitted_variable | default(omit) }}" + tasks: + - debug: diff --git a/test/integration/targets/omit/aliases b/test/integration/targets/omit/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/omit/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/omit/runme.sh b/test/integration/targets/omit/runme.sh new file mode 100755 index 00000000..962e1f04 --- /dev/null +++ b/test/integration/targets/omit/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook 48673.yml -i ../../inventory -v "$@" diff --git a/test/integration/targets/order/aliases b/test/integration/targets/order/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/order/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/order/inventory b/test/integration/targets/order/inventory new file mode 100644 index 00000000..11f322a1 --- /dev/null +++ b/test/integration/targets/order/inventory @@ -0,0 +1,9 @@ +[incremental] +hostB +hostA +hostD +hostC + +[incremental:vars] +ansible_connection=local +ansible_python_interpreter='{{ansible_playbook_python}}' diff --git a/test/integration/targets/order/order.yml b/test/integration/targets/order/order.yml new file mode 100644 index 00000000..62176b1e --- /dev/null +++ b/test/integration/targets/order/order.yml @@ -0,0 +1,39 @@ +- name: just plain order + hosts: all + gather_facts: false + order: '{{ myorder | default("inventory") }}' + tasks: + - shell: "echo '{{ inventory_hostname }}' >> hostlist.txt" + +- name: with serial + hosts: all + gather_facts: false + serial: 1 + order: '{{ myorder | default("inventory")}}' + tasks: + - shell: "echo '{{ inventory_hostname }}' >> shostlist.txt" + +- name: ensure everything works + hosts: localhost + gather_facts: false + tasks: + - assert: + that: + - item.1 == hostlist[item.0] + - item.1 == shostlist[item.0] + loop: '{{ lookup("indexed_items", inputlist) }}' + vars: + hostlist: '{{ lookup("file", "hostlist.txt").splitlines() }}' + shostlist: '{{ lookup("file", "shostlist.txt").splitlines() }}' + when: myorder | default('inventory') != 'shuffle' + + - name: Assert that shuffle worked + assert: + that: + - item.1 != hostlist[item.0] or item.1 in hostlist + - item.1 != hostlist[item.0] or item.1 in hostlist + loop: '{{ lookup("indexed_items", inputlist) }}' + vars: + hostlist: '{{ lookup("file", "hostlist.txt").splitlines() }}' + shostlist: '{{ lookup("file", "shostlist.txt").splitlines() }}' + when: myorder | default('inventory') == 'shuffle' diff --git a/test/integration/targets/order/runme.sh b/test/integration/targets/order/runme.sh new file mode 100755 index 00000000..9a01c211 --- 
/dev/null +++ b/test/integration/targets/order/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -eux + +cleanup () { + files="shostlist.txt hostlist.txt" + for file in $files; do + if [[ -f "$file" ]]; then + rm -f "$file" + fi + done +} + +for EXTRA in '{"inputlist": ["hostB", "hostA", "hostD", "hostC"]}' \ + '{"myorder": "inventory", "inputlist": ["hostB", "hostA", "hostD", "hostC"]}' \ + '{"myorder": "sorted", "inputlist": ["hostA", "hostB", "hostC", "hostD"]}' \ + '{"myorder": "reverse_sorted", "inputlist": ["hostD", "hostC", "hostB", "hostA"]}' \ + '{"myorder": "reverse_inventory", "inputlist": ["hostC", "hostD", "hostA", "hostB"]}' \ + '{"myorder": "shuffle", "inputlist": ["hostC", "hostD", "hostA", "hostB"]}' +do + cleanup + ansible-playbook order.yml --forks 1 -i inventory -e "$EXTRA" "$@" +done +cleanup diff --git a/test/integration/targets/package/aliases b/test/integration/targets/package/aliases new file mode 100644 index 00000000..0b484bba --- /dev/null +++ b/test/integration/targets/package/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +destructive +skip/aix diff --git a/test/integration/targets/package/meta/main.yml b/test/integration/targets/package/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/package/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/package/tasks/main.yml b/test/integration/targets/package/tasks/main.yml new file mode 100644 index 00000000..4fc3a8a6 --- /dev/null +++ b/test/integration/targets/package/tasks/main.yml @@ -0,0 +1,114 @@ +# Test code for the package module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
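+# These tasks first check ansible_pkg_mgr auto-detection corner cases (dnf should stay
+# the default on Fedora even with apt installed, and apt should stay the default on
+# Debian even with a zypper binary present), then exercise a generic install/remove
+# cycle through the 'at' package.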
+ +- set_fact: output_dir_test={{output_dir}}/at + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +# Verify correct default package manager for Fedora +# Validates: https://github.com/ansible/ansible/issues/34014 +- block: + - name: install apt + dnf: + name: apt + state: present + - name: gather facts again + setup: + - name: validate output + assert: + that: + - 'ansible_pkg_mgr == "dnf"' + always: + - name: remove apt + dnf: + name: apt + state: absent + - name: gather facts again + setup: + when: ansible_distribution == "Fedora" + +# Verify correct default package manager for Debian/Ubuntu when Zypper installed +- block: + # Just make an executable file called "zypper" - installing zypper itself + # consistently is hard - and we're not going to use it + - name: install fake zypper + file: + state: touch + mode: 0755 + path: /usr/bin/zypper + - name: gather facts again + setup: + - name: validate output + assert: + that: + - 'ansible_pkg_mgr == "apt"' + always: + - name: remove fake zypper + file: + path: /usr/bin/zypper + state: absent + - name: gather facts again + setup: + when: ansible_os_family == "Debian" + +## +## package +## + +- name: define distros to attempt installing the at package on + set_fact: + package_distros: + - RedHat + - CentOS + - ScientificLinux + - Fedora + - Ubuntu + - Debian + +- block: + - name: remove at package + package: + name: at + state: absent + register: at_check0 + + - name: verify at command is missing + shell: which at + register: at_check1 + failed_when: at_check1.rc == 0 + + - name: reinstall at package + package: + name: at + state: present + register: at_install0 + - debug: var=at_install0 + - name: validate results + assert: + that: + - 'at_install0.changed is defined' + - 'at_install0.changed' + + - name: verify at command is installed + shell: which at + + when: ansible_distribution in package_distros diff --git a/test/integration/targets/package_facts/aliases b/test/integration/targets/package_facts/aliases new file mode 100644 index 00000000..6c62b9a7 --- /dev/null +++ b/test/integration/targets/package_facts/aliases @@ -0,0 +1,4 @@ +shippable/posix/group3 +skip/aix +skip/osx +skip/macos diff --git a/test/integration/targets/package_facts/tasks/main.yml b/test/integration/targets/package_facts/tasks/main.yml new file mode 100644 index 00000000..12dfcf03 --- /dev/null +++ b/test/integration/targets/package_facts/tasks/main.yml @@ -0,0 +1,115 @@ +# Test playbook for the package_facts module +# (c) 2017, Adam Miller + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
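The package_facts assertions that follow only verify that ansible_facts.packages exists. For orientation, the fact maps each package name to a list of per-installation dicts, since one name can be installed several times (kernel packages, for instance). A hedged sketch of building that shape from rpm output; an illustration, not the module's implementation:

import subprocess
from collections import defaultdict

def rpm_package_facts():
    # One 'name|version|release|arch' line per installed package.
    out = subprocess.check_output(
        ['rpm', '-qa', '--queryformat', '%{NAME}|%{VERSION}|%{RELEASE}|%{ARCH}\n'],
        text=True)
    packages = defaultdict(list)
    for line in out.splitlines():
        name, version, release, arch = line.split('|')
        # Same list-per-name shape the FreeBSD vital/automatic checks
        # below index into with packages[pkg_name][0].
        packages[name].append({'name': name, 'version': version,
                               'release': release, 'arch': arch, 'source': 'rpm'})
    return dict(packages)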
+ +- name: Prep package_facts tests - Debian Family + block: + - name: install python apt bindings - python2 + package: name="python-apt" state=present + when: ansible_python.version.major|int == 2 + + - name: install python apt bindings - python3 + package: name="python3-apt" state=present + when: ansible_python.version.major|int == 3 + + - name: Gather package facts + package_facts: + manager: apt + + - name: check that ansible_facts.packages exists + assert: + that: ansible_facts.packages is defined + when: ansible_os_family == "Debian" + +- name: Run package_facts tests - Red Hat Family + block: + - name: Gather package facts + package_facts: + manager: rpm + + - name: check that ansible_facts.packages exists + assert: + that: ansible_facts.packages is defined + when: (ansible_os_family == "RedHat") + +- name: Run package_facts tests - SUSE/OpenSUSE Family + block: + - name: install python rpm bindings - python2 + package: name="rpm-python" state=present + when: ansible_python.version.major|int == 2 + + - name: install python rpm bindings - python3 + package: name="python3-rpm" state=present + when: ansible_python.version.major|int == 3 + + - name: Gather package facts + package_facts: + manager: rpm + + - name: check that ansible_facts.packages exists + assert: + that: ansible_facts.packages is defined + when: (ansible_os_family == "openSUSE Leap") or (ansible_os_family == "Suse") + +# Check that auto detection also works +- name: Gather package facts + package_facts: + manager: auto + +- name: check that ansible_facts.packages exists + assert: + that: ansible_facts.packages is defined + +- name: Run package_facts tests - FreeBSD + block: + - name: Gather package facts + package_facts: + manager: pkg + + - name: check that ansible_facts.packages exists + assert: + that: ansible_facts.packages is defined + + - name: check there is at least one package flagged neither vital nor automatic + command: pkg query -e "%a = 0 && %V = 0" %n + register: not_vital_nor_automatic + failed_when: not not_vital_nor_automatic.stdout + + - vars: + pkg_name: "{{ not_vital_nor_automatic.stdout_lines[0].strip() }}" + block: + - name: check the selected package is neither vital nor automatic + assert: + that: + - 'not ansible_facts.packages[pkg_name][0].vital' + - 'not ansible_facts.packages[pkg_name][0].automatic' + + - name: flag the selected package as vital and automatic + command: 'pkg set --yes -v 1 -A 1 {{ pkg_name }}' + + - name: Gather package facts (again) + package_facts: + + - name: check the selected package is flagged vital and automatic + assert: + that: + - 'ansible_facts.packages[pkg_name][0].vital|bool' + - 'ansible_facts.packages[pkg_name][0].automatic|bool' + always: + - name: restore previous flags for the selected package + command: 'pkg set --yes -v 0 -A 0 {{ pkg_name }}' + when: ansible_os_family == "FreeBSD" diff --git a/test/integration/targets/parsing/aliases b/test/integration/targets/parsing/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/parsing/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/parsing/bad_parsing.yml b/test/integration/targets/parsing/bad_parsing.yml new file mode 100644 index 00000000..953ec072 --- /dev/null +++ b/test/integration/targets/parsing/bad_parsing.yml @@ -0,0 +1,12 @@ +- hosts: testhost + + # the following commands should all parse fine and execute fine + # and represent quoting scenarios that should be legit + + gather_facts: False + + roles: + + # this one has a lot of things that should fail,
see makefile for operation w/ tags + + - { role: test_bad_parsing } diff --git a/test/integration/targets/parsing/good_parsing.yml b/test/integration/targets/parsing/good_parsing.yml new file mode 100644 index 00000000..b68d9112 --- /dev/null +++ b/test/integration/targets/parsing/good_parsing.yml @@ -0,0 +1,9 @@ +- hosts: testhost + + # the following commands should all parse fine and execute fine + # and represent quoting scenarios that should be legit + + gather_facts: False + + roles: + - { role: test_good_parsing, tags: test_good_parsing } diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml new file mode 100644 index 00000000..f1b2ec6a --- /dev/null +++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml @@ -0,0 +1,60 @@ +# test code for the ping module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# the following tests all raise errors; to use them in a Makefile, we run them with different flags, as +# otherwise ansible stops at the first one and we want to ensure STOP conditions for each + +- set_fact: + test_file: "{{ output_dir }}/ansible_test_file" # FIXME, use set tempdir + test_input: "owner=test" + bad_var: "{{ output_dir }}' owner=test" + chdir: "mom chdir=/tmp" + tags: common + +- file: name={{test_file}} state=touch + tags: common + +- name: remove touched file + file: name={{test_file}} state=absent + tags: common + +- name: include test that we cannot insert arguments + include: scenario1.yml + tags: scenario1 + +- name: include test that we cannot duplicate arguments + include: scenario2.yml + tags: scenario2 + +- name: include test that we can't do this for the shell module + include: scenario3.yml + tags: scenario3 + +- name: include test that we can't go all Little Bobby Droptables on a quoted var to add more + include: scenario4.yml + tags: scenario4 + +- name: test that a missing/malformed jinja2 filter fails + debug: msg="{{output_dir|badfiltername}}" + tags: scenario5 + register: filter_fail + ignore_errors: yes + +- assert: + that: + - filter_fail is failed diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml new file mode 100644 index 00000000..8a82fb95 --- /dev/null +++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml @@ -0,0 +1,4 @@ +- name: test that we cannot insert arguments + file: path={{ test_file }} {{ test_input }} + failed_when: False # ignore the module, just test the parser + tags: scenario1 diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml new file mode 100644 index 00000000..c3b4b13c --- +++
b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml @@ -0,0 +1,4 @@ +- name: test that we cannot duplicate arguments + file: path={{ test_file }} owner=test2 {{ test_input }} + failed_when: False # ignore the module, just test the parser + tags: scenario2 diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml new file mode 100644 index 00000000..a228f70e --- /dev/null +++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml @@ -0,0 +1,4 @@ +- name: test that we can't do this for the shell module + shell: echo hi {{ chdir }} + failed_when: False + tags: scenario3 diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml new file mode 100644 index 00000000..2845adca --- /dev/null +++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml @@ -0,0 +1,4 @@ +- name: test that we can't go all Little Bobby Droptables on a quoted var to add more + file: "name={{ bad_var }}" + failed_when: False + tags: scenario4 diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml b/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml new file mode 100644 index 00000000..1aaeac77 --- /dev/null +++ b/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml @@ -0,0 +1,2 @@ +--- +output_dir: . diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml new file mode 100644 index 00000000..0fb1337e --- /dev/null +++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml @@ -0,0 +1,204 @@ +# test code for the ping module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
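The good_parsing tasks that follow all turn on how a module line is tokenized: quoted text stays one token, so something like 'executable=/usr/bin/python' inside a quoted shell argument is never promoted to a module parameter. A rough approximation with the standard shlex module (Ansible uses its own splitter; this only illustrates the quoting behavior):

import shlex

line = "echo 'dog' 'executable=/usr/bin/python'"
tokens = shlex.split(line)
print(tokens)  # ['echo', 'dog', 'executable=/usr/bin/python']

# The quoted k=v text survives as ordinary argument text; only a bare,
# unquoted key=value token could even look like a parameter, and for
# free-form modules like shell the whole remainder is the command.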
+ +# various tests of things that should not cause parsing problems + +- set_fact: + test_input: "a=1 a=2 a=3" + +- set_fact: + multi_line: | + echo old + echo mcdonald + echo had + echo a + echo farm + +- shell: echo "dog" + register: result + +- assert: + that: + result.cmd == 'echo "dog"' + +- shell: echo 'dog' + register: result + +- assert: + that: + result.cmd == 'echo \'dog\'' + +- name: a quoted argument is not sent to the shell module as anything but a string parameter + shell: echo 'dog' 'executable=/usr/bin/python' + register: result + +- debug: var=result.cmd + +- assert: + that: + result.cmd == "echo 'dog' 'executable=/usr/bin/python'" + +- name: it is valid to pass multiple key=value arguments because the shell doesn't check key=value arguments + shell: echo quackquack=here quackquack=everywhere + register: result + +- assert: + that: + result.cmd == 'echo quackquack=here quackquack=everywhere' + +- name: the same is true with quoting + shell: echo "quackquack=here quackquack=everywhere" + register: result + +- assert: + that: + result.cmd == 'echo "quackquack=here quackquack=everywhere"' + +- name: the same is true with quoting (B) + shell: echo "quackquack=here" "quackquack=everywhere" + register: result + +- name: the same is true with quoting (C) + shell: echo "quackquack=here" 'quackquack=everywhere' + register: result + +- name: the same is true with quoting (D) + shell: echo "quackquack=here" 'quackquack=everywhere' + register: result + +- name: the same is true with quoting (E) + shell: echo {{ test_input }} + register: result + +- assert: + that: + result.cmd == "echo a=1 a=2 a=3" + +- name: more shell duplicates + shell: echo foo=bar foo=bar + register: result + +- assert: + that: + result.cmd == "echo foo=bar foo=bar" + +- name: raw duplicates, noop + raw: env true foo=bar foo=bar + +- name: multi-line inline shell commands (should use script module but hey) are a thing + shell: "{{ multi_line }}" + register: result + +- debug: var=result + +- assert: + that: + result.stdout_lines == [ 'old', 'mcdonald', 'had', 'a', 'farm' ] + +- name: passing same arg to shell command is legit + shell: echo foo --arg=a --arg=b + failed_when: False # just catch the exit code, parse error is what I care about, but should register and compare result + register: result + +- assert: + that: + # command shouldn't end in spaces, amend test once fixed + - result.cmd == "echo foo --arg=a --arg=b" + +- name: test includes with params + include: test_include.yml fact_name=include_params param="{{ test_input }}" + +- name: assert the include set the correct fact for the param + assert: + that: + - include_params == test_input + +- name: test includes with quoted params + include: test_include.yml fact_name=double_quoted_param param="this is a param with double quotes" + +- name: assert the include set the correct fact for the double quoted param + assert: + that: + - double_quoted_param == "this is a param with double quotes" + +- name: test includes with single quoted params + include: test_include.yml fact_name=single_quoted_param param='this is a param with single quotes' + +- name: assert the include set the correct fact for the single quoted param + assert: + that: + - single_quoted_param == "this is a param with single quotes" + +- name: test includes with quoted params in complex args + include: test_include.yml + vars: + fact_name: complex_param + param: "this is a param in a complex arg with double quotes" + +- name: assert the include set the correct fact for the params in complex 
args + assert: + that: + - complex_param == "this is a param in a complex arg with double quotes" + +- name: test variable module name + action: "{{ variable_module_name }} msg='this should be debugged'" + register: result + +- name: assert the task with variable module name ran + assert: + that: + - result.msg == "this should be debugged" + +- name: test conditional includes + include: test_include_conditional.yml + when: false + +- name: assert the nested include from test_include_conditional was not set + assert: + that: + - nested_include_var is undefined + +- name: test omit in complex args + set_fact: + foo: bar + spam: "{{ omit }}" + should_not_omit: "prefix{{ omit }}" + +- assert: + that: + - foo == 'bar' + - spam is undefined + - should_not_omit is defined + +- name: test omit in module args + set_fact: > + yo=whatsup + eggs="{{ omit }}" + default_omitted="{{ not_exists|default(omit) }}" + should_not_omit_1="prefix{{ omit }}" + should_not_omit_2="{{ omit }}suffix" + should_not_omit_3="__omit_place_holder__afb6b9bc3d20bfeaa00a1b23a5930f89" + +- assert: + that: + - yo == 'whatsup' + - eggs is undefined + - default_omitted is undefined + - should_not_omit_1 is defined + - should_not_omit_2 is defined + - should_not_omit_3 == "__omit_place_holder__afb6b9bc3d20bfeaa00a1b23a5930f89" diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml new file mode 100644 index 00000000..4ba50358 --- /dev/null +++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml @@ -0,0 +1 @@ +- set_fact: "{{fact_name}}='{{param}}'" diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml new file mode 100644 index 00000000..070888da --- /dev/null +++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml @@ -0,0 +1 @@ +- include: test_include_nested.yml diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml new file mode 100644 index 00000000..f1f6fcc4 --- /dev/null +++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml @@ -0,0 +1,2 @@ +- name: set the nested include fact + set_fact: nested_include_var=1 diff --git a/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml b/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml new file mode 100644 index 00000000..ea7a0b84 --- /dev/null +++ b/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml @@ -0,0 +1,2 @@ +--- +variable_module_name: debug diff --git a/test/integration/targets/parsing/runme.sh b/test/integration/targets/parsing/runme.sh new file mode 100755 index 00000000..022ce4cf --- /dev/null +++ b/test/integration/targets/parsing/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook bad_parsing.yml -i ../../inventory -vvv "$@" --tags prepare,common,scenario5 +ansible-playbook good_parsing.yml -i ../../inventory -v "$@" diff --git a/test/integration/targets/path_lookups/aliases b/test/integration/targets/path_lookups/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/path_lookups/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git 
a/test/integration/targets/path_lookups/play.yml b/test/integration/targets/path_lookups/play.yml new file mode 100644 index 00000000..7321589b --- /dev/null +++ b/test/integration/targets/path_lookups/play.yml @@ -0,0 +1,49 @@ +- name: setup state + hosts: localhost + gather_facts: false + tasks: + - file: path={{playbook_dir}}/files state=directory + - file: path={{playbook_dir}}/roles/showfile/files state=directory + - copy: dest={{playbook_dir}}/roles/showfile/files/testfile content='in role files' + - copy: dest={{playbook_dir}}/roles/showfile/testfile content='in role' + - copy: dest={{playbook_dir}}/roles/showfile/tasks/testfile content='in role tasks' + - copy: dest={{playbook_dir}}/files/testfile content='in files' + - copy: dest={{playbook_dir}}/testfile content='in local' + +- include: testplay.yml + vars: + remove: nothing + role_out: in role files + play_out: in files + +- include: testplay.yml + vars: + remove: roles/showfile/files/testfile + role_out: in role + play_out: in files + +- include: testplay.yml + vars: + remove: roles/showfile/testfile + role_out: in role tasks + play_out: in files + +- include: testplay.yml + vars: + remove: roles/showfile/tasks/testfile + role_out: in files + play_out: in files + +- include: testplay.yml + vars: + remove: files/testfile + role_out: in local + play_out: in local + +- name: cleanup + hosts: localhost + gather_facts: false + tasks: + - file: path={{playbook_dir}}/testfile state=absent + - file: path={{playbook_dir}}/files state=absent + - file: path={{playbook_dir}}/roles/showfile/files state=absent diff --git a/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml b/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml new file mode 100644 index 00000000..1b380579 --- /dev/null +++ b/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml @@ -0,0 +1,2 @@ +- name: relative to role + set_fact: role_result="{{lookup('file', 'testfile')}}" diff --git a/test/integration/targets/path_lookups/runme.sh b/test/integration/targets/path_lookups/runme.sh new file mode 100755 index 00000000..754150b4 --- /dev/null +++ b/test/integration/targets/path_lookups/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook play.yml -i ../../inventory -v "$@" diff --git a/test/integration/targets/path_lookups/testplay.yml b/test/integration/targets/path_lookups/testplay.yml new file mode 100644 index 00000000..8bf45532 --- /dev/null +++ b/test/integration/targets/path_lookups/testplay.yml @@ -0,0 +1,20 @@ +- name: test initial state + hosts: localhost + gather_facts: false + pre_tasks: + - name: remove {{ remove }} + file: path={{ playbook_dir }}/{{ remove }} state=absent + roles: + - showfile + post_tasks: + - name: from play + set_fact: play_result="{{lookup('file', 'testfile')}}" + + - name: output stage {{ remove }} removed + debug: msg="play> {{play_out}}, role> {{role_out}}" + + - name: verify that results match expected + assert: + that: + - 'play_result == play_out' + - 'role_result == role_out' diff --git a/test/integration/targets/path_with_comma_in_inventory/aliases b/test/integration/targets/path_with_comma_in_inventory/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/path_with_comma_in_inventory/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/path_with_comma_in_inventory/playbook.yml b/test/integration/targets/path_with_comma_in_inventory/playbook.yml new file mode 100644 index 00000000..64c83689
--- /dev/null +++ b/test/integration/targets/path_with_comma_in_inventory/playbook.yml @@ -0,0 +1,9 @@ +--- +- hosts: all + gather_facts: false + tasks: + - name: Ensure we can see group_vars from path with comma + assert: + that: + - inventory_var_from_path_with_commas is defined + - inventory_var_from_path_with_commas == 'here' diff --git a/test/integration/targets/path_with_comma_in_inventory/runme.sh b/test/integration/targets/path_with_comma_in_inventory/runme.sh new file mode 100755 index 00000000..833e2ac5 --- /dev/null +++ b/test/integration/targets/path_with_comma_in_inventory/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -ux + +ansible-playbook -i this,path,has,commas/hosts playbook.yml -v "$@" diff --git a/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml new file mode 100644 index 00000000..df5b84d8 --- /dev/null +++ b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml @@ -0,0 +1 @@ +inventory_var_from_path_with_commas: 'here' diff --git a/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts new file mode 100644 index 00000000..5219b90c --- /dev/null +++ b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/test/integration/targets/pause/aliases b/test/integration/targets/pause/aliases new file mode 100644 index 00000000..810f1ab6 --- /dev/null +++ b/test/integration/targets/pause/aliases @@ -0,0 +1,3 @@ +needs/target/setup_pexpect +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/pause/pause-1.yml b/test/integration/targets/pause/pause-1.yml new file mode 100644 index 00000000..44c9960c --- /dev/null +++ b/test/integration/targets/pause/pause-1.yml @@ -0,0 +1,11 @@ +- name: Test pause module in default state + hosts: localhost + become: no + gather_facts: no + + tasks: + - name: EXPECTED FAILURE + pause: + + - debug: + msg: Task after pause diff --git a/test/integration/targets/pause/pause-2.yml b/test/integration/targets/pause/pause-2.yml new file mode 100644 index 00000000..81a7fda5 --- /dev/null +++ b/test/integration/targets/pause/pause-2.yml @@ -0,0 +1,12 @@ +- name: Test pause module with custom prompt + hosts: localhost + become: no + gather_facts: no + + tasks: + - name: EXPECTED FAILURE + pause: + prompt: Custom prompt + + - debug: + msg: Task after pause diff --git a/test/integration/targets/pause/pause-3.yml b/test/integration/targets/pause/pause-3.yml new file mode 100644 index 00000000..8f8c72ed --- /dev/null +++ b/test/integration/targets/pause/pause-3.yml @@ -0,0 +1,12 @@ +- name: Test pause module with pause + hosts: localhost + become: no + gather_facts: no + + tasks: + - name: EXPECTED FAILURE + pause: + seconds: 2 + + - debug: + msg: Task after pause diff --git a/test/integration/targets/pause/pause-4.yml b/test/integration/targets/pause/pause-4.yml new file mode 100644 index 00000000..f16c7d67 --- /dev/null +++ b/test/integration/targets/pause/pause-4.yml @@ -0,0 +1,13 @@ +- name: Test pause module with pause and custom prompt + hosts: localhost + become: no + gather_facts: no + + tasks: + - name: EXPECTED FAILURE + pause: + seconds: 2 + prompt: Waiting for two seconds + + - debug: + msg: Task after pause diff --git
a/test/integration/targets/pause/pause-5.yml b/test/integration/targets/pause/pause-5.yml new file mode 100644 index 00000000..22955cd0 --- /dev/null +++ b/test/integration/targets/pause/pause-5.yml @@ -0,0 +1,35 @@ +- name: Test pause module echo output + hosts: localhost + become: no + gather_facts: no + + tasks: + - pause: + echo: yes + prompt: Enter some text + register: results + + - name: Ensure that input was captured + assert: + that: + - results.user_input == 'hello there' + + - pause: + echo: yes + prompt: Enter some text to edit + register: result + + - name: Ensure edited input was captured + assert: + that: + - result.user_input == 'hello tommy boy' + + - pause: + echo: no + prompt: Enter some text + register: result + + - name: Ensure secret input was captured + assert: + that: + - result.user_input == 'supersecretpancakes' diff --git a/test/integration/targets/pause/runme.sh b/test/integration/targets/pause/runme.sh new file mode 100755 index 00000000..eb2c6f7c --- /dev/null +++ b/test/integration/targets/pause/runme.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=../ ansible-playbook setup.yml + +# Test pause module when no tty and non-interactive with no seconds parameter. +# This is to prevent playbooks from hanging in cron and Tower jobs. +/usr/bin/env bash << EOF +ansible-playbook test-pause-no-tty.yml 2>&1 | \ + grep '\[WARNING\]: Not waiting for response to prompt as stdin is not interactive' && { + echo 'Successfully skipped pause in no TTY mode' >&2 + exit 0 + } || { + echo 'Failed to skip pause module' >&2 + exit 1 + } +EOF + +# Do not issue a warning when run in the background if a timeout is given +# https://github.com/ansible/ansible/issues/73042 +if sleep 0 | ansible localhost -m pause -a 'seconds=1' 2>&1 | grep '\[WARNING\]: Not waiting for response'; then + echo "Incorrectly issued warning when run in the background" + exit 1 +else + echo "Successfully ran in the background with no warning" +fi + +# Test redirecting stdout +# https://github.com/ansible/ansible/issues/41717 +if ansible-playbook pause-3.yml > /dev/null ; then + echo "Successfully redirected stdout" +else + echo "Failure when attempting to redirect stdout" + exit 1 +fi + + +# Test pause with seconds and minutes specified +ansible-playbook test-pause.yml "$@" + +# Interactively test pause +python test-pause.py "$@" diff --git a/test/integration/targets/pause/setup.yml b/test/integration/targets/pause/setup.yml new file mode 100644 index 00000000..9f6ab117 --- /dev/null +++ b/test/integration/targets/pause/setup.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + roles: + - setup_pexpect diff --git a/test/integration/targets/pause/test-pause-background.yml b/test/integration/targets/pause/test-pause-background.yml new file mode 100644 index 00000000..e480a774 --- /dev/null +++ b/test/integration/targets/pause/test-pause-background.yml @@ -0,0 +1,10 @@ +- name: Test pause in a background task + hosts: localhost + gather_facts: no + become: no + + tasks: + - pause: + + - pause: + seconds: 1 diff --git a/test/integration/targets/pause/test-pause-no-tty.yml b/test/integration/targets/pause/test-pause-no-tty.yml new file mode 100644 index 00000000..6e0e4027 --- /dev/null +++ b/test/integration/targets/pause/test-pause-no-tty.yml @@ -0,0 +1,7 @@ +- name: Test pause + hosts: localhost + gather_facts: no + become: no + + tasks: + - pause: diff --git a/test/integration/targets/pause/test-pause.py b/test/integration/targets/pause/test-pause.py new file mode 100755 index
00000000..7b37c666 --- /dev/null +++ b/test/integration/targets/pause/test-pause.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python + +import os +import pexpect +import sys +import termios + +from ansible.module_utils.six import PY2 + +args = sys.argv[1:] + +env_vars = { + 'ANSIBLE_ROLES_PATH': './roles', + 'ANSIBLE_NOCOLOR': 'True', + 'ANSIBLE_RETRY_FILES_ENABLED': 'False' +} + +try: + backspace = termios.tcgetattr(sys.stdin.fileno())[6][termios.VERASE] +except Exception: + backspace = b'\x7f' + +if PY2: + log_buffer = sys.stdout +else: + log_buffer = sys.stdout.buffer + +os.environ.update(env_vars) + +# -- Plain pause -- # +playbook = 'pause-1.yml' + +# Case 1 - Continue with enter +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:') +pause_test.send('\r') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 2 - Continue with C +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:') +pause_test.send('\x03') +pause_test.expect("Press 'C' to continue the play or 'A' to abort") +pause_test.send('C') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 3 - Abort with A +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:') +pause_test.send('\x03') +pause_test.expect("Press 'C' to continue the play or 'A' to abort") +pause_test.send('A') +pause_test.expect('user requested abort!') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# -- Custom Prompt -- # +playbook = 'pause-2.yml' + +# Case 1 - Continue with enter +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Custom prompt:') +pause_test.send('\r') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 2 - Continue with C +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Custom prompt:') +pause_test.send('\x03') +pause_test.expect("Press 'C' to continue the play or 'A' to abort") +pause_test.send('C') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 3 - Abort with A +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Custom prompt:') +pause_test.send('\x03') +pause_test.expect("Press 'C' to continue the play or 'A' to abort") +pause_test.send('A') +pause_test.expect('user requested abort!') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# -- Pause for N seconds -- # + +playbook = 'pause-3.yml' + +# Case 1 - Wait for task to continue after timeout +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' =
abort\)") +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# Case 2 - Contiune with Ctrl + C, C +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)") +pause_test.send('\x03') +pause_test.send('C') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 3 - Abort with Ctrl + C, A +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)") +pause_test.send('\x03') +pause_test.send('A') +pause_test.expect('user requested abort!') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# -- Pause for N seconds with custom prompt -- # + +playbook = 'pause-4.yml' + +# Case 1 - Wait for task to continue after timeout +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)") +pause_test.expect(r"Waiting for two seconds:") +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# Case 2 - Contiune with Ctrl + C, C +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)") +pause_test.expect(r"Waiting for two seconds:") +pause_test.send('\x03') +pause_test.send('C') +pause_test.expect('Task after pause') +pause_test.expect(pexpect.EOF) +pause_test.close() + + +# Case 3 - Abort with Ctrl + C, A +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Pausing for \d+ seconds') +pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)") +pause_test.expect(r"Waiting for two seconds:") +pause_test.send('\x03') +pause_test.send('A') +pause_test.expect('user requested abort!') +pause_test.expect(pexpect.EOF) +pause_test.close() + +# -- Enter input and ensure it's captured, echoed, and can be edited -- # + +playbook = 'pause-5.yml' + +pause_test = pexpect.spawn( + 'ansible-playbook', + args=[playbook] + args, + timeout=10, + env=os.environ +) + +pause_test.logfile = log_buffer +pause_test.expect(r'Enter some text:') +pause_test.send('hello there') +pause_test.send('\r') +pause_test.expect(r'Enter some text to edit:') +pause_test.send('hello there') +pause_test.send(backspace * 4) +pause_test.send('ommy boy') +pause_test.send('\r') +pause_test.expect(r'Enter some text \(output is hidden\):') +pause_test.send('supersecretpancakes') +pause_test.send('\r') +pause_test.expect(pexpect.EOF) +pause_test.close() diff --git a/test/integration/targets/pause/test-pause.yml b/test/integration/targets/pause/test-pause.yml new file mode 100644 index 00000000..6fefbaa1 --- /dev/null +++ b/test/integration/targets/pause/test-pause.yml @@ -0,0 +1,51 @@ +- name: Test pause + hosts: localhost + 
gather_facts: no + become: no + + tasks: + - name: non-integer for duration (EXPECTED FAILURE) + pause: + seconds: hello + register: result + ignore_errors: yes + + - assert: + that: + - result is failed + - "'non-integer' in result.msg" + + - name: non-boolean for echo (EXPECTED FAILURE) + pause: + echo: hello + register: result + ignore_errors: yes + + - assert: + that: + - result is failed + - "'not a valid boolean' in result.msg" + + - pause: + seconds: 0.1 + register: results + + - assert: + that: + - results.stdout is search('Paused for \d+\.\d+ seconds') + + - pause: + seconds: 1 + register: results + + - assert: + that: + - results.stdout is search('Paused for \d+\.\d+ seconds') + + - pause: + minutes: 1 + register: results + + - assert: + that: + - results.stdout is search('Paused for \d+\.\d+ minutes') diff --git a/test/integration/targets/ping/aliases b/test/integration/targets/ping/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/ping/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/ping/tasks/main.yml b/test/integration/targets/ping/tasks/main.yml new file mode 100644 index 00000000..bc93f98a --- /dev/null +++ b/test/integration/targets/ping/tasks/main.yml @@ -0,0 +1,53 @@ +# test code for the ping module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
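The ping tasks below exercise the module's simple contract: it echoes back whatever data it was given (defaulting to pong), and data=crash makes it raise, which is why the failure assertion greps the module output for 'Exception: boom'. A minimal stand-in for that contract:

def ping(data='pong'):
    # 'crash' deliberately triggers an unhandled exception so the
    # caller can test traceback handling; anything else is echoed.
    if data == 'crash':
        raise Exception('boom')
    return {'ping': data}

assert ping()['ping'] == 'pong'
assert ping('testing')['ping'] == 'testing'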
+ +- name: ping the test + ping: + register: result + +- name: assert the ping worked + assert: + that: + - result is not failed + - result is not changed + - result.ping == 'pong' + +- name: ping with data + ping: + data: testing + register: result + +- name: assert the ping worked with data + assert: + that: + - result is not failed + - result is not changed + - result.ping == 'testing' + +- name: ping with data=crash + ping: + data: crash + register: result + ignore_errors: yes + +- name: assert the ping failed with data=boom + assert: + that: + - result is failed + - result is not changed + - "'Exception: boom' in result.module_stdout + result.module_stderr" diff --git a/test/integration/targets/pip/aliases b/test/integration/targets/pip/aliases new file mode 100644 index 00000000..8d8cc50e --- /dev/null +++ b/test/integration/targets/pip/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group5 +skip/aix diff --git a/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py b/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py new file mode 100644 index 00000000..c8a79430 --- /dev/null +++ b/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py @@ -0,0 +1,2 @@ +def main(): + print("success") diff --git a/test/integration/targets/pip/files/setup.py b/test/integration/targets/pip/files/setup.py new file mode 100755 index 00000000..094064b7 --- /dev/null +++ b/test/integration/targets/pip/files/setup.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python + +from setuptools import setup, find_packages + +setup( + name="ansible_test_pip_chdir", + version="0", + packages=find_packages(), + entry_points={ + 'console_scripts': [ + 'ansible_test_pip_chdir = ansible_test_pip_chdir:main' + ] + } +) diff --git a/test/integration/targets/pip/meta/main.yml b/test/integration/targets/pip/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/pip/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/pip/tasks/default_cleanup.yml b/test/integration/targets/pip/tasks/default_cleanup.yml new file mode 100644 index 00000000..f2265c09 --- /dev/null +++ b/test/integration/targets/pip/tasks/default_cleanup.yml @@ -0,0 +1,5 @@ +- name: remove unwanted packages + package: + name: git + state: absent + when: git_install.changed diff --git a/test/integration/targets/pip/tasks/freebsd_cleanup.yml b/test/integration/targets/pip/tasks/freebsd_cleanup.yml new file mode 100644 index 00000000..fa224d83 --- /dev/null +++ b/test/integration/targets/pip/tasks/freebsd_cleanup.yml @@ -0,0 +1,6 @@ +- name: remove auto-installed packages from FreeBSD + pkgng: + name: git + state: absent + autoremove: yes + when: git_install.changed diff --git a/test/integration/targets/pip/tasks/main.yml b/test/integration/targets/pip/tasks/main.yml new file mode 100644 index 00000000..c0a36c43 --- /dev/null +++ b/test/integration/targets/pip/tasks/main.yml @@ -0,0 +1,53 @@ +# Current pip unconditionally uses md5. +# We can re-enable if pip switches to a different hash or allows us to not check md5. 
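The pip target's main.yml, next, selects a virtualenv command per interpreter: some form of virtualenv on Python 2 and the stdlib venv module on Python 3+. The same decision in plain Python; a sketch of the logic under those assumptions, not the role's own code:

import shutil
import sys

def pick_virtualenv_command(python=sys.executable):
    if sys.version_info[0] > 2:
        return '{0} -m venv'.format(python)  # stdlib on Python 3
    # On Python 2, prefer a virtualenv executable from PATH and fall
    # back to the module invocation, mirroring the tasks below.
    # (shutil.which itself is Python 3-only; this is just a sketch.)
    exe = shutil.which('virtualenv')
    return exe or '{0} -m virtualenv'.format(python)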
+ +- name: Python 2 + when: ansible_python.version.major == 2 + block: + - name: find virtualenv command + command: "which virtualenv virtualenv-{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + register: command + ignore_errors: true + + - name: is virtualenv available to python -m + command: '{{ ansible_python_interpreter }} -m virtualenv' + register: python_m + when: not command.stdout_lines + failed_when: python_m.rc != 2 + + - name: remember selected virtualenv command + set_fact: + virtualenv: "{{ command.stdout_lines[0] if command is successful else ansible_python_interpreter ~ ' -m virtualenv' }}" + +- name: Python 3+ + when: ansible_python.version.major > 2 + block: + - name: remember selected virtualenv command + set_fact: + virtualenv: "{{ ansible_python_interpreter ~ ' -m venv' }}" + +- block: + - name: install git, needed for repo installs + package: + name: git + state: present + when: ansible_distribution != "MacOSX" + register: git_install + + - name: ensure wheel is installed + pip: + name: wheel + + - include_tasks: pip.yml + always: + - name: platform specific cleanup + include_tasks: "{{ cleanup_filename }}" + with_first_found: + - "{{ ansible_distribution | lower }}_cleanup.yml" + - "default_cleanup.yml" + loop_control: + loop_var: cleanup_filename + when: ansible_fips|bool != True + module_defaults: + pip: + virtualenv_command: "{{ virtualenv }}" diff --git a/test/integration/targets/pip/tasks/pip.yml b/test/integration/targets/pip/tasks/pip.yml new file mode 100644 index 00000000..572c7b6f --- /dev/null +++ b/test/integration/targets/pip/tasks/pip.yml @@ -0,0 +1,580 @@ +# test code for the pip module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
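Several tasks in the file that follows install with multi-clause version specifiers such as <100,!=1.0,>0.0.0. Whether an installed version satisfies such a spec, and why the pre-release case near the end of the file needs care, can be demonstrated with the packaging library that pip itself builds on:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet('<100,!=1.0,>0.0.0')
assert Version('1.2') in spec
assert Version('1.0') not in spec      # excluded by !=1.0
assert Version('100.1') not in spec    # excluded by <100

# ">100.0.0" is unsatisfiable for these test packages, which is why
# the unsatisfied check_mode tasks below must report "changed".
assert Version('1.2') not in SpecifierSet('>100.0.0')

# Pre-releases are excluded by default, which matters for the
# black==19.10b0 check_mode case near the end of this file.
assert Version('19.10b0') not in SpecifierSet('>=19')
assert SpecifierSet('>=19', prereleases=True).contains(Version('19.10b0'))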
+ +# FIXME: replace the python test package + +# first some tests installed system-wide +# verify things were not installed to start with + +- name: ensure packages are not installed (precondition setup) + pip: + name: "{{ pip_test_packages }}" + state: absent + +# verify that a package that is uninstalled being set to absent +# results in an unchanged state and that the test package is not +# installed + +- name: ensure packages are not installed + pip: + name: "{{ pip_test_packages }}" + state: absent + register: uninstall_result + +- name: removing unremoved packages should return unchanged + assert: + that: + - "not (uninstall_result is changed)" + +- command: "{{ ansible_python.executable }} -c 'import {{ item }}'" + register: absent_result + failed_when: "absent_result.rc == 0" + loop: '{{ pip_test_modules }}' + +# now we're going to install the test package knowing it is uninstalled +# and check that installation was ok + +- name: ensure packages are installed + pip: + name: "{{ pip_test_packages }}" + state: present + register: install_result + +- name: verify we recorded a change + assert: + that: + - "install_result is changed" + +- command: "{{ ansible_python.executable }} -c 'import {{ item }}'" + loop: '{{ pip_test_modules }}' + +# now remove it to test uninstallation of a package we are sure is installed + +- name: now uninstall so we can see that a change occurred + pip: + name: "{{ pip_test_packages }}" + state: absent + register: absent2 + +- name: assert a change occurred on uninstallation + assert: + that: + - "absent2 is changed" + +# put the test packages back + +- name: now put it back in case someone wanted it (like us!) + pip: + name: "{{ pip_test_packages }}" + state: present + +# Test virtualenv installations + +- name: "make sure the test env doesn't exist" + file: + state: absent + name: "{{ output_dir }}/pipenv" + +- name: install a working version of setuptools in the virtualenv + pip: + name: setuptools + virtualenv: "{{ output_dir }}/pipenv" + state: present + version: 33.1.1 + +- name: create a requirement file with a vcs url + copy: + dest: "{{ output_dir }}/pipreq.txt" + content: "-e git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601" + +- name: install the requirement file in a virtualenv + pip: + requirements: "{{ output_dir}}/pipreq.txt" + virtualenv: "{{ output_dir }}/pipenv" + register: req_installed + +- name: check that a change occurred + assert: + that: + - "req_installed is changed" + +- name: "repeat installation to check status didn't change" + pip: + requirements: "{{ output_dir}}/pipreq.txt" + virtualenv: "{{ output_dir }}/pipenv" + register: req_installed + +- name: "check that a change didn't occur this time (bug ansible#1705)" + assert: + that: + - "not (req_installed is changed)" + +- name: install the same module from url + pip: + name: "git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601" + virtualenv: "{{ output_dir }}/pipenv" + editable: True + register: url_installed + +- name: "check that a change didn't occur (bug ansible-modules-core#1645)" + assert: + that: + - "not (url_installed is changed)" + +# Test pip package in check mode doesn't always report changed.
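The check-mode cases that follow assert that requesting an already-installed package reports no change. The essential move is deciding "changed" from the current state without mutating it; a small illustration with importlib.metadata (Python 3.8+), not the pip module's actual code:

from importlib.metadata import version, PackageNotFoundError

def would_change(package):
    # check_mode semantics: report whether an install would do work,
    # without invoking pip at all.
    try:
        version(package)
    except PackageNotFoundError:
        return True    # absent, so an install would change state
    return False       # already present, nothing to report

print(would_change('pip'))  # False wherever pip is installed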
+ +# Special case for pip +- name: check for pip package + pip: + name: pip + virtualenv: "{{ output_dir }}/pipenv" + state: present + +- name: check for pip package in check_mode + pip: + name: pip + virtualenv: "{{ output_dir }}/pipenv" + state: present + check_mode: True + register: pip_check_mode + +- name: make sure pip in check_mode doesn't report changed + assert: + that: + - "not (pip_check_mode is changed)" + +# Special case for setuptools +- name: check for setuptools package + pip: + name: setuptools + virtualenv: "{{ output_dir }}/pipenv" + state: present + +- name: check for setuptools package in check_mode + pip: + name: setuptools + virtualenv: "{{ output_dir }}/pipenv" + state: present + check_mode: True + register: setuptools_check_mode + +- name: make sure setuptools in check_mode doesn't report changed + assert: + that: + - "not (setuptools_check_mode is changed)" + + +# Normal case +- name: check for q package + pip: + name: q + virtualenv: "{{ output_dir }}/pipenv" + state: present + +- name: check for q package in check_mode + pip: + name: q + virtualenv: "{{ output_dir }}/pipenv" + state: present + check_mode: True + register: q_check_mode + +- name: make sure q in check_mode doesn't report changed + assert: + that: + - "not (q_check_mode is changed)" + +# Case with package name that has a different package name case and an +# underscore instead of a hyphen +- name: check for Junit-XML package + pip: + name: Junit-XML + virtualenv: "{{ output_dir }}/pipenv" + state: present + +- name: check for Junit-XML package in check_mode + pip: + name: Junit-XML + virtualenv: "{{ output_dir }}/pipenv" + state: present + check_mode: True + register: diff_case_check_mode + +- name: make sure Junit-XML in check_mode doesn't report changed + assert: + that: + - "diff_case_check_mode is not changed" + +# ansible#23204 +- name: ensure a fresh virtualenv + file: + state: absent + name: "{{ output_dir }}/pipenv" + +- name: install pip through pip into fresh virtualenv + pip: + name: pip + virtualenv: "{{ output_dir }}/pipenv" + register: pip_install_venv + +- name: make sure pip in fresh virtualenv reports changed + assert: + that: + - "pip_install_venv is changed" + +# https://github.com/ansible/ansible/issues/37912 +# support chdir without virtualenv +- name: create chdir test directories + file: + state: directory + name: "{{ output_dir }}/{{ item }}" + loop: + - pip_module + - pip_root + - pip_module/ansible_test_pip_chdir + +- name: copy test module + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/pip_module/{{ item }}" + loop: + - setup.py + - ansible_test_pip_chdir/__init__.py + +- name: install test module + pip: + name: .
+ chdir: "{{ output_dir }}/pip_module" + extra_args: --user --upgrade --root {{ output_dir }}/pip_root + +- name: register python_site_lib + command: '{{ ansible_python.executable }} -c "import site; print(site.USER_SITE)"' + register: pip_python_site_lib + +- name: register python_user_base + command: '{{ ansible_python.executable }} -c "import site; print(site.USER_BASE)"' + register: pip_python_user_base + +- name: run test module + shell: "PYTHONPATH=$(echo {{ output_dir }}/pip_root{{ pip_python_site_lib.stdout }}) {{ output_dir }}/pip_root{{ pip_python_user_base.stdout }}/bin/ansible_test_pip_chdir" + register: pip_chdir_command + +- name: make sure command ran + assert: + that: + - pip_chdir_command.stdout == "success" + +# https://github.com/ansible/ansible/issues/25122 +- name: ensure a fresh virtualenv + file: + state: absent + name: "{{ output_dir }}/pipenv" + +- name: install requirements file into virtual + chdir + pip: + name: q + chdir: "{{ output_dir }}/" + virtualenv: "pipenv" + state: present + register: venv_chdir + +- name: make sure fresh virtualenv + chdir report changed + assert: + that: + - "venv_chdir is changed" + +# ansible#38785 +- name: allow empty list of packages + pip: + name: [] + register: pip_install_empty + +- name: ensure empty install is successful + assert: + that: + - "not (pip_install_empty is changed)" + +# https://github.com/ansible/ansible/issues/41043 +- name: do not consider an empty string as a version + pip: + name: q + state: present + version: "" + virtualenv: "{{ output_dir }}/pipenv" + register: pip_install_empty_version_string + +- name: ensure that task installation did not fail + assert: + that: + - pip_install_empty_version_string is successful + +# test version specifiers +- name: make sure no test_package is installed now + pip: + name: "{{ pip_test_packages }}" + state: absent + +- name: install package with version specifiers + pip: + name: "{{ pip_test_package }}" + version: "<100,!=1.0,>0.0.0" + register: version + +- name: assert package installed correctly + assert: + that: "version.changed" + +- name: reinstall package + pip: + name: "{{ pip_test_package }}" + version: "<100,!=1.0,>0.0.0" + register: version2 + +- name: assert no changes occurred + assert: + that: "not version2.changed" + +- name: test the check_mode + pip: + name: "{{ pip_test_package }}" + version: "<100,!=1.0,>0.0.0" + check_mode: yes + register: version3 + +- name: assert no changes + assert: + that: "not version3.changed" + +- name: test the check_mode with unsatisfied version + pip: + name: "{{ pip_test_package }}" + version: ">100.0.0" + check_mode: yes + register: version4 + +- name: assert changed + assert: + that: "version4.changed" + +- name: uninstall test packages for next test + pip: + name: "{{ pip_test_packages }}" + state: absent + +- name: test invalid combination of arguments + pip: + name: "{{ pip_test_pkg_ver }}" + version: "1.11.1" + ignore_errors: yes + register: version5 + +- name: assert the invalid combination should fail + assert: + that: "version5 is failed" + +- name: another invalid combination of arguments + pip: + name: "{{ pip_test_pkg_ver[0] }}" + version: "<100.0.0" + ignore_errors: yes + register: version6 + +- name: assert invalid combination should fail + assert: + that: "version6 is failed" + +- name: try to install invalid package + pip: + name: "{{ pip_test_pkg_ver_unsatisfied }}" + ignore_errors: yes + register: version7 + +- name: assert install should fail + assert: + that: "version7 is failed" + +- name: test install
multi-packages with version specifiers + pip: + name: "{{ pip_test_pkg_ver }}" + register: version8 + +- name: assert packages installed correctly + assert: + that: "version8.changed" + +- name: test install multi-packages with check_mode + pip: + name: "{{ pip_test_pkg_ver }}" + check_mode: yes + register: version9 + +- name: assert no change + assert: + that: "not version9.changed" + +- name: test install unsatisfied multi-packages with check_mode + pip: + name: "{{ pip_test_pkg_ver_unsatisfied }}" + check_mode: yes + register: version10 + +- name: assert changes needed + assert: + that: "version10.changed" + +- name: uninstall packages for next test + pip: + name: "{{ pip_test_packages }}" + state: absent + +- name: test install of multiple packages provided by one single string + pip: + name: "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}" + register: version11 + +- name: assert the install ran correctly + assert: + that: "version11.changed" + +- name: test install of multiple packages provided by one single string with check_mode + pip: + name: "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}" + check_mode: yes + register: version12 + +- name: assert no changes needed + assert: + that: "not version12.changed" + +- name: test module can parse the combination of multiple packages on one line and a git url + pip: + name: + - git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601 + - "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}" + +- name: test the invalid package name + pip: + name: djan=+-~!@#$go>1.11.1,<1.11.3 + ignore_errors: yes + register: version13 + +- name: the invalid package should make the module fail + assert: + that: "version13 is failed" + +- name: try to install package with setuptools extras + pip: + name: + - "{{pip_test_package}}[test]" + +- name: clean up + pip: + name: "{{ pip_test_packages }}" + state: absent + +# https://github.com/ansible/ansible/issues/47198 +# distribute is a legacy package that will fail on newer Python 3 versions +- block: + - name: make sure the virtualenv does not exist + file: + state: absent + name: "{{ output_dir }}/pipenv" + + - name: install distribute in the virtualenv + pip: + # using -c for constraints is not supported as long as tests are executed using the centos6 container + # since the pip version in the venv is not upgraded and is too old (6.0.8) + name: + - distribute + - setuptools<45 # setuptools 45 and later require python 3.5 or later + virtualenv: "{{ output_dir }}/pipenv" + state: present + + - name: try to remove distribute + pip: + state: "absent" + name: "distribute" + virtualenv: "{{ output_dir }}/pipenv" + ignore_errors: yes + register: remove_distribute + + - name: inspect the cmd + assert: + that: "'distribute' in remove_distribute.cmd" + when: ansible_python.version.major == 2 + +### test virtualenv_command begin ### + +- name: Test virtualenv command with arguments + when: ansible_python.version.major == 2 + block: + - name: make sure the virtualenv does not exist + file: + state: absent + name: "{{ output_dir }}/pipenv" + + # ref: https://github.com/ansible/ansible/issues/52275 + - name: install using virtualenv_command with arguments + pip: + name: "{{ pip_test_package }}" + virtualenv: "{{ output_dir }}/pipenv" + virtualenv_command: "{{ command.stdout_lines[0] | basename }} --verbose" + state: present + register: version13 + + - name: ensure install using virtualenv_command with arguments was successful + assert: + that: + - "version13 is success" + +### test virtualenv_command end ### + +#
+ +# https://github.com/ansible/ansible/issues/68592 +# Handle pre-release version numbers in check_mode for already-installed +# packages. +# TODO: Limiting to py3 test boxes for now so the example of 'black' installs; +# we should probably find another package to use with a similar versioning +# scheme or make a small one and enable this test for py2 as well. +- block: + - name: Install a beta version of a package + pip: + name: black + version: 19.10b0 + state: present + + - name: Use check_mode and ensure that the package is shown as installed + check_mode: true + pip: + name: black + state: present + register: pip_prereleases + + - name: Uninstall the beta package if we need to + pip: + name: black + version: 19.10b0 + state: absent + when: pip_prereleases is changed + + - assert: + that: + - pip_prereleases is successful + - pip_prereleases is not changed + - '"black==19.10b0" in pip_prereleases.stdout_lines' + + when: ansible_python.version.major == 3 diff --git a/test/integration/targets/pip/vars/main.yml b/test/integration/targets/pip/vars/main.yml new file mode 100644 index 00000000..2e87abcc --- /dev/null +++ b/test/integration/targets/pip/vars/main.yml @@ -0,0 +1,13 @@ +pip_test_package: sampleprojectpy2 +pip_test_packages: + - sampleprojectpy2 + - jiphy +pip_test_pkg_ver: + - sampleprojectpy2<=100, !=9.0.0,>=0.0.1 + - jiphy<100 ,!=9,>=0.0.1 +pip_test_pkg_ver_unsatisfied: + - sampleprojectpy2>= 999.0.0 + - jiphy >999.0 +pip_test_modules: + - sample + - jiphy diff --git a/test/integration/targets/pkg_resources/aliases b/test/integration/targets/pkg_resources/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/pkg_resources/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/pkg_resources/lookup_plugins/check_pkg_resources.py b/test/integration/targets/pkg_resources/lookup_plugins/check_pkg_resources.py new file mode 100644 index 00000000..9f1c5c0b --- /dev/null +++ b/test/integration/targets/pkg_resources/lookup_plugins/check_pkg_resources.py @@ -0,0 +1,23 @@ +""" +This test case verifies that pkg_resources imports from ansible plugins are functional. + +If pkg_resources is not installed this test will succeed. +If pkg_resources is installed but is unable to function, this test will fail. + +One known failure case this test can detect is when ansible declares a __requires__ and then tests are run without an egg-info directory. 
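+ +As a rough illustration (hypothetical, not part of this test), a script that +declares a requirement before pkg_resources is imported: + + __requires__ = ['ansible'] + import pkg_resources + +will raise pkg_resources.DistributionNotFound at import time if no matching +egg-info/dist-info metadata can be found.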
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +# noinspection PyUnresolvedReferences +try: + from pkg_resources import Requirement +except ImportError: + Requirement = None + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + return [] diff --git a/test/integration/targets/pkg_resources/tasks/main.yml b/test/integration/targets/pkg_resources/tasks/main.yml new file mode 100644 index 00000000..b19d0ebd --- /dev/null +++ b/test/integration/targets/pkg_resources/tasks/main.yml @@ -0,0 +1,3 @@ +- name: Verify that pkg_resources imports are functional + debug: + msg: "{{ lookup('check_pkg_resources') }}" diff --git a/test/integration/targets/play_iterator/aliases b/test/integration/targets/play_iterator/aliases new file mode 100644 index 00000000..3005e4b2 --- /dev/null +++ b/test/integration/targets/play_iterator/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/test/integration/targets/play_iterator/playbook.yml b/test/integration/targets/play_iterator/playbook.yml new file mode 100644 index 00000000..76100c60 --- /dev/null +++ b/test/integration/targets/play_iterator/playbook.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + gather_facts: false + tasks: + - name: + debug: + msg: foo + - name: "task 2" + debug: + msg: bar diff --git a/test/integration/targets/play_iterator/runme.sh b/test/integration/targets/play_iterator/runme.sh new file mode 100755 index 00000000..9f30d9e7 --- /dev/null +++ b/test/integration/targets/play_iterator/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook playbook.yml --start-at-task 'task 2' "$@" diff --git a/test/integration/targets/playbook/aliases b/test/integration/targets/playbook/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/playbook/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/playbook/runme.sh b/test/integration/targets/playbook/runme.sh new file mode 100755 index 00000000..25e2e5a6 --- /dev/null +++ b/test/integration/targets/playbook/runme.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -eux + +# run type tests +ansible-playbook -i ../../inventory types.yml -v "$@" + +# test timeout +ansible-playbook -i ../../inventory timeout.yml -v "$@" diff --git a/test/integration/targets/playbook/timeout.yml b/test/integration/targets/playbook/timeout.yml new file mode 100644 index 00000000..442e13ae --- /dev/null +++ b/test/integration/targets/playbook/timeout.yml @@ -0,0 +1,12 @@ +- hosts: localhost + gather_facts: false + tasks: + - shell: sleep 100 + timeout: 1 + ignore_errors: true + register: time + + - assert: + that: + - time is failed + - '"The shell action failed to execute in the expected time frame" in time["msg"]' diff --git a/test/integration/targets/playbook/types.yml b/test/integration/targets/playbook/types.yml new file mode 100644 index 00000000..dd8997b6 --- /dev/null +++ b/test/integration/targets/playbook/types.yml @@ -0,0 +1,21 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: try to set 'diff' a boolean + debug: msg="not important" + diff: yes + ignore_errors: True + register: good_diff + + - name: try to set 'diff' a boolean to a string (. would make it non boolean) + debug: msg="not important" + diff: yes. 
+ ignore_errors: True + register: bad_diff + + - name: Check we did error out + assert: + that: + - good_diff is success + - bad_diff is failed + - "'is not a valid boolean' in bad_diff['msg']" diff --git a/test/integration/targets/plugin_config_for_inventory/aliases b/test/integration/targets/plugin_config_for_inventory/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/plugin_config_for_inventory/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml b/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml new file mode 100644 index 00000000..8ff39884 --- /dev/null +++ b/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml @@ -0,0 +1,3 @@ +plugin: test_inventory +departments: + - paris diff --git a/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml b/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml new file mode 100644 index 00000000..787cf967 --- /dev/null +++ b/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml @@ -0,0 +1 @@ +plugin: test_inventory diff --git a/test/integration/targets/plugin_config_for_inventory/runme.sh b/test/integration/targets/plugin_config_for_inventory/runme.sh new file mode 100755 index 00000000..119a073a --- /dev/null +++ b/test/integration/targets/plugin_config_for_inventory/runme.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -o errexit -o nounset -o xtrace + +export ANSIBLE_INVENTORY_PLUGINS=./ +export ANSIBLE_INVENTORY_ENABLED=test_inventory + +# check default values +ansible-inventory --list -i ./config_without_parameter.yml --export | \ + env python -c "import json, sys; inv = json.loads(sys.stdin.read()); \ + assert set(inv['_meta']['hostvars']['test_host']['departments']) == set(['seine-et-marne', 'haute-garonne'])" + +# check values +ansible-inventory --list -i ./config_with_parameter.yml --export | \ + env python -c "import json, sys; inv = json.loads(sys.stdin.read()); \ + assert set(inv['_meta']['hostvars']['test_host']['departments']) == set(['paris'])" diff --git a/test/integration/targets/plugin_config_for_inventory/test_inventory.py b/test/integration/targets/plugin_config_for_inventory/test_inventory.py new file mode 100644 index 00000000..63ed0cc2 --- /dev/null +++ b/test/integration/targets/plugin_config_for_inventory/test_inventory.py @@ -0,0 +1,52 @@ +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: test_inventory + plugin_type: inventory + authors: + - Pierre-Louis Bonicoli (@pilou-) + short_description: test inventory + description: + - test inventory (fetch parameters using config API) + options: + departments: + description: test parameter + type: list + default: + - seine-et-marne + - haute-garonne + required: False +''' + +EXAMPLES = ''' +# Example command line: ansible-inventory --list -i test_inventory.yml + +plugin: test_inventory +departments: + - paris +''' + +from ansible.plugins.inventory import BaseInventoryPlugin + + +class InventoryModule(BaseInventoryPlugin): + NAME = 'test_inventory' + + def verify_file(self, path): + return True + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + config_data = self._read_config_data(path=path) + self._consume_options(config_data) + + departments = 
self.get_option('departments') + + group = 'test_group' + host = 'test_host' + + self.inventory.add_group(group) + self.inventory.add_host(group=group, host=host) + self.inventory.set_variable(host, 'departments', departments) diff --git a/test/integration/targets/plugin_filtering/aliases b/test/integration/targets/plugin_filtering/aliases new file mode 100644 index 00000000..3005e4b2 --- /dev/null +++ b/test/integration/targets/plugin_filtering/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/test/integration/targets/plugin_filtering/copy.yml b/test/integration/targets/plugin_filtering/copy.yml new file mode 100644 index 00000000..083386a1 --- /dev/null +++ b/test/integration/targets/plugin_filtering/copy.yml @@ -0,0 +1,10 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - copy: + content: 'Testing 1... 2... 3...' + dest: ./testing.txt + - file: + state: absent + path: ./testing.txt diff --git a/test/integration/targets/plugin_filtering/filter_lookup.ini b/test/integration/targets/plugin_filtering/filter_lookup.ini new file mode 100644 index 00000000..17e58e33 --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_lookup.ini @@ -0,0 +1,4 @@ +[default] +retry_files_enabled = False +plugin_filters_cfg = ./filter_lookup.yml + diff --git a/test/integration/targets/plugin_filtering/filter_lookup.yml b/test/integration/targets/plugin_filtering/filter_lookup.yml new file mode 100644 index 00000000..694ebfcb --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_lookup.yml @@ -0,0 +1,6 @@ +--- +filter_version: 1.0 +module_blacklist: + # Specify the name of a lookup plugin here. This should have no effect as + # this is only for filtering modules + - list diff --git a/test/integration/targets/plugin_filtering/filter_modules.ini b/test/integration/targets/plugin_filtering/filter_modules.ini new file mode 100644 index 00000000..ab39bedd --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_modules.ini @@ -0,0 +1,4 @@ +[default] +retry_files_enabled = False +plugin_filters_cfg = ./filter_modules.yml + diff --git a/test/integration/targets/plugin_filtering/filter_modules.yml b/test/integration/targets/plugin_filtering/filter_modules.yml new file mode 100644 index 00000000..6cffa676 --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_modules.yml @@ -0,0 +1,9 @@ +--- +filter_version: 1.0 +module_blacklist: + # A pure action plugin + - pause + # A hybrid action plugin with module + - copy + # A pure module + - tempfile diff --git a/test/integration/targets/plugin_filtering/filter_ping.ini b/test/integration/targets/plugin_filtering/filter_ping.ini new file mode 100644 index 00000000..aabbde45 --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_ping.ini @@ -0,0 +1,4 @@ +[default] +retry_files_enabled = False +plugin_filters_cfg = ./filter_ping.yml + diff --git a/test/integration/targets/plugin_filtering/filter_ping.yml b/test/integration/targets/plugin_filtering/filter_ping.yml new file mode 100644 index 00000000..08e56f24 --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_ping.yml @@ -0,0 +1,5 @@ +--- +filter_version: 1.0 +module_blacklist: + # Ping is special + - ping diff --git a/test/integration/targets/plugin_filtering/filter_stat.ini b/test/integration/targets/plugin_filtering/filter_stat.ini new file mode 100644 index 00000000..13a103dd --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_stat.ini @@ -0,0 +1,4 @@ +[default] +retry_files_enabled = False 
+plugin_filters_cfg = ./filter_stat.yml + diff --git a/test/integration/targets/plugin_filtering/filter_stat.yml b/test/integration/targets/plugin_filtering/filter_stat.yml new file mode 100644 index 00000000..c1ce42ef --- /dev/null +++ b/test/integration/targets/plugin_filtering/filter_stat.yml @@ -0,0 +1,5 @@ +--- +filter_version: 1.0 +module_blacklist: + # Stat is special + - stat diff --git a/test/integration/targets/plugin_filtering/lookup.yml b/test/integration/targets/plugin_filtering/lookup.yml new file mode 100644 index 00000000..de6d1b48 --- /dev/null +++ b/test/integration/targets/plugin_filtering/lookup.yml @@ -0,0 +1,14 @@ +--- +- hosts: testhost + gather_facts: False + vars: + data: + - one + - two + tasks: + - debug: + msg: '{{ lookup("list", data) }}' + + - debug: + msg: '{{ item }}' + with_list: '{{ data }}' diff --git a/test/integration/targets/plugin_filtering/no_blacklist_module.ini b/test/integration/targets/plugin_filtering/no_blacklist_module.ini new file mode 100644 index 00000000..65b51d67 --- /dev/null +++ b/test/integration/targets/plugin_filtering/no_blacklist_module.ini @@ -0,0 +1,3 @@ +[defaults] +retry_files_enabled = False +plugin_filters_cfg = ./no_blacklist_module.yml diff --git a/test/integration/targets/plugin_filtering/no_blacklist_module.yml b/test/integration/targets/plugin_filtering/no_blacklist_module.yml new file mode 100644 index 00000000..52a55dff --- /dev/null +++ b/test/integration/targets/plugin_filtering/no_blacklist_module.yml @@ -0,0 +1,3 @@ +--- +filter_version: 1.0 +module_blacklist: diff --git a/test/integration/targets/plugin_filtering/no_filters.ini b/test/integration/targets/plugin_filtering/no_filters.ini new file mode 100644 index 00000000..4b42c8c4 --- /dev/null +++ b/test/integration/targets/plugin_filtering/no_filters.ini @@ -0,0 +1,4 @@ +[default] +retry_files_enabled = False +plugin_filters_cfg = ./empty.yml + diff --git a/test/integration/targets/plugin_filtering/pause.yml b/test/integration/targets/plugin_filtering/pause.yml new file mode 100644 index 00000000..e2c1ef9c --- /dev/null +++ b/test/integration/targets/plugin_filtering/pause.yml @@ -0,0 +1,6 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - pause: + seconds: 1 diff --git a/test/integration/targets/plugin_filtering/ping.yml b/test/integration/targets/plugin_filtering/ping.yml new file mode 100644 index 00000000..9e2214b0 --- /dev/null +++ b/test/integration/targets/plugin_filtering/ping.yml @@ -0,0 +1,6 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - ping: + data: 'Testing 1... 2... 3...' diff --git a/test/integration/targets/plugin_filtering/runme.sh b/test/integration/targets/plugin_filtering/runme.sh new file mode 100755 index 00000000..aa0e2b0c --- /dev/null +++ b/test/integration/targets/plugin_filtering/runme.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash + +set -ux + +# +# Check that with no filters set, all of these modules run as expected +# +ANSIBLE_CONFIG=no_filters.ini ansible-playbook copy.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run copy with no filters applied" + exit 1 +fi +ANSIBLE_CONFIG=no_filters.ini ansible-playbook pause.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run pause with no filters applied" + exit 1 +fi +ANSIBLE_CONFIG=no_filters.ini ansible-playbook tempfile.yml -i ../../inventory -vvv "$@" +if test $? 
!= 0 ; then + echo "### Failed to run tempfile with no filters applied" + exit 1 +fi + +# +# Check that if no modules are blacklisted then Ansible does not throw a traceback +# +ANSIBLE_CONFIG=no_blacklist_module.ini ansible-playbook tempfile.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run tempfile with no modules blacklisted" + exit 1 +fi + +# +# Check that with these modules filtered out, all of these modules fail to be found +# +ANSIBLE_CONFIG=filter_modules.ini ansible-playbook copy.yml -i ../../inventory -v "$@" +if test $? = 0 ; then + echo "### Failed to prevent copy from running" + exit 1 +else + echo "### Copy was prevented from running as expected" +fi +ANSIBLE_CONFIG=filter_modules.ini ansible-playbook pause.yml -i ../../inventory -v "$@" +if test $? = 0 ; then + echo "### Failed to prevent pause from running" + exit 1 +else + echo "### pause was prevented from running as expected" +fi +ANSIBLE_CONFIG=filter_modules.ini ansible-playbook tempfile.yml -i ../../inventory -v "$@" +if test $? = 0 ; then + echo "### Failed to prevent tempfile from running" + exit 1 +else + echo "### tempfile was prevented from running as expected" +fi + +# +# ping is a special module as we test for its existence. Check it specially +# + +# Check that ping runs with no filter +ANSIBLE_CONFIG=no_filters.ini ansible-playbook ping.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run ping with no filters applied" + exit 1 +fi + +# Check that other modules run with ping filtered +ANSIBLE_CONFIG=filter_ping.ini ansible-playbook copy.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run copy when a filter was applied to ping" + exit 1 +fi +# Check that ping fails to run when it is filtered +ANSIBLE_CONFIG=filter_ping.ini ansible-playbook ping.yml -i ../../inventory -v "$@" +if test $? = 0 ; then + echo "### Failed to prevent ping from running" + exit 1 +else + echo "### Ping was prevented from running as expected" +fi + +# +# Check that specifying a lookup plugin in the filter has no effect +# + +ANSIBLE_CONFIG=filter_lookup.ini ansible-playbook lookup.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to use a lookup plugin when it is incorrectly specified in the *module* blacklist" + exit 1 +fi + +# +# stat is a special module as we use it to run nearly every other module. Check it specially +# + +# Check that stat runs with no filter +ANSIBLE_CONFIG=no_filters.ini ansible-playbook stat.yml -i ../../inventory -vvv "$@" +if test $? != 0 ; then + echo "### Failed to run stat with no filters applied" + exit 1 +fi + +# Check that running another module when stat is filtered gives us our custom error message +ANSIBLE_CONFIG=filter_stat.ini +export ANSIBLE_CONFIG +CAPTURE=$(ansible-playbook copy.yml -i ../../inventory -vvv "$@" 2>&1) +if test $? = 0 ; then + echo "### Copy ran even though stat is in the module blacklist" + exit 1 +else + echo "$CAPTURE" | grep 'The stat module was specified in the module blacklist file,.*, but Ansible will not function without the stat module. Please remove stat from the blacklist.' + if test $? 
!= 0 ; then + echo "### Stat did not give us our custom error message" + exit 1 + fi + echo "### Filtering stat failed with our custom error message as expected" +fi +unset ANSIBLE_CONFIG + +# Check that running stat when stat is filtered gives our custom error message +ANSIBLE_CONFIG=filter_stat.ini +export ANSIBLE_CONFIG +CAPTURE=$(ansible-playbook stat.yml -i ../../inventory -vvv "$@" 2>&1) +if test $? = 0 ; then + echo "### Stat ran even though it is in the module blacklist" + exit 1 +else + echo "$CAPTURE" | grep 'The stat module was specified in the module blacklist file,.*, but Ansible will not function without the stat module. Please remove stat from the blacklist.' + if test $? != 0 ; then + echo "### Stat did not give us our custom error message" + exit 1 + fi + echo "### Filtering stat failed with our custom error message as expected" +fi +unset ANSIBLE_CONFIG diff --git a/test/integration/targets/plugin_filtering/stat.yml b/test/integration/targets/plugin_filtering/stat.yml new file mode 100644 index 00000000..4f24baae --- /dev/null +++ b/test/integration/targets/plugin_filtering/stat.yml @@ -0,0 +1,6 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - stat: + path: '/' diff --git a/test/integration/targets/plugin_filtering/tempfile.yml b/test/integration/targets/plugin_filtering/tempfile.yml new file mode 100644 index 00000000..06463547 --- /dev/null +++ b/test/integration/targets/plugin_filtering/tempfile.yml @@ -0,0 +1,9 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - tempfile: + register: temp_result + - file: + state: absent + path: '{{ temp_result["path"] }}' diff --git a/test/integration/targets/plugin_loader/aliases b/test/integration/targets/plugin_loader/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/plugin_loader/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/plugin_loader/normal/filters.yml b/test/integration/targets/plugin_loader/normal/filters.yml new file mode 100644 index 00000000..f9069be1 --- /dev/null +++ b/test/integration/targets/plugin_loader/normal/filters.yml @@ -0,0 +1,13 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: ensure filters work as shipped from core + assert: + that: + - a|flatten == [1, 2, 3, 4, 5] + - a|ternary('yes', 'no') == 'yes' + vars: + a: + - 1 + - 2 + - [3, 4, 5] diff --git a/test/integration/targets/plugin_loader/normal/library/_symlink.py b/test/integration/targets/plugin_loader/normal/library/_symlink.py new file mode 120000 index 00000000..c4142e74 --- /dev/null +++ b/test/integration/targets/plugin_loader/normal/library/_symlink.py @@ -0,0 +1 @@ +_underscore.py \ No newline at end of file diff --git a/test/integration/targets/plugin_loader/normal/library/_underscore.py b/test/integration/targets/plugin_loader/normal/library/_underscore.py new file mode 100644 index 00000000..7a416a64 --- /dev/null +++ b/test/integration/targets/plugin_loader/normal/library/_underscore.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def main(): + print(json.dumps(dict(changed=False, source='legacy_library_dir'))) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/plugin_loader/normal/underscore.yml b/test/integration/targets/plugin_loader/normal/underscore.yml new file mode 100644 index 00000000..fb5bbad7 --- /dev/null +++ b/test/integration/targets/plugin_loader/normal/underscore.yml @@ -0,0 
+1,15 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: Load a deprecated module + underscore: + register: res + + - name: Load a deprecated module that is a symlink + symlink: + register: sym + + - assert: + that: + - res.source == 'legacy_library_dir' + - sym.source == 'legacy_library_dir' diff --git a/test/integration/targets/plugin_loader/override/filter_plugins/core.py b/test/integration/targets/plugin_loader/override/filter_plugins/core.py new file mode 100644 index 00000000..f283dc39 --- /dev/null +++ b/test/integration/targets/plugin_loader/override/filter_plugins/core.py @@ -0,0 +1,18 @@ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def do_flag(myval): + return 'flagged' + + +class FilterModule(object): + ''' Ansible core jinja2 filters ''' + + def filters(self): + return { + # jinja2 overrides + 'flag': do_flag, + 'flatten': do_flag, + } diff --git a/test/integration/targets/plugin_loader/override/filters.yml b/test/integration/targets/plugin_loader/override/filters.yml new file mode 100644 index 00000000..e51ab4e9 --- /dev/null +++ b/test/integration/targets/plugin_loader/override/filters.yml @@ -0,0 +1,15 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: ensure local 'flag' filter works, 'flatten' is overridden and 'ternary' is still from core + assert: + that: + - a|flag == 'flagged' + - a|flatten != [1, 2, 3, 4, 5] + - a|flatten == "flagged" + - a|ternary('yes', 'no') == 'yes' + vars: + a: + - 1 + - 2 + - [3, 4, 5] diff --git a/test/integration/targets/plugin_loader/runme.sh b/test/integration/targets/plugin_loader/runme.sh new file mode 100755 index 00000000..2a1bdeda --- /dev/null +++ b/test/integration/targets/plugin_loader/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -ux + + +# check normal execution +for myplay in normal/*.yml +do + ansible-playbook "${myplay}" -i ../../inventory -vvv "$@" + if test $? != 0 ; then + echo "### Failed to run ${myplay} normally" + exit 1 + fi +done + +# check overrides +for myplay in override/*.yml +do + ansible-playbook "${myplay}" -i ../../inventory -vvv "$@" + if test $? 
!= 0 ; then + echo "### Failed to run ${myplay} override" + exit 1 + fi +done diff --git a/test/integration/targets/plugin_namespace/aliases b/test/integration/targets/plugin_namespace/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/plugin_namespace/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py b/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py new file mode 100644 index 00000000..dca094be --- /dev/null +++ b/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py @@ -0,0 +1,15 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def filter_name(a): + return __name__ + + +class FilterModule(object): + def filters(self): + filters = { + 'filter_name': filter_name, + } + + return filters diff --git a/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py b/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py new file mode 100644 index 00000000..d0af703b --- /dev/null +++ b/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py @@ -0,0 +1,9 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + return [__name__] diff --git a/test/integration/targets/plugin_namespace/tasks/main.yml b/test/integration/targets/plugin_namespace/tasks/main.yml new file mode 100644 index 00000000..19bdd3a0 --- /dev/null +++ b/test/integration/targets/plugin_namespace/tasks/main.yml @@ -0,0 +1,11 @@ +- set_fact: + filter_name: "{{ 1 | filter_name }}" + lookup_name: "{{ lookup('lookup_name') }}" + test_name_ok: "{{ 1 is test_name_ok }}" + +- assert: + that: + # filter names are prefixed with a unique hash value to prevent shadowing of other plugins + - filter_name | regex_search('^ansible\.plugins\.filter\.[0-9]+_test_filter$') + - lookup_name == 'ansible.plugins.lookup.lookup_name' + - test_name_ok diff --git a/test/integration/targets/plugin_namespace/test_plugins/test_test.py b/test/integration/targets/plugin_namespace/test_plugins/test_test.py new file mode 100644 index 00000000..2a9d6ee0 --- /dev/null +++ b/test/integration/targets/plugin_namespace/test_plugins/test_test.py @@ -0,0 +1,16 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + + +def test_name_ok(value): + # test names are prefixed with a unique hash value to prevent shadowing of other plugins + return bool(re.match(r'^ansible\.plugins\.test\.[0-9]+_test_test$', __name__)) + + +class TestModule: + def tests(self): + return { + 'test_name_ok': test_name_ok, + } diff --git a/test/integration/targets/prepare_http_tests/defaults/main.yml b/test/integration/targets/prepare_http_tests/defaults/main.yml new file mode 100644 index 00000000..a1e5b8d1 --- /dev/null +++ b/test/integration/targets/prepare_http_tests/defaults/main.yml @@ -0,0 +1,4 @@ +badssl_host: wrong.host.badssl.com +httpbin_host: httpbin.org +sni_host: ci-files.testing.ansible.com +badssl_host_substring: wrong.host.badssl.com diff --git a/test/integration/targets/prepare_http_tests/meta/main.yml b/test/integration/targets/prepare_http_tests/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/prepare_http_tests/meta/main.yml @@ -0,0 +1,2 @@ 
+dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/prepare_http_tests/tasks/default.yml b/test/integration/targets/prepare_http_tests/tasks/default.yml new file mode 100644 index 00000000..bff90350 --- /dev/null +++ b/test/integration/targets/prepare_http_tests/tasks/default.yml @@ -0,0 +1,64 @@ +- name: RedHat - Enable the dynamic CA configuration feature + command: update-ca-trust force-enable + when: ansible_os_family == 'RedHat' + +- name: RedHat - Retrieve test cacert + get_url: + url: "http://ansible.http.tests/cacert.pem" + dest: "/etc/pki/ca-trust/source/anchors/ansible.pem" + when: ansible_os_family == 'RedHat' + +- name: Get client cert/key + get_url: + url: "http://ansible.http.tests/{{ item }}" + dest: "{{ remote_tmp_dir }}/{{ item }}" + with_items: + - client.pem + - client.key + +- name: Suse - Retrieve test cacert + get_url: + url: "http://ansible.http.tests/cacert.pem" + dest: "/etc/pki/trust/anchors/ansible.pem" + when: ansible_os_family == 'Suse' + +- name: Debian - Retrieve test cacert + get_url: + url: "http://ansible.http.tests/cacert.pem" + dest: "/usr/local/share/ca-certificates/ansible.crt" + when: ansible_os_family == 'Debian' + +- name: Redhat - Update ca trust + command: update-ca-trust extract + when: ansible_os_family == 'RedHat' + +- name: Debian/Suse - Update ca certificates + command: update-ca-certificates + when: ansible_os_family == 'Debian' or ansible_os_family == 'Suse' + +- name: FreeBSD - Retrieve test cacert + get_url: + url: "http://ansible.http.tests/cacert.pem" + dest: "/tmp/ansible.pem" + when: ansible_os_family == 'FreeBSD' + +- name: FreeBSD - Add cacert to root certificate store + blockinfile: + path: "/etc/ssl/cert.pem" + block: "{{ lookup('file', '/tmp/ansible.pem') }}" + when: ansible_os_family == 'FreeBSD' + +- name: MacOS - Retrieve test cacert + when: ansible_os_family == 'Darwin' + block: + - uri: + url: "http://ansible.http.tests/cacert.pem" + return_content: true + register: cacert_pem + + - raw: '{{ ansible_python_interpreter }} -c "import ssl; print(ssl.get_default_verify_paths().cafile)"' + register: macos_cafile + + - blockinfile: + path: "{{ macos_cafile.stdout_lines|first }}" + block: "{{ cacert_pem.content }}" diff --git a/test/integration/targets/prepare_http_tests/tasks/main.yml b/test/integration/targets/prepare_http_tests/tasks/main.yml new file mode 100644 index 00000000..86e350c2 --- /dev/null +++ b/test/integration/targets/prepare_http_tests/tasks/main.yml @@ -0,0 +1,24 @@ +# The docker --link functionality gives us an ENV var we can key off of to see if we have access to +# the httptester container +- set_fact: + has_httptester: "{{ lookup('env', 'HTTPTESTER') != '' }}" + +- name: make sure we have the ansible_os_family and ansible_distribution_version facts + setup: + gather_subset: distribution + when: ansible_facts == {} + +# If we are running with access to an httptester container, grab its cacert and install it +- block: + # Override hostname defaults with httptester linked names + - include_vars: httptester.yml + + - include_tasks: "{{ lookup('first_found', files)}}" + vars: + files: + - "{{ ansible_os_family | lower }}.yml" + - "default.yml" + when: + - has_httptester|bool + # skip the setup if running on Windows Server 2008 as httptester is not available + - ansible_os_family != 'Windows' or (ansible_os_family == 'Windows' and not ansible_distribution_version.startswith("6.0.")) diff --git a/test/integration/targets/prepare_http_tests/tasks/windows.yml 
b/test/integration/targets/prepare_http_tests/tasks/windows.yml new file mode 100644 index 00000000..da8b0eb3 --- /dev/null +++ b/test/integration/targets/prepare_http_tests/tasks/windows.yml @@ -0,0 +1,33 @@ +# Server 2008 R2 uses a 3rd party program to forward the ports, and it may +# not be ready straight away; we give it at least 5 minutes before +# conceding defeat +- name: Windows - make sure the port forwarder is active + win_wait_for: + host: ansible.http.tests + port: 80 + state: started + timeout: 300 + +- name: Windows - Get client cert/key + win_get_url: + url: http://ansible.http.tests/{{ item }} + dest: '{{ remote_tmp_dir }}\{{ item }}' + register: win_download + # Server 2008 R2 is slightly slower, so we attempt 5 retries + retries: 5 + until: win_download is successful + with_items: + - client.pem + - client.key + +- name: Windows - Retrieve test cacert + win_get_url: + url: http://ansible.http.tests/cacert.pem + dest: '{{ remote_tmp_dir }}\cacert.pem' + +- name: Windows - Update ca trust + win_certificate_store: + path: '{{ remote_tmp_dir }}\cacert.pem' + state: present + store_location: LocalMachine + store_name: Root diff --git a/test/integration/targets/prepare_http_tests/vars/httptester.yml b/test/integration/targets/prepare_http_tests/vars/httptester.yml new file mode 100644 index 00000000..0e23ae93 --- /dev/null +++ b/test/integration/targets/prepare_http_tests/vars/httptester.yml @@ -0,0 +1,5 @@ +# these are fake hostnames provided by docker link for the httptester container +badssl_host: fail.ansible.http.tests +httpbin_host: ansible.http.tests +sni_host: sni1.ansible.http.tests +badssl_host_substring: HTTP Client Testing Service diff --git a/test/integration/targets/prepare_tests/tasks/main.yml b/test/integration/targets/prepare_tests/tasks/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/pull/aliases b/test/integration/targets/pull/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/pull/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/pull/cleanup.yml b/test/integration/targets/pull/cleanup.yml new file mode 100644 index 00000000..68686964 --- /dev/null +++ b/test/integration/targets/pull/cleanup.yml @@ -0,0 +1,16 @@ +- hosts: localhost + vars: + git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") | from_json }}' + tasks: + - name: remove unwanted packages + package: + name: git + state: absent + when: git_install.changed + + - name: remove auto-installed packages from FreeBSD + package: + name: git + state: absent + autoremove: yes + when: git_install.changed and ansible_distribution == "FreeBSD" diff --git a/test/integration/targets/pull/pull-integration-test/ansible.cfg b/test/integration/targets/pull/pull-integration-test/ansible.cfg new file mode 100644 index 00000000..f8fc6cdb --- /dev/null +++ b/test/integration/targets/pull/pull-integration-test/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +inventory = inventory diff --git a/test/integration/targets/pull/pull-integration-test/inventory b/test/integration/targets/pull/pull-integration-test/inventory new file mode 100644 index 00000000..72644cef --- /dev/null +++ b/test/integration/targets/pull/pull-integration-test/inventory @@ -0,0 +1,2 @@ +testhost1.example.com +localhost diff --git a/test/integration/targets/pull/pull-integration-test/local.yml b/test/integration/targets/pull/pull-integration-test/local.yml new file mode 100644 index 
00000000..d358ee86 --- /dev/null +++ b/test/integration/targets/pull/pull-integration-test/local.yml @@ -0,0 +1,20 @@ +- name: test playbook for ansible-pull + hosts: all + gather_facts: False + tasks: + - name: debug output + debug: msg="test task" + - name: check for correct inventory + debug: msg="testing for inventory content" + failed_when: "'testhost1.example.com' not in groups['all']" + - name: check for correct limit + debug: msg="testing for limit" + failed_when: "'testhost1.example.com' == inventory_hostname" + - name: final task, has to be reached for the test to succeed + debug: msg="MAGICKEYWORD" + + - name: check that extra vars are correctly passed + assert: + that: + - docker_registries_login is defined + tags: ['never', 'test_ev'] diff --git a/test/integration/targets/pull/runme.sh b/test/integration/targets/pull/runme.sh new file mode 100755 index 00000000..dcadc495 --- /dev/null +++ b/test/integration/targets/pull/runme.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +set -eux +set -o pipefail + +# http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x +temp_dir=$(mktemp -d 2>/dev/null || mktemp -d -t 'ansible-testing-XXXXXXXXXX') +trap 'rm -rf "${temp_dir}"' EXIT + +repo_dir="${temp_dir}/repo" +pull_dir="${temp_dir}/pull" +temp_log="${temp_dir}/pull.log" + +ansible-playbook setup.yml -i ../../inventory + +cleanup="$(pwd)/cleanup.yml" + +trap 'ansible-playbook "${cleanup}" -i ../../inventory' EXIT + +cp -av "pull-integration-test" "${repo_dir}" +cd "${repo_dir}" +( + git init + git config user.email "ansible@ansible.com" + git config user.name "Ansible Test Runner" + git add . + git commit -m "Initial commit." +) + +function pass_tests { + # test for https://github.com/ansible/ansible/issues/13688 + if ! grep MAGICKEYWORD "${temp_log}"; then + cat "${temp_log}" + echo "Missing MAGICKEYWORD in output." + exit 1 + fi + + # test for https://github.com/ansible/ansible/issues/13681 + if grep -E '127\.0\.0\.1.*ok' "${temp_log}"; then + cat "${temp_log}" + echo "Found host 127.0.0.1 in output. Only localhost should be present." + exit 1 + fi + # make sure one host was run + if ! grep -E 'localhost.*ok' "${temp_log}"; then + cat "${temp_log}" + echo "Did not find host localhost in output." 
+ exit 1 + fi +} + +export ANSIBLE_INVENTORY +export ANSIBLE_HOST_PATTERN_MISMATCH + +unset ANSIBLE_INVENTORY +unset ANSIBLE_HOST_PATTERN_MISMATCH + +ANSIBLE_CONFIG='' ansible-pull -d "${pull_dir}" -U "${repo_dir}" "$@" | tee "${temp_log}" + +pass_tests + +# ensure complex extra vars work +PASSWORD='test' +USER=${USER:-'broken_docker'} +JSON_EXTRA_ARGS='{"docker_registries_login": [{ "docker_password": "'"${PASSWORD}"'", "docker_username": "'"${USER}"'", "docker_registry_url":"repository-manager.company.com:5001"}], "docker_registries_logout": [{ "docker_password": "'"${PASSWORD}"'", "docker_username": "'"${USER}"'", "docker_registry_url":"repository-manager.company.com:5001"}] }' + +ANSIBLE_CONFIG='' ansible-pull -d "${pull_dir}" -U "${repo_dir}" -e "${JSON_EXTRA_ARGS}" "$@" --tags untagged,test_ev | tee "${temp_log}" + +pass_tests diff --git a/test/integration/targets/pull/setup.yml b/test/integration/targets/pull/setup.yml new file mode 100644 index 00000000..a82d02ae --- /dev/null +++ b/test/integration/targets/pull/setup.yml @@ -0,0 +1,11 @@ +- hosts: localhost + tasks: + - name: install git + package: + name: git + when: ansible_distribution != "MacOSX" + register: git_install + - name: save install result + copy: + content: '{{ git_install }}' + dest: '{{ lookup("env", "OUTPUT_DIR") }}/git_install.json' diff --git a/test/integration/targets/raw/aliases b/test/integration/targets/raw/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/raw/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/raw/meta/main.yml b/test/integration/targets/raw/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/raw/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/raw/runme.sh b/test/integration/targets/raw/runme.sh new file mode 100755 index 00000000..07955427 --- /dev/null +++ b/test/integration/targets/raw/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -ux +export ANSIBLE_BECOME_ALLOW_SAME_USER=1 +export ANSIBLE_ROLES_PATH=../ +ansible-playbook -i ../../inventory runme.yml -e "output_dir=${OUTPUT_DIR}" -v "$@" diff --git a/test/integration/targets/raw/runme.yml b/test/integration/targets/raw/runme.yml new file mode 100644 index 00000000..ea865bca --- /dev/null +++ b/test/integration/targets/raw/runme.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: no + roles: + - { role: raw } diff --git a/test/integration/targets/raw/tasks/main.yml b/test/integration/targets/raw/tasks/main.yml new file mode 100644 index 00000000..7f99eadf --- /dev/null +++ b/test/integration/targets/raw/tasks/main.yml @@ -0,0 +1,107 @@ +# Test code for the raw module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
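+ +# For orientation (a sketch, not one of the test tasks below): raw sends the +# command string straight over the connection with no module payload and no +# Python requirement on the target, which is why it is exercised separately +# from command/shell, e.g. +# +# - name: hypothetical example, not executed here +# raw: echo $0 +# args: +# executable: /bin/sh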
+ +- set_fact: output_dir_test={{output_dir}}/test_command_raw + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +## +## raw +## + +- name: touch a file + raw: "touch {{output_dir_test | expanduser}}/test.txt" + register: raw_result0 +- debug: var=raw_result0 +- stat: + path: "{{output_dir_test | expanduser}}/test.txt" + register: raw_result0_stat +- debug: var=raw_result0_stat +- name: ensure proper results + assert: + that: + - 'raw_result0.changed is defined' + - 'raw_result0.rc is defined' + - 'raw_result0.stderr is defined' + - 'raw_result0.stdout is defined' + - 'raw_result0.stdout_lines is defined' + - 'raw_result0.rc == 0' + - 'raw_result0_stat.stat.size == 0' + +- name: run a piped command + raw: "echo 'foo,bar,baz' | cut -d\\, -f2 | tr 'b' 'c'" + register: raw_result1 +- debug: var=raw_result1 +- name: ensure proper results + assert: + that: + - 'raw_result1.changed is defined' + - 'raw_result1.rc is defined' + - 'raw_result1.stderr is defined' + - 'raw_result1.stdout is defined' + - 'raw_result1.stdout_lines is defined' + - 'raw_result1.rc == 0' + - 'raw_result1.stdout_lines == ["car"]' + +- name: get the path to bash + shell: which bash + register: bash_path +- name: run example non-posix command with bash + raw: "echo 'foobar' > {{output_dir_test | expanduser}}/test.txt ; cat < {{output_dir_test | expanduser}}/test.txt" + args: + executable: "{{ bash_path.stdout }}" + register: raw_result2 +- debug: var=raw_result2 +- name: ensure proper results + assert: + that: + - 'raw_result2.changed is defined' + - 'raw_result2.rc is defined' + - 'raw_result2.stderr is defined' + - 'raw_result2.stdout is defined' + - 'raw_result2.stdout_lines is defined' + - 'raw_result2.rc == 0' + - 'raw_result2.stdout_lines == ["foobar"]' +# the following five tests were added to test https://github.com/ansible/ansible/pull/68315 +- name: get the path to sh + shell: which sh + register: sh_path +- name: use sh + raw: echo $0 + args: + executable: "{{ sh_path.stdout }}" + become: true + become_method: su + register: sh_output +- name: assert sh + assert: + that: "(sh_output.stdout | trim) == sh_path.stdout" +- name: use bash + raw: echo $0 + args: + executable: "{{ bash_path.stdout }}" + become: true + become_method: su + register: bash_output +- name: assert bash + assert: + that: "(bash_output.stdout | trim) == bash_path.stdout" diff --git a/test/integration/targets/reboot/aliases b/test/integration/targets/reboot/aliases new file mode 100644 index 00000000..e9bebbf3 --- /dev/null +++ b/test/integration/targets/reboot/aliases @@ -0,0 +1,2 @@ +# No current way to split controller and test node +unsupported diff --git a/test/integration/targets/reboot/tasks/check_reboot.yml b/test/integration/targets/reboot/tasks/check_reboot.yml new file mode 100644 index 00000000..1aff1be2 --- /dev/null +++ b/test/integration/targets/reboot/tasks/check_reboot.yml @@ -0,0 +1,10 @@ +- name: Get current boot time + command: "{{ boot_time_command[ansible_facts['distribution'] | lower] | default('cat /proc/sys/kernel/random/boot_id') }}" + register: after_boot_time + +- name: Ensure system was actually rebooted + assert: + that: + - reboot_result is changed + - reboot_result.elapsed > 10 + - before_boot_time.stdout != after_boot_time.stdout diff --git a/test/integration/targets/reboot/tasks/get_boot_time.yml b/test/integration/targets/reboot/tasks/get_boot_time.yml new 
file mode 100644 index 00000000..cec22f06 --- /dev/null +++ b/test/integration/targets/reboot/tasks/get_boot_time.yml @@ -0,0 +1,3 @@ +- name: Get current boot time + command: "{{ boot_time_command[ansible_facts['distribution'] | lower] | default('cat /proc/sys/kernel/random/boot_id') }}" + register: before_boot_time diff --git a/test/integration/targets/reboot/tasks/main.yml b/test/integration/targets/reboot/tasks/main.yml new file mode 100644 index 00000000..2568b9b2 --- /dev/null +++ b/test/integration/targets/reboot/tasks/main.yml @@ -0,0 +1,111 @@ +- block: + # This block can be removed once we have a mechanism in ansible-test to separate + # the control node from the managed node. + - block: + - name: Write temp file for sanity checking this is not the controller + copy: + content: 'I am the control node' + dest: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue + delegate_to: localhost + connection: local + when: inventory_hostname == ansible_play_hosts[0] + + - name: See if the temp file exists on the managed node + stat: + path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue + register: controller_temp_file + + - name: EXPECT FAILURE | Check if the managed node is the control node + assert: + msg: > + This test must be run manually by modifying the inventory file to point + "{{ inventory_hostname }}" at a remote host rather than "{{ ansible_host }}". + Skipping reboot test. + that: + - not controller_temp_file.stat.exists + + - import_tasks: get_boot_time.yml + + - name: Reboot with default settings + reboot: + register: reboot_result + + - import_tasks: check_reboot.yml + + - import_tasks: get_boot_time.yml + + - name: Reboot with all options + reboot: + connect_timeout: 30 + search_paths: /usr/local/bin + msg: Rebooting + post_reboot_delay: 1 + pre_reboot_delay: 61 + test_command: uptime + reboot_timeout: 500 + register: reboot_result + + - import_tasks: check_reboot.yml + + - import_tasks: get_boot_time.yml + + - name: Test with negative values for delays + reboot: + post_reboot_delay: -0.5 + pre_reboot_delay: -61 + register: reboot_result + + - import_tasks: check_reboot.yml + + - name: Use invalid parameter + reboot: + foo: bar + ignore_errors: true + register: invalid_parameter + + - name: Ensure task fails with error + assert: + that: + - invalid_parameter is failed + - "invalid_parameter.msg == 'Invalid options for reboot: foo'" + + - name: Reboot with test command that fails + reboot: + test_command: 'FAIL' + reboot_timeout: "{{ timeout }}" + register: reboot_fail_test + failed_when: "reboot_fail_test.msg != 'Timed out waiting for post-reboot test command (timeout=' ~ timeout ~ ')'" + vars: + timeout: "{{ timeout_value[ansible_facts['distribution'] | lower] | default(60) }}" + + - name: Test molly-guard + block: + - import_tasks: get_boot_time.yml + + - name: Install molly-guard + apt: + update_cache: yes + name: molly-guard + state: present + + - name: Reboot when molly-guard is installed + reboot: + search_paths: /lib/molly-guard + register: reboot_result + + - import_tasks: check_reboot.yml + + when: ansible_facts.distribution in ['Debian', 'Ubuntu'] + tags: + - molly-guard + + always: + - name: Cleanup temp file + file: + path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue + state: absent + delegate_to: localhost + connection: local + when: inventory_hostname == ansible_play_hosts[0] + + when: ansible_virtualization_type | default('') != 'docker' diff --git a/test/integration/targets/reboot/vars/main.yml b/test/integration/targets/reboot/vars/main.yml new file mode 100644 index 
00000000..24367c80 --- /dev/null +++ b/test/integration/targets/reboot/vars/main.yml @@ -0,0 +1,9 @@ +boot_time_command: + freebsd: '/sbin/sysctl kern.boottime' + openbsd: '/sbin/sysctl kern.boottime' + macosx: 'who -b' + solaris: 'who -b' + sunos: 'who -b' + +timeout_value: + solaris: 120 diff --git a/test/integration/targets/rel_plugin_loading/aliases b/test/integration/targets/rel_plugin_loading/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/rel_plugin_loading/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/rel_plugin_loading/notyaml.yml b/test/integration/targets/rel_plugin_loading/notyaml.yml new file mode 100644 index 00000000..23ab0323 --- /dev/null +++ b/test/integration/targets/rel_plugin_loading/notyaml.yml @@ -0,0 +1,5 @@ +all: + hosts: + testhost: + ansible_connection: local + ansible_python_interpreter: "{{ansible_playbook_python}}" diff --git a/test/integration/targets/rel_plugin_loading/runme.sh b/test/integration/targets/rel_plugin_loading/runme.sh new file mode 100755 index 00000000..34e70fdd --- /dev/null +++ b/test/integration/targets/rel_plugin_loading/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_INVENTORY_ENABLED=notyaml ansible-playbook subdir/play.yml -i notyaml.yml "$@" diff --git a/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py b/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py new file mode 100644 index 00000000..d013fc48 --- /dev/null +++ b/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py @@ -0,0 +1,168 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + inventory: yaml + version_added: "2.4" + short_description: Uses a specific YAML file as an inventory source. + description: + - "YAML-based inventory, should start with the C(all) group and contain hosts/vars/children entries." + - Host entries can have sub-entries defined, which will be treated as variables. + - Vars entries are normal group vars. + - "Children are 'child groups', which can also have their own vars/hosts/children and so on." + - File MUST have a valid extension, defined in configuration. + notes: + - If you want to set vars for the C(all) group inside the inventory file, the C(all) group must be the first entry in the file. + - Whitelisted in configuration by default. + options: + yaml_extensions: + description: list of 'valid' extensions for files containing YAML + type: list + default: ['.yaml', '.yml', '.json'] + env: + - name: ANSIBLE_YAML_FILENAME_EXT + - name: ANSIBLE_INVENTORY_PLUGIN_EXTS + ini: + - key: yaml_valid_extensions + section: defaults + - section: inventory_plugin_yaml + key: yaml_valid_extensions + +''' +EXAMPLES = ''' +all: # keys must be unique, i.e. 
only one 'hosts' per group + hosts: + test1: + test2: + host_var: value + vars: + group_all_var: value + children: # key order does not matter, indentation does + other_group: + children: + group_x: + hosts: + test5 + vars: + g2_var2: value3 + hosts: + test4: + ansible_host: 127.0.0.1 + last_group: + hosts: + test1 # same host as above, additional group membership + vars: + group_last_var: value +''' + +import os + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.plugins.inventory import BaseFileInventoryPlugin + +NoneType = type(None) + + +class InventoryModule(BaseFileInventoryPlugin): + + NAME = 'yaml' + + def __init__(self): + + super(InventoryModule, self).__init__() + + def verify_file(self, path): + + valid = False + if super(InventoryModule, self).verify_file(path): + file_name, ext = os.path.splitext(path) + if not ext or ext in self.get_option('yaml_extensions'): + valid = True + return valid + + def parse(self, inventory, loader, path, cache=True): + ''' parses the inventory file ''' + + super(InventoryModule, self).parse(inventory, loader, path) + self.set_options() + + try: + data = self.loader.load_from_file(path, cache=False) + except Exception as e: + raise AnsibleParserError(e) + + if not data: + raise AnsibleParserError('Parsed empty YAML file') + elif not isinstance(data, MutableMapping): + raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data)) + elif data.get('plugin'): + raise AnsibleParserError('Plugin configuration YAML file, not YAML inventory') + + # We expect top level keys to correspond to groups, iterate over them + # to get host, vars and subgroups (which we iterate over recursively) + if isinstance(data, MutableMapping): + for group_name in data: + self._parse_group(group_name, data[group_name]) + else: + raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(data)) + + def _parse_group(self, group, group_data): + + if isinstance(group_data, (MutableMapping, NoneType)): + + try: + self.inventory.add_group(group) + except AnsibleError as e: + raise AnsibleParserError("Unable to add group %s: %s" % (group, to_text(e))) + + if group_data is not None: + # make sure they are dicts + for section in ['vars', 'children', 'hosts']: + if section in group_data: + # convert strings to dicts as these are allowed + if isinstance(group_data[section], string_types): + group_data[section] = {group_data[section]: None} + + if not isinstance(group_data[section], (MutableMapping, NoneType)): + raise AnsibleParserError('Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.' 
% + (section, group, type(group_data[section]))) + + for key in group_data: + + if not isinstance(group_data[key], (MutableMapping, NoneType)): + self.display.warning('Skipping key (%s) in group (%s) as it is not a mapping, it is a %s' % (key, group, type(group_data[key]))) + continue + + if isinstance(group_data[key], NoneType): + self.display.vvv('Skipping empty key (%s) in group (%s)' % (key, group)) + elif key == 'vars': + for var in group_data[key]: + self.inventory.set_variable(group, var, group_data[key][var]) + elif key == 'children': + for subgroup in group_data[key]: + self._parse_group(subgroup, group_data[key][subgroup]) + self.inventory.add_child(group, subgroup) + + elif key == 'hosts': + for host_pattern in group_data[key]: + hosts, port = self._parse_host(host_pattern) + self._populate_host_vars(hosts, group_data[key][host_pattern] or {}, group, port) + else: + self.display.warning('Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid' % (key, group)) + + else: + self.display.warning("Skipping '%s' as this is not a valid group definition" % group) + + def _parse_host(self, host_pattern): + ''' + Each host key can be a pattern; try to process it and add variables as needed + ''' + (hostnames, port) = self._expand_hostpattern(host_pattern) + + return hostnames, port diff --git a/test/integration/targets/rel_plugin_loading/subdir/play.yml b/test/integration/targets/rel_plugin_loading/subdir/play.yml new file mode 100644 index 00000000..2326b14a --- /dev/null +++ b/test/integration/targets/rel_plugin_loading/subdir/play.yml @@ -0,0 +1,6 @@ +- hosts: all + gather_facts: false + tasks: + - assert: + that: + - inventory_hostname == 'testhost' diff --git a/test/integration/targets/remote_tmp/aliases b/test/integration/targets/remote_tmp/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/remote_tmp/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/remote_tmp/playbook.yml b/test/integration/targets/remote_tmp/playbook.yml new file mode 100644 index 00000000..43f99ca5 --- /dev/null +++ b/test/integration/targets/remote_tmp/playbook.yml @@ -0,0 +1,57 @@ +- name: Test temp dir on de-escalation + hosts: testhost + become: yes + tasks: + - name: create test user + user: + name: tmptest + state: present + group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}' + + - name: execute test case + become_user: tmptest + block: + - name: Test case from issue 41340 + blockinfile: + create: yes + block: | + export foo=bar + marker: "# {mark} Here there be a marker" + dest: /tmp/testing.txt + mode: 0644 + always: + - name: clean up file + file: path=/tmp/testing.txt state=absent + + - name: clean up test user + user: name=tmptest state=absent + become_user: root + +- name: Test tempdir is removed + hosts: testhost + gather_facts: false + tasks: + - file: + state: touch + path: "{{ output_dir }}/65393" + + - copy: + src: "{{ output_dir }}/65393" + dest: "{{ output_dir }}/65393.2" + remote_src: true + + - find: + path: "~/.ansible/tmp" + use_regex: yes + patterns: 'AnsiballZ_.+\.py' + recurse: true + register: result + + - debug: + var: result + + - assert: + that: + # Should only be AnsiballZ_find.py because find is actively running + - result.files|length == 1 + - result.files[0].path.endswith('/AnsiballZ_find.py') diff --git a/test/integration/targets/remote_tmp/runme.sh b/test/integration/targets/remote_tmp/runme.sh new file mode 100755 index 
00000000..8d1eebd6 --- /dev/null +++ b/test/integration/targets/remote_tmp/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -ux + +ansible-playbook -i ../../inventory playbook.yml -e "output_dir=${OUTPUT_DIR}" -v "$@" diff --git a/test/integration/targets/replace/aliases b/test/integration/targets/replace/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/replace/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/replace/meta/main.yml b/test/integration/targets/replace/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/replace/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/replace/tasks/main.yml b/test/integration/targets/replace/tasks/main.yml new file mode 100644 index 00000000..24146ff3 --- /dev/null +++ b/test/integration/targets/replace/tasks/main.yml @@ -0,0 +1,265 @@ +# setup +- set_fact: output_dir_test={{output_dir}}/test_replace + +- name: make sure our testing sub-directory does not exist + file: path="{{ output_dir_test }}" state=absent + +- name: create our testing sub-directory + file: path="{{ output_dir_test }}" state=directory + +# tests +- name: create test files + copy: + content: |- + The quick brown fox jumps over the lazy dog. + We promptly judged antique ivory buckles for the next prize. + Jinxed wizards pluck ivy from the big quilt. + Jaded zombies acted quaintly but kept driving their oxen forward. + dest: "{{ output_dir_test }}/pangrams.{{ item }}.txt" + with_sequence: start=0 end=6 format=%02x #increment as needed + + +## test `before` option +- name: remove all spaces before "quilt" + replace: + path: "{{ output_dir_test }}/pangrams.00.txt" + before: 'quilt' + regexp: ' ' + register: replace_test0 + +- command: "cat {{ output_dir_test }}/pangrams.00.txt" + register: replace_cat0 + +- name: validate before assertions + assert: + that: + - replace_test0 is successful + - replace_test0 is changed + - replace_cat0.stdout_lines[0] == 'Thequickbrownfoxjumpsoverthelazydog.' + - replace_cat0.stdout_lines[-1] == 'Jaded zombies acted quaintly but kept driving their oxen forward.' + + +## test `after` option +- name: remove all spaces after "promptly" + replace: + path: "{{ output_dir_test }}/pangrams.01.txt" + after: 'promptly' + regexp: ' ' + register: replace_test1 + +- command: "cat {{ output_dir_test }}/pangrams.01.txt" + register: replace_cat1 + +- name: validate after assertions + assert: + that: + - replace_test1 is successful + - replace_test1 is changed + - replace_cat1.stdout_lines[0] == 'The quick brown fox jumps over the lazy dog.' + - replace_cat1.stdout_lines[-1] == 'Jadedzombiesactedquaintlybutkeptdrivingtheiroxenforward.' 
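+ +# Note (added for clarity): 'after' limits replacement to text following its first match and +# 'before' to text preceding its match; used together they scope the replacement to the region +# running from the 'after' match forward to the 'before' match, as the next tasks demonstrate. +# A commented, hypothetical sketch of the combined form (path is illustrative only): +# - replace: +# path: /tmp/example.txt +# after: 'promptly' +# before: 'quilt' +# regexp: 'e' +# replace: '3'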
+ + +## test combined `before` and `after` options +- name: before "promptly" but after "quilt", replace every "e" with a "3" + replace: + path: "{{ output_dir_test }}/pangrams.02.txt" + before: 'promptly' + after: 'quilt' + regexp: 'e' + replace: '3' + register: replace_test2 + +- name: validate after+before assertions + assert: + that: + - replace_test2 is successful + - not replace_test2 is changed + - replace_test2.msg.startswith("Pattern for before/after params did not match the given file") + +- name: before "quilt" but after "promptly", replace every "e" with a "3" + replace: + path: "{{ output_dir_test }}/pangrams.03.txt" + before: 'quilt' + after: 'promptly' + regexp: 'e' + replace: '3' + register: replace_test3 + +- command: "cat {{ output_dir_test }}/pangrams.03.txt" + register: replace_cat3 + +- name: validate before+after assertions + assert: + that: + - replace_test3 is successful + - replace_test3 is changed + - replace_cat3.stdout_lines[1] == 'We promptly judg3d antiqu3 ivory buckl3s for th3 n3xt priz3.' + + +## test ^$ behavior in MULTILINE, and . behavior in absence of DOTALL +- name: quote everything between bof and eof + replace: + path: "{{ output_dir_test }}/pangrams.04.txt" + regexp: ^([\S\s]+)$ + replace: '"\1"' + register: replace_test4_0 + +- command: "cat {{ output_dir_test }}/pangrams.04.txt" + register: replace_cat4_0 + +- name: quote everything between bol and eol + replace: + path: "{{ output_dir_test }}/pangrams.04.txt" + regexp: ^(.+)$ + replace: '"\1"' + register: replace_test4_1 + +- command: "cat {{ output_dir_test }}/pangrams.04.txt" + register: replace_cat4_1 + +- name: validate multiline assertions + assert: + that: + - replace_test4_0 is successful + - replace_test4_0 is changed + - replace_test4_1 is successful + - replace_test4_1 is changed + - replace_cat4_0.stdout_lines[0] == '"The quick brown fox jumps over the lazy dog.' + - replace_cat4_0.stdout_lines[-1] == 'Jaded zombies acted quaintly but kept driving their oxen forward."' + - replace_cat4_1.stdout_lines[0] == '""The quick brown fox jumps over the lazy dog."' + - replace_cat4_1.stdout_lines[-1] == '"Jaded zombies acted quaintly but kept driving their oxen forward.""' + + +## test \b escaping in short and long form +- name: short form with unescaped word boundaries + replace: path="{{ output_dir_test }}/pangrams.05.txt" regexp='\b(.+)\b' replace='"\1"' + register: replace_test5_0 + +- name: short form with escaped word boundaries + replace: path="{{ output_dir_test }}/pangrams.05.txt" regexp='\\b(.+)\\b' replace='"\1"' + register: replace_test5_1 + +- command: "cat {{ output_dir_test }}/pangrams.05.txt" + register: replace_cat5_1 + +- name: long form with unescaped word boundaries + replace: + path: "{{ output_dir_test }}/pangrams.05.txt" + regexp: '\b(.+)\b' + replace: '"\1"' + register: replace_test5_2 + +- command: "cat {{ output_dir_test }}/pangrams.05.txt" + register: replace_cat5_2 + +- name: long form with escaped word boundaries + replace: + path: "{{ output_dir_test }}/pangrams.05.txt" + regexp: '\\b(.+)\\b' + replace: '"\1"' + register: replace_test5_3 + +- name: validate word boundary assertions + assert: + that: + - not replace_test5_0 is changed + - replace_test5_1 is changed + - replace_test5_2 is changed + - not replace_test5_3 is changed + - replace_cat5_1.stdout_lines[0] == '"The quick brown fox jumps over the lazy dog".' + - replace_cat5_1.stdout_lines[-1] == '"Jaded zombies acted quaintly but kept driving their oxen forward".'
+ - replace_cat5_2.stdout_lines[0] == '""The quick brown fox jumps over the lazy dog"".' + - replace_cat5_2.stdout_lines[-1] == '""Jaded zombies acted quaintly but kept driving their oxen forward"".' + + +## test backup behaviors +- name: replacement with backup + replace: + path: "{{ output_dir_test }}/pangrams.06.txt" + regexp: ^(.+)$ + replace: '"\1"' + backup: true + register: replace_test6 + +- command: "cat {{ output_dir_test }}/pangrams.06.txt" + register: replace_cat6_0 + +- command: "cat {{ replace_test6.backup_file }}" + register: replace_cat6_1 + +- name: validate backup + assert: + that: + - replace_test6 is successful + - replace_test6 is changed + - replace_test6.backup_file is search('/pangrams.06.txt.') + - replace_cat6_0.stdout != replace_cat6_1.stdout + + +## test filesystem failures +- name: fail on directory + replace: + path: "{{ output_dir_test }}" + regexp: ^(.+)$ + register: replace_test7_1 + ignore_errors: true + +- name: fail on missing file + replace: + path: "{{ output_dir_test }}/missing_file.txt" + regexp: ^(.+)$ + register: replace_test7_2 + ignore_errors: true + +- name: validate failure assertions + assert: + that: + - replace_test7_1 is failure + - replace_test7_2 is failure + - replace_test7_1.msg.endswith(" is a directory !") + - replace_test7_2.msg.endswith(" does not exist !") + + +## test subsection replacement when before/after potentially match more than once +- name: test file for subsection replacement gone awry + copy: + content: |- + # start of group + 0.0.0.0 + 127.0.0.1 + 127.0.1.1 + # end of group + + # start of group + 0.0.0.0 + 127.0.0.1 + 127.0.1.1 + # end of group + + # start of group + 0.0.0.0 + 127.0.0.1 + 127.0.1.1 + # end of group + dest: "{{ output_dir_test }}/addresses.txt" + +- name: subsection madness + replace: + path: "{{ output_dir_test }}/addresses.txt" + after: '# start of group' + before: '# end of group' + regexp: '0' + replace: '9' + register: replace_test8 + +- command: "cat {{ output_dir_test }}/addresses.txt" + register: replace_cat8 + +- name: validate before+after assertions + assert: + that: + - replace_test8 is successful + - replace_test8 is changed + - replace_cat8.stdout_lines[1] == "9.9.9.9" + - replace_cat8.stdout_lines[7] == "0.0.0.0" + - replace_cat8.stdout_lines[13] == "0.0.0.0" diff --git a/test/integration/targets/retry_task_name_in_callback/aliases b/test/integration/targets/retry_task_name_in_callback/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/retry_task_name_in_callback/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/retry_task_name_in_callback/runme.sh b/test/integration/targets/retry_task_name_in_callback/runme.sh new file mode 100755 index 00000000..5f636cd8 --- /dev/null +++ b/test/integration/targets/retry_task_name_in_callback/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eux + +# we are looking to verify that the callback for v2_retry_runner gets a correct task name, even +# when the value needs templating based on results of previous tasks +OUTFILE="callback_retry_task_name.out" +trap 'rm -rf "${OUTFILE}"' EXIT + +EXPECTED_REGEX="^.*TASK.*18236 callback task template fix OUTPUT 2" +ansible-playbook "$@" -i ../../inventory test.yml | tee "${OUTFILE}" +echo "Grepping for ${EXPECTED_REGEX} in stdout."
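+# Added comment: because of 'set -eux' above, a non-matching grep exits non-zero and fails the +# whole script, so the grep below is the actual assertion that the retried task's name was +# templated from the previous task's registered stdout.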
+grep -e "${EXPECTED_REGEX}" "${OUTFILE}" diff --git a/test/integration/targets/retry_task_name_in_callback/test.yml b/test/integration/targets/retry_task_name_in_callback/test.yml new file mode 100644 index 00000000..0e450cf9 --- /dev/null +++ b/test/integration/targets/retry_task_name_in_callback/test.yml @@ -0,0 +1,28 @@ +--- +- hosts: testhost + gather_facts: False + vars: + foo: blippy + tasks: + - name: First run {{ foo }} + command: echo "18236 callback task template fix OUTPUT 1" + register: the_result_var + + - block: + - name: "{{ the_result_var.stdout }}" + command: echo "18236 callback task template fix OUTPUT 2" + register: the_result_var + retries: 1 + delay: 1 + until: False + ignore_errors: true + + # - name: assert task_name was + + - name: "{{ the_result_var.stdout }}" + command: echo "18236 callback taskadfadf template fix OUTPUT 3" + register: the_result_var + + - name: "{{ the_result_var.stdout }}" + debug: + msg: "nothing to see here." diff --git a/test/integration/targets/roles/aliases b/test/integration/targets/roles/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/roles/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/roles/allowed_dupes.yml b/test/integration/targets/roles/allowed_dupes.yml new file mode 100644 index 00000000..998950b3 --- /dev/null +++ b/test/integration/targets/roles/allowed_dupes.yml @@ -0,0 +1,18 @@ +- name: test that import_role adds one (just one) execution of the role + hosts: localhost + gather_facts: false + tags: ['importrole'] + roles: + - name: a + tasks: + - name: import role ignores dupe rule + import_role: name=a + +- name: test that include_role adds one (just one) execution of the role + hosts: localhost + gather_facts: false + tags: ['includerole'] + roles: + - name: a + tasks: + - include_role: name=a diff --git a/test/integration/targets/roles/data_integrity.yml b/test/integration/targets/roles/data_integrity.yml new file mode 100644 index 00000000..5eb4fb32 --- /dev/null +++ b/test/integration/targets/roles/data_integrity.yml @@ -0,0 +1,4 @@ +- hosts: all + gather_facts: false + roles: + - data diff --git a/test/integration/targets/roles/no_dupes.yml b/test/integration/targets/roles/no_dupes.yml new file mode 100644 index 00000000..0ac9ff94 --- /dev/null +++ b/test/integration/targets/roles/no_dupes.yml @@ -0,0 +1,19 @@ +- name: play should only show 1 invocation of a, as dependencies in this play are deduped + hosts: testhost + gather_facts: false + tags: [ 'inroles' ] + roles: + - role: a + - role: b + - role: c + +- name: play should only show 1 invocation of a, as dependencies in this play are deduped even outside of roles + hosts: testhost + gather_facts: false + tags: [ 'acrossroles' ] + roles: + - role: a + - role: b + tasks: + - name: execute role c which depends on a + import_role: name=c diff --git a/test/integration/targets/roles/roles/a/tasks/main.yml b/test/integration/targets/roles/roles/a/tasks/main.yml new file mode 100644 index 00000000..7fb1b487 --- /dev/null +++ b/test/integration/targets/roles/roles/a/tasks/main.yml @@ -0,0 +1 @@ +- debug: msg=A diff --git a/test/integration/targets/roles/roles/b/meta/main.yml b/test/integration/targets/roles/roles/b/meta/main.yml new file mode 100644 index 00000000..f95ffe65 --- /dev/null +++ b/test/integration/targets/roles/roles/b/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - name: a diff --git a/test/integration/targets/roles/roles/b/tasks/main.yml 
b/test/integration/targets/roles/roles/b/tasks/main.yml new file mode 100644 index 00000000..57c13524 --- /dev/null +++ b/test/integration/targets/roles/roles/b/tasks/main.yml @@ -0,0 +1 @@ +- debug: msg=B diff --git a/test/integration/targets/roles/roles/c/meta/main.yml b/test/integration/targets/roles/roles/c/meta/main.yml new file mode 100644 index 00000000..04bd23be --- /dev/null +++ b/test/integration/targets/roles/roles/c/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - name: a diff --git a/test/integration/targets/roles/roles/c/tasks/main.yml b/test/integration/targets/roles/roles/c/tasks/main.yml new file mode 100644 index 00000000..190c429b --- /dev/null +++ b/test/integration/targets/roles/roles/c/tasks/main.yml @@ -0,0 +1 @@ +- debug: msg=C diff --git a/test/integration/targets/roles/roles/data/defaults/main/00.yml b/test/integration/targets/roles/roles/data/defaults/main/00.yml new file mode 100644 index 00000000..98c13a15 --- /dev/null +++ b/test/integration/targets/roles/roles/data/defaults/main/00.yml @@ -0,0 +1 @@ +defined_var: 1 diff --git a/test/integration/targets/roles/roles/data/defaults/main/01.yml b/test/integration/targets/roles/roles/data/defaults/main/01.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/roles/roles/data/tasks/main.yml b/test/integration/targets/roles/roles/data/tasks/main.yml new file mode 100644 index 00000000..8d85580c --- /dev/null +++ b/test/integration/targets/roles/roles/data/tasks/main.yml @@ -0,0 +1,5 @@ +- name: ensure data was correctly defined + assert: + that: + - defined_var is defined + - defined_var == 1 diff --git a/test/integration/targets/roles/runme.sh b/test/integration/targets/roles/runme.sh new file mode 100755 index 00000000..f2058ff1 --- /dev/null +++ b/test/integration/targets/roles/runme.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -eux + +# test no dupes when dependencies in b and c point to a in roles: +[ "$(ansible-playbook no_dupes.yml -i ../../inventory --tags inroles "$@" | grep -c '"msg": "A"')" = "1" ] +[ "$(ansible-playbook no_dupes.yml -i ../../inventory --tags acrossroles "$@" | grep -c '"msg": "A"')" = "1" ] + +# but still dupe across plays +[ "$(ansible-playbook no_dupes.yml -i ../../inventory "$@" | grep -c '"msg": "A"')" = "2" ] + +# include/import can execute another instance of a role +[ "$(ansible-playbook allowed_dupes.yml -i ../../inventory --tags importrole "$@" | grep -c '"msg": "A"')" = "2" ] +[ "$(ansible-playbook allowed_dupes.yml -i ../../inventory --tags includerole "$@" | grep -c '"msg": "A"')" = "2" ] + + +# ensure role data is merged correctly +ansible-playbook data_integrity.yml -i ../../inventory "$@" diff --git a/test/integration/targets/rpm_key/aliases b/test/integration/targets/rpm_key/aliases new file mode 100644 index 00000000..3a07aab3 --- /dev/null +++ b/test/integration/targets/rpm_key/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/rpm_key/defaults/main.yaml b/test/integration/targets/rpm_key/defaults/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/rpm_key/tasks/main.yaml b/test/integration/targets/rpm_key/tasks/main.yaml new file mode 100644 index 00000000..9f6fd4ec --- /dev/null +++ b/test/integration/targets/rpm_key/tasks/main.yaml @@ -0,0 +1,2 @@ + - include: 'rpm_key.yaml' + when: ansible_os_family == "RedHat" diff --git a/test/integration/targets/rpm_key/tasks/rpm_key.yaml b/test/integration/targets/rpm_key/tasks/rpm_key.yaml new
file mode 100644 index 00000000..58020f48 --- /dev/null +++ b/test/integration/targets/rpm_key/tasks/rpm_key.yaml @@ -0,0 +1,195 @@ +--- +# +# Save initial state +# +- name: Retrieve a list of the gpg keys installed for package checking + shell: 'rpm -q gpg-pubkey | sort' + register: list_of_pubkeys + +- name: Retrieve the gpg keys used to verify packages + command: 'rpm -q --qf %{description} gpg-pubkey' + register: pubkeys + +- name: Save gpg keys to a file + copy: + content: "{{ pubkeys['stdout'] }}\n" + dest: '{{ output_dir }}/pubkeys' + mode: 0600 + +# +# Tests start +# +- name: download EPEL GPG key + get_url: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7 + dest: /tmp/RPM-GPG-KEY-EPEL-7 + +- name: download sl rpm + get_url: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/sl-5.02-1.el7.x86_64.rpm + dest: /tmp/sl.rpm + +- name: download Mono key + get_url: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/mono.gpg + dest: /tmp/mono.gpg + +- name: remove EPEL GPG key from keyring + rpm_key: + state: absent + key: /tmp/RPM-GPG-KEY-EPEL-7 + +- name: check GPG signature of sl. Should fail + shell: "rpm --checksig /tmp/sl.rpm" + register: sl_check + ignore_errors: yes + +- name: confirm that signature check failed + assert: + that: + - "'MISSING KEYS' in sl_check.stdout or 'SIGNATURES NOT OK' in sl_check.stdout" + - "sl_check.failed" + +- name: remove EPEL GPG key from keyring (idempotent) + rpm_key: + state: absent + key: /tmp/RPM-GPG-KEY-EPEL-7 + register: idempotent_test + +- name: check idempotence + assert: + that: "not idempotent_test.changed" + +- name: add EPEL GPG key to key ring + rpm_key: + state: present + key: /tmp/RPM-GPG-KEY-EPEL-7 + +- name: add EPEL GPG key to key ring (idempotent) + rpm_key: + state: present + key: /tmp/RPM-GPG-KEY-EPEL-7 + +- name: add Mono gpg key + rpm_key: + state: present + key: /tmp/mono.gpg + +- name: add Mono gpg key (idempotent) + rpm_key: + state: present + key: /tmp/mono.gpg + register: mono_idempotence + +- name: verify idempotence + assert: + that: "not mono_idempotence.changed" + +- name: check GPG signature of sl. Should return okay + shell: "rpm --checksig /tmp/sl.rpm" + register: sl_check + +- name: confirm that signature check succeeded + assert: + that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout" + +- name: remove GPG key from url + rpm_key: + state: absent + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7 + +- name: Confirm key is missing + shell: "rpm --checksig /tmp/sl.rpm" + register: sl_check + ignore_errors: yes + +- name: confirm that signature check failed + assert: + that: + - "'MISSING KEYS' in sl_check.stdout or 'SIGNATURES NOT OK' in sl_check.stdout" + - "sl_check.failed" + +- name: add GPG key from url + rpm_key: + state: present + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7 + +- name: check GPG signature of sl.
Should return okay + shell: "rpm --checksig /tmp/sl.rpm" + register: sl_check + +- name: confirm that signature check succeeded + assert: + that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout" + +- name: remove all keys from key ring + shell: "rpm -q gpg-pubkey | xargs rpm -e" + +- name: add the very first key on the system + rpm_key: + state: present + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7 + +- name: check GPG signature of sl. Should return okay + shell: "rpm --checksig /tmp/sl.rpm" + register: sl_check + +- name: confirm that signature check succeeded + assert: + that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout" + +- name: Issue 20325 - Verify fingerprint of key, invalid fingerprint - EXPECTED FAILURE + rpm_key: + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag + fingerprint: 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 + register: result + failed_when: result is success + +- name: Issue 20325 - Assert Verify fingerprint of key, invalid fingerprint + assert: + that: + - result is success + - result is not changed + - "'does not match the key fingerprint' in result.msg" + +- name: Issue 20325 - Verify fingerprint of key, valid fingerprint + rpm_key: + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag + fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6 + register: result + +- name: Issue 20325 - Assert Verify fingerprint of key, valid fingerprint + assert: + that: + - result is success + - result is changed + +- name: Issue 20325 - Verify fingerprint of key, valid fingerprint - Idempotent check + rpm_key: + key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag + fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6 + register: result + +- name: Issue 20325 - Assert Verify fingerprint of key, valid fingerprint - Idempotent check + assert: + that: + - result is success + - result is not changed + +# +# Cleanup +# +- name: remove all keys from key ring + shell: "rpm -q gpg-pubkey | xargs rpm -e" + +- name: Restore the gpg keys normally installed on the system + command: 'rpm --import {{ output_dir }}/pubkeys' + +- name: Retrieve a list of the gpg keys installed for package checking + shell: 'rpm -q gpg-pubkey | sort' + register: new_list_of_pubkeys + +- name: Confirm that we've restored all the pubkeys + assert: + that: + - 'list_of_pubkeys["stdout"] == new_list_of_pubkeys["stdout"]' diff --git a/test/integration/targets/run_modules/aliases b/test/integration/targets/run_modules/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/run_modules/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/run_modules/args.json b/test/integration/targets/run_modules/args.json new file mode 100644 index 00000000..c3abc21a --- /dev/null +++ b/test/integration/targets/run_modules/args.json @@ -0,0 +1 @@ +{ "ANSIBLE_MODULE_ARGS": {} } diff --git a/test/integration/targets/run_modules/library/test.py b/test/integration/targets/run_modules/library/test.py new file mode 100644 index 00000000..bbe3182c --- /dev/null +++ b/test/integration/targets/run_modules/library/test.py @@ -0,0 +1,7 @@ +#!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule + +module = AnsibleModule(argument_spec=dict()) +
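+# Added comment: _remote_tmp is AnsibleModule's internal remote temp-directory setting; +# returning it lets the calling test confirm the module executed and parsed args.json when +# run directly outside of Ansible (see runme.sh).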
+module.exit_json(**{'tempdir': module._remote_tmp}) diff --git a/test/integration/targets/run_modules/runme.sh b/test/integration/targets/run_modules/runme.sh new file mode 100755 index 00000000..34c245cb --- /dev/null +++ b/test/integration/targets/run_modules/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +# test running module directly +python.py library/test.py args.json diff --git a/test/integration/targets/script/aliases b/test/integration/targets/script/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/script/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/script/files/create_afile.sh b/test/integration/targets/script/files/create_afile.sh new file mode 100755 index 00000000..e6fae448 --- /dev/null +++ b/test/integration/targets/script/files/create_afile.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "win" > "$1" \ No newline at end of file diff --git a/test/integration/targets/script/files/no_shebang.py b/test/integration/targets/script/files/no_shebang.py new file mode 100644 index 00000000..c6c813af --- /dev/null +++ b/test/integration/targets/script/files/no_shebang.py @@ -0,0 +1,3 @@ +import sys + +sys.stdout.write("Script with shebang omitted") diff --git a/test/integration/targets/script/files/remove_afile.sh b/test/integration/targets/script/files/remove_afile.sh new file mode 100755 index 00000000..4a7fea66 --- /dev/null +++ b/test/integration/targets/script/files/remove_afile.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +rm "$1" \ No newline at end of file diff --git a/test/integration/targets/script/files/space path/test.sh b/test/integration/targets/script/files/space path/test.sh new file mode 100755 index 00000000..6f6334d7 --- /dev/null +++ b/test/integration/targets/script/files/space path/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo -n "Script with space in path" \ No newline at end of file diff --git a/test/integration/targets/script/files/test.sh b/test/integration/targets/script/files/test.sh new file mode 100755 index 00000000..ade17e9b --- /dev/null +++ b/test/integration/targets/script/files/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo -n "win" \ No newline at end of file diff --git a/test/integration/targets/script/files/test_with_args.sh b/test/integration/targets/script/files/test_with_args.sh new file mode 100755 index 00000000..13dce4f2 --- /dev/null +++ b/test/integration/targets/script/files/test_with_args.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +for i in "$@"; do + echo "$i" +done \ No newline at end of file diff --git a/test/integration/targets/script/meta/main.yml b/test/integration/targets/script/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/script/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/script/tasks/main.yml b/test/integration/targets/script/tasks/main.yml new file mode 100644 index 00000000..f1746f7c --- /dev/null +++ b/test/integration/targets/script/tasks/main.yml @@ -0,0 +1,240 @@ +# Test code for the script module and action_plugin. +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +## +## prep +## + +- set_fact: + output_dir_test: "{{ output_dir }}/test_script" + +- name: make sure our testing sub-directory does not exist + file: + path: "{{ output_dir_test }}" + state: absent + +- name: create our testing sub-directory + file: + path: "{{ output_dir_test }}" + state: directory + +## +## script +## + +- name: execute the test.sh script via command + script: test.sh + register: script_result0 + +- name: assert that the script executed correctly + assert: + that: + - "script_result0.rc == 0" + - "script_result0.stdout == 'win'" + +- name: Execute a script with a space in the path + script: "'space path/test.sh'" + register: _space_path_test + tags: + - spacepath + +- name: Assert that script with space in path ran successfully + assert: + that: + - _space_path_test is success + - _space_path_test.stdout == 'Script with space in path' + tags: + - spacepath + +- name: Execute a script with arguments including a unicode character + script: test_with_args.sh -this -that -Ӧther + register: unicode_args + +- name: Assert that script with unicode character ran successfully + assert: + that: + - unicode_args is success + - unicode_args.stdout_lines[0] == '-this' + - unicode_args.stdout_lines[1] == '-that' + - unicode_args.stdout_lines[2] == '-Ӧther' + +# creates +- name: verify that afile.txt is absent + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent + +- name: create afile.txt with create_afile.sh via command + script: create_afile.sh {{ output_dir_test | expanduser }}/afile.txt + args: + creates: "{{ output_dir_test | expanduser }}/afile.txt" + register: _create_test1 + +- name: Check state of created file + stat: + path: "{{ output_dir_test | expanduser }}/afile.txt" + register: _create_stat1 + +- name: Run create_afile.sh again to ensure it is skipped + script: create_afile.sh {{ output_dir_test | expanduser }}/afile.txt + args: + creates: "{{ output_dir_test | expanduser }}/afile.txt" + register: _create_test2 + +- name: Assert that the script reports a change, file was created, second run was skipped + assert: + that: + - _create_test1 is changed + - _create_stat1.stat.exists + - _create_test2 is skipped + + +# removes +- name: verify that afile.txt is present + file: + path: "{{ output_dir_test }}/afile.txt" + state: file + +- name: remove afile.txt with remove_afile.sh via command + script: remove_afile.sh {{ output_dir_test | expanduser }}/afile.txt + args: + removes: "{{ output_dir_test | expanduser }}/afile.txt" + register: _remove_test1 + +- name: Check state of removed file + stat: + path: "{{ output_dir_test | expanduser }}/afile.txt" + register: _remove_stat1 + +- name: Run remove_afile.sh again to ensure it is skipped + script: remove_afile.sh {{ output_dir_test | expanduser }}/afile.txt + args: + removes: "{{ output_dir_test | expanduser }}/afile.txt" + register: _remove_test2 + +- name: Assert that the script reports a change, file was removed, second run was skipped + assert: + that: + - _remove_test1 is changed + - not _remove_stat1.stat.exists + - _remove_test2 is skipped + + +# async +- name: verify that afile.txt is absent + file: + path: "{{ output_dir_test }}/afile.txt" + state: absent + +- name:
test task failure with async param + script: /some/script.sh + async: 2 + ignore_errors: true + register: script_result3 + +- name: assert task with async param failed + assert: + that: + - script_result3 is failed + - script_result3.msg == "async is not supported for this task." + + +# check mode +- name: Run script to create a file in check mode + script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt + check_mode: yes + register: _check_mode_test + +- debug: + var: _check_mode_test + verbosity: 2 + +- name: Get state of file created by script + stat: + path: "{{ output_dir_test | expanduser }}/afile2.txt" + register: _afile_stat + +- debug: + var: _afile_stat + verbosity: 2 + +- name: Assert that a change was reported but the script did not make changes + assert: + that: + - _check_mode_test is changed + - not _afile_stat.stat.exists + +- name: Run script to create a file + script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt + +- name: Run script to create a file in check mode with 'creates' argument + script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt + args: + creates: "{{ output_dir_test | expanduser }}/afile2.txt" + register: _check_mode_test2 + check_mode: yes + +- debug: + var: _check_mode_test2 + verbosity: 2 + +- name: Assert that task was skipped and message was returned + assert: + that: + - _check_mode_test2 is skipped + - '_check_mode_test2.msg == "{{ output_dir_test | expanduser }}/afile2.txt exists, matching creates option"' + +- name: Remove afile2.txt + file: + path: "{{ output_dir_test | expanduser }}/afile2.txt" + state: absent + +- name: Run script to remove a file in check mode with 'removes' argument + script: remove_afile.sh {{ output_dir_test | expanduser }}/afile2.txt + args: + removes: "{{ output_dir_test | expanduser }}/afile2.txt" + register: _check_mode_test3 + check_mode: yes + +- debug: + var: _check_mode_test3 + verbosity: 2 + +- name: Assert that task was skipped and message was returned + assert: + that: + - _check_mode_test3 is skipped + - '_check_mode_test3.msg == "{{ output_dir_test | expanduser }}/afile2.txt does not exist, matching removes option"' + +# executable + +- name: Run script with shebang omitted + script: no_shebang.py + args: + executable: "{{ ansible_python_interpreter }}" + register: _shebang_omitted_test + tags: + - noshebang + +- name: Assert that script with shebang omitted succeeded + assert: + that: + - _shebang_omitted_test is success + - _shebang_omitted_test.stdout == 'Script with shebang omitted' + tags: + - noshebang diff --git a/test/integration/targets/service/aliases b/test/integration/targets/service/aliases new file mode 100644 index 00000000..1ef4c361 --- /dev/null +++ b/test/integration/targets/service/aliases @@ -0,0 +1,5 @@ +destructive +shippable/posix/group1 +skip/aix +skip/osx +skip/macos diff --git a/test/integration/targets/service/files/ansible-broken.upstart b/test/integration/targets/service/files/ansible-broken.upstart new file mode 100644 index 00000000..4e9c6694 --- /dev/null +++ b/test/integration/targets/service/files/ansible-broken.upstart @@ -0,0 +1,10 @@ +description "ansible test daemon" + +start on runlevel [345] +stop on runlevel [!345] + +expect daemon + +exec ansible_test_service + +manual diff --git a/test/integration/targets/service/files/ansible.rc b/test/integration/targets/service/files/ansible.rc new file mode 100644 index 00000000..ec77d521 --- /dev/null +++ b/test/integration/targets/service/files/ansible.rc @@ -0,0 +1,16 @@ +#!/bin/sh +
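+# Added note: the PROVIDE/REQUIRE/BEFORE/KEYWORD comments below are rc(8) metadata that +# rcorder(8) parses to place this test service in the FreeBSD boot order, so they must +# remain in exactly this comment form.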
+# PROVIDE: ansible_test_service +# REQUIRE: FILESYSTEMS devfs +# BEFORE: LOGIN +# KEYWORD: nojail shutdown + +. /etc/rc.subr + +name="ansible_test_service" +rcvar="ansible_test_service_enable" +command="/usr/sbin/${name}" +pidfile="/var/run/${name}.pid" +extra_commands=reload +load_rc_config $name +run_rc_command "$1" diff --git a/test/integration/targets/service/files/ansible.systemd b/test/integration/targets/service/files/ansible.systemd new file mode 100644 index 00000000..3466f25a --- /dev/null +++ b/test/integration/targets/service/files/ansible.systemd @@ -0,0 +1,11 @@ +[Unit] +Description=Ansible Test Service + +[Service] +ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork" +ExecReload=/bin/true +Type=forking +PIDFile=/var/run/ansible_test_service.pid + +[Install] +WantedBy=multi-user.target diff --git a/test/integration/targets/service/files/ansible.sysv b/test/integration/targets/service/files/ansible.sysv new file mode 100755 index 00000000..1df0423d --- /dev/null +++ b/test/integration/targets/service/files/ansible.sysv @@ -0,0 +1,134 @@ +#!/bin/sh +# + +# LSB header + +### BEGIN INIT INFO +# Provides: ansible-test +# Default-Start: 3 4 5 +# Default-Stop: 0 1 2 6 +# Short-Description: test daemon for ansible +# Description: This is a test daemon used by ansible for testing only +### END INIT INFO + +# chkconfig header + +# chkconfig: 345 99 99 +# description: This is a test daemon used by ansible for testing only +# +# processname: /usr/sbin/ansible_test_service + +# Sanity checks. +[ -x /usr/sbin/ansible_test_service ] || exit 0 + +DEBIAN_VERSION=/etc/debian_version +SUSE_RELEASE=/etc/SuSE-release +# Source function library. +if [ -f $DEBIAN_VERSION ]; then + . /lib/lsb/init-functions +elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then + . /etc/rc.status +else + . /etc/rc.d/init.d/functions +fi + +SERVICE=ansible_test_service +PROCESS=ansible_test_service +CONFIG_ARGS=" " +if [ -f $DEBIAN_VERSION ]; then + LOCKFILE=/var/lock/$SERVICE +else + LOCKFILE=/var/lock/subsys/$SERVICE +fi + +RETVAL=0 + +start() { + echo -n "Starting ansible test daemon: " + if [ -f $SUSE_RELEASE ]; then + startproc -p /var/run/${SERVICE}.pid -f /usr/sbin/ansible_test_service + rc_status -v + elif [ -e $DEBIAN_VERSION ]; then + if [ -f $LOCKFILE ]; then + echo -n "already started, lock file found" + RETVAL=1 + elif /usr/sbin/ansible_test_service; then + echo -n "OK" + RETVAL=0 + fi + else + daemon --check $SERVICE $PROCESS --daemonize $CONFIG_ARGS + fi + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch $LOCKFILE + return $RETVAL +} + +stop() { + echo -n "Stopping ansible test daemon: " + if [ -f $SUSE_RELEASE ]; then + killproc -TERM /usr/sbin/ansible_test_service + rc_status -v + elif [ -f $DEBIAN_VERSION ]; then + # Added this since Debian's start-stop-daemon doesn't support spawned processes + if ps -ef | grep "/usr/sbin/ansible_test_service" | grep -v grep | awk '{print $2}' | xargs kill &> /dev/null; then + echo -n "OK" + RETVAL=0 + else + echo -n "Daemon is not started" + RETVAL=1 + fi + else + killproc -p /var/run/${SERVICE}.pid + fi + RETVAL=$? + echo + if [ $RETVAL -eq 0 ]; then + rm -f $LOCKFILE + rm -f /var/run/$SERVICE.pid + fi +} + +restart() { + stop + start +} + +# See how we were called. 
+case "$1" in + start|stop|restart) + $1 + ;; + status) + if [ -f $SUSE_RELEASE ]; then + echo -n "Checking for ansible test service " + checkproc /usr/sbin/ansible_test_service + rc_status -v + elif [ -f $DEBIAN_VERSION ]; then + if [ -f $LOCKFILE ]; then + RETVAL=0 + echo "ansible test is running." + else + RETVAL=1 + echo "ansible test is stopped." + fi + else + status $PROCESS + RETVAL=$? + fi + ;; + condrestart) + [ -f $LOCKFILE ] && restart || : + ;; + reload) + echo "ok" + RETVAL=0 + ;; + *) + echo "Usage: $0 {start|stop|status|restart|condrestart|reload}" + exit 1 + ;; +esac +exit $RETVAL + diff --git a/test/integration/targets/service/files/ansible.upstart b/test/integration/targets/service/files/ansible.upstart new file mode 100644 index 00000000..369f61a8 --- /dev/null +++ b/test/integration/targets/service/files/ansible.upstart @@ -0,0 +1,9 @@ +description "ansible test daemon" + +start on runlevel [345] +stop on runlevel [!345] + +expect daemon + +exec ansible_test_service + diff --git a/test/integration/targets/service/files/ansible_test_service.py b/test/integration/targets/service/files/ansible_test_service.py new file mode 100644 index 00000000..c4feed85 --- /dev/null +++ b/test/integration/targets/service/files/ansible_test_service.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +# this is mostly based off of the code found here: +# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/ + +import os +import resource +import signal +import sys +import time + +UMASK = 0 +WORKDIR = "/" +MAXFD = 1024 + +if (hasattr(os, "devnull")): + REDIRECT_TO = os.devnull +else: + REDIRECT_TO = "/dev/null" + + +def createDaemon(): + try: + pid = os.fork() + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): + os.setsid() + + try: + pid = os.fork() + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): + os.chdir(WORKDIR) + os.umask(UMASK) + else: + f = open('/var/run/ansible_test_service.pid', 'w') + f.write("%d\n" % pid) + f.close() + os._exit(0) + else: + os._exit(0) + + maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if (maxfd == resource.RLIM_INFINITY): + maxfd = MAXFD + + for fd in range(0, maxfd): + try: + os.close(fd) + except OSError: # ERROR, fd wasn't open to begin with (ignored) + pass + + os.open(REDIRECT_TO, os.O_RDWR) + os.dup2(0, 1) + os.dup2(0, 2) + + return (0) + + +if __name__ == "__main__": + + signal.signal(signal.SIGHUP, signal.SIG_IGN) + + retCode = createDaemon() + + while True: + time.sleep(1000) diff --git a/test/integration/targets/service/meta/main.yml b/test/integration/targets/service/meta/main.yml new file mode 100644 index 00000000..399f3fb6 --- /dev/null +++ b/test/integration/targets/service/meta/main.yml @@ -0,0 +1,20 @@ +# test code for the service module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +dependencies: + - prepare_tests diff --git a/test/integration/targets/service/tasks/main.yml b/test/integration/targets/service/tasks/main.yml new file mode 100644 index 00000000..69a9ef20 --- /dev/null +++ b/test/integration/targets/service/tasks/main.yml @@ -0,0 +1,58 @@ +- name: install the test daemon script + copy: + src: ansible_test_service.py + dest: /usr/sbin/ansible_test_service + mode: '755' + +- name: rewrite shebang in the test daemon script + lineinfile: + path: /usr/sbin/ansible_test_service + line: "#!{{ ansible_python_interpreter | realpath }}" + insertbefore: BOF + firstmatch: yes + +- block: + # determine which init system is in use + - name: detect sysv init system + set_fact: + service_type: sysv + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] + - ansible_distribution_version is version('6', '>=') + - ansible_distribution_version is version('7', '<') + - name: detect systemd init system + set_fact: + service_type: systemd + when: (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or ansible_distribution == 'Fedora' or (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or ansible_os_family == 'Suse' + - name: detect upstart init system + set_fact: + service_type: upstart + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_version is version('15.04', '<') + - name: detect rc init system + set_fact: + service_type: rc + when: + - ansible_distribution.lower().endswith('bsd') + + + - name: display value of ansible_service_mgr + debug: + msg: 'ansible_service_mgr: {{ ansible_service_mgr }}' + + - name: setup test service script + include_tasks: '{{ service_type }}_setup.yml' + + - name: execute tests + import_tasks: tests.yml + + always: + - name: disable and stop ansible test service + service: + name: ansible_test + state: stopped + enabled: false + + # cleaning up changes made by this playbook + - include_tasks: '{{ service_type }}_cleanup.yml' diff --git a/test/integration/targets/service/tasks/rc_cleanup.yml b/test/integration/targets/service/tasks/rc_cleanup.yml new file mode 100644 index 00000000..47f470c6 --- /dev/null +++ b/test/integration/targets/service/tasks/rc_cleanup.yml @@ -0,0 +1,9 @@ +- name: remove the rc init file + file: path=/etc/rc.d/ansible_test state=absent + register: remove_rc_result + +- name: assert that the rc init file was removed + assert: + that: + - "remove_rc_result.path == '/etc/rc.d/ansible_test'" + - "remove_rc_result.state == 'absent'" diff --git a/test/integration/targets/service/tasks/rc_setup.yml b/test/integration/targets/service/tasks/rc_setup.yml new file mode 100644 index 00000000..45d2c90d --- /dev/null +++ b/test/integration/targets/service/tasks/rc_setup.yml @@ -0,0 +1,21 @@ +- name: install the rc init file + copy: src=ansible.rc dest=/etc/rc.d/ansible_test mode=0755 + register: install_rc_result + +- name: assert that the rc init file was installed + assert: + that: + - "install_rc_result.dest == '/etc/rc.d/ansible_test'" + - "install_rc_result.state == 'file'" + - "install_rc_result.mode == '0755'" + - "install_rc_result.checksum == '8526e4571d2ac685fa5a73af723183c194bda35d'" + +# FreeBSD (likely others as well) requires the command_interpreter to match the +# shebang the script was started with as an extra caution against killing the +# wrong thing. We add the line here.
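+# For illustration (hypothetical interpreter path), the injected line would read: +# command_interpreter=/usr/local/bin/python3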
+- name: add command_interpreter in rc init file + lineinfile: + path: /etc/rc.d/ansible_test + line: "command_interpreter={{ ansible_python_interpreter | realpath }}" + insertafter: '^pidfile.*' + firstmatch: yes diff --git a/test/integration/targets/service/tasks/systemd_cleanup.yml b/test/integration/targets/service/tasks/systemd_cleanup.yml new file mode 100644 index 00000000..e0707265 --- /dev/null +++ b/test/integration/targets/service/tasks/systemd_cleanup.yml @@ -0,0 +1,25 @@ +- name: remove the systemd unit file + file: path=/usr/lib/systemd/system/ansible_test.service state=absent + register: remove_systemd_result + +- name: remove the broken systemd unit file + file: path=/usr/lib/systemd/system/ansible_test_broken.service state=absent + register: remove_systemd_broken_result + +- debug: var=remove_systemd_broken_result +- name: assert that the systemd unit files were removed + assert: + that: + - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'" + - "remove_systemd_result.state == 'absent'" + - "remove_systemd_broken_result.path == '/usr/lib/systemd/system/ansible_test_broken.service'" + - "remove_systemd_broken_result.state == 'absent'" + +- name: make sure systemd is reloaded + shell: systemctl daemon-reload + register: restart_systemd_result + +- name: assert that systemd was reloaded + assert: + that: + - "restart_systemd_result.rc == 0" diff --git a/test/integration/targets/service/tasks/systemd_setup.yml b/test/integration/targets/service/tasks/systemd_setup.yml new file mode 100644 index 00000000..a9170a38 --- /dev/null +++ b/test/integration/targets/service/tasks/systemd_setup.yml @@ -0,0 +1,17 @@ +- name: install the systemd unit file + copy: src=ansible.systemd dest=/etc/systemd/system/ansible_test.service mode=0644 + register: install_systemd_result + +- name: install a broken systemd unit file + file: src=ansible_test.service path=/etc/systemd/system/ansible_test_broken.service state=link + register: install_broken_systemd_result + +- name: assert that the systemd unit file was installed + assert: + that: + - "install_systemd_result.dest == '/etc/systemd/system/ansible_test.service'" + - "install_systemd_result.state == 'file'" + - "install_systemd_result.mode == '0644'" + - "install_systemd_result.checksum == '9e6320795a5c79c01230a6de1c343ea32097af52'" + - "install_broken_systemd_result.dest == '/etc/systemd/system/ansible_test_broken.service'" + - "install_broken_systemd_result.state == 'link'" diff --git a/test/integration/targets/service/tasks/sysv_cleanup.yml b/test/integration/targets/service/tasks/sysv_cleanup.yml new file mode 100644 index 00000000..dbdfcf8b --- /dev/null +++ b/test/integration/targets/service/tasks/sysv_cleanup.yml @@ -0,0 +1,9 @@ +- name: remove the sysV init file + file: path=/etc/init.d/ansible_test state=absent + register: remove_sysv_result + +- name: assert that the sysV init file was removed + assert: + that: + - "remove_sysv_result.path == '/etc/init.d/ansible_test'" + - "remove_sysv_result.state == 'absent'" diff --git a/test/integration/targets/service/tasks/sysv_setup.yml b/test/integration/targets/service/tasks/sysv_setup.yml new file mode 100644 index 00000000..7b648c24 --- /dev/null +++ b/test/integration/targets/service/tasks/sysv_setup.yml @@ -0,0 +1,11 @@ +- name: install the sysV init file + copy: src=ansible.sysv dest=/etc/init.d/ansible_test mode=0755 + register: install_sysv_result + +- name: assert that the sysV init file was installed + assert: + that: + - "install_sysv_result.dest ==
'/etc/init.d/ansible_test'" + - "install_sysv_result.state == 'file'" + - "install_sysv_result.mode == '0755'" + - "install_sysv_result.checksum == '362899814c47d9aad6e93b2f64e39edd24e38797'" diff --git a/test/integration/targets/service/tasks/tests.yml b/test/integration/targets/service/tasks/tests.yml new file mode 100644 index 00000000..de66bf5c --- /dev/null +++ b/test/integration/targets/service/tasks/tests.yml @@ -0,0 +1,225 @@ +- name: disable the ansible test service + service: name=ansible_test enabled=no + +- name: (check mode run) enable the ansible test service + service: name=ansible_test enabled=yes + register: enable_in_check_mode_result + check_mode: yes + +- name: assert that changes reported for check mode run + assert: + that: + - "enable_in_check_mode_result is changed" + +- name: enable the ansible test service + service: name=ansible_test enabled=yes + register: enable_result + +- name: assert that the service was enabled and changes reported + assert: + that: + - "enable_result.enabled == true" + - "enable_result is changed" + +- name: start the ansible test service + service: name=ansible_test state=started + register: start_result + +- name: assert that the service was started + assert: + that: + - "start_result.state == 'started'" + - "start_result is changed" + +- name: check that the service was started + shell: 'cat /proc/$(cat /var/run/ansible_test_service.pid)/cmdline' + register: cmdline + failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout + # No proc on BSD + when: not ansible_distribution.lower().endswith('bsd') + +- name: check that the service was started (*bsd) + shell: 'ps -p $(cat /var/run/ansible_test_service.pid)' + register: cmdline + failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout + when: ansible_distribution.lower().endswith('bsd') + +- name: find the service with a pattern + service: name=ansible_test pattern="ansible_test_ser" state=started + register: start2_result + +- name: assert that the service was started via the pattern + assert: + that: + - "start2_result.name == 'ansible_test'" + - "start2_result.state == 'started'" + - "start2_result is not changed" + +- name: fetch PID for ansible_test service (before restart) + command: 'cat /var/run/ansible_test_service.pid' + register: pid_before_restart + +- name: restart the ansible test service + service: name=ansible_test state=restarted + register: restart_result + +- name: assert that the service was restarted + assert: + that: + - "restart_result.state == 'started'" + - "restart_result is changed" + +- name: fetch PID for ansible_test service (after restart) + command: 'cat /var/run/ansible_test_service.pid' + register: pid_after_restart + +- name: "check that PIDs aren't the same" + fail: + when: pid_before_restart.stdout == pid_after_restart.stdout + +- name: check that service is started + command: 'cat /proc/{{ pid_after_restart.stdout }}/cmdline' + register: cmdline + failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout + # No proc on BSD + when: not ansible_distribution.lower().endswith('bsd') + +- name: check that the service is started (*bsd) + shell: 'ps -p {{ pid_after_restart.stdout }}' + register: cmdline + failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout + when: ansible_distribution.lower().endswith('bsd') + +- name: restart the ansible test service with a sleep + service: name=ansible_test state=restarted sleep=2 + 
register: restart_sleep_result + +- name: assert that the service was restarted with a sleep + assert: + that: + - "restart_sleep_result.state == 'started'" + - "restart_sleep_result is changed" + +- name: reload the ansible test service + service: name=ansible_test state=reloaded + register: reload_result + # don't do this on systems with systemd because it triggers error: + # Unable to reload service ansible_test: ansible_test.service is not active, cannot reload. + when: service_type != "systemd" + +- name: assert that the service was reloaded + assert: + that: + - "reload_result.state == 'started'" + - "reload_result is changed" + when: service_type != "systemd" + +- name: "test for #42786 (sysvinit)" + when: service_type == "sysv" + block: + - name: "sysvinit (#42786): check state, 'enable' parameter isn't set" + service: use=sysvinit name=ansible_test state=started + + - name: "sysvinit (#42786): check that service is still enabled" + service: use=sysvinit name=ansible_test enabled=yes + register: result_enabled + failed_when: result_enabled is changed + +- name: fetch PID for ansible_test service + command: 'cat /var/run/ansible_test_service.pid' + register: ansible_test_pid + +- name: check that service is started + command: 'cat /proc/{{ ansible_test_pid.stdout }}/cmdline' + register: cmdline + failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout + # No proc on BSD + when: not ansible_distribution.lower().endswith('bsd') + +- name: check that the service is started (*bsd) + shell: 'ps -p {{ ansible_test_pid.stdout }}' + register: cmdline + failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout + when: ansible_distribution.lower().endswith('bsd') + +- name: stop the ansible test service + service: name=ansible_test state=stopped + register: stop_result + +- name: check that the service is stopped + command: 'cat /proc/{{ ansible_test_pid.stdout }}/cmdline' + register: cmdline + failed_when: cmdline is not failed or '\0/usr/sbin/ansible_test_service\0' in cmdline.stdout + # No proc on BSD + when: not ansible_distribution.lower().endswith('bsd') + +- name: check that the service is stopped (*bsd) + shell: 'ps -p {{ ansible_test_pid.stdout }}' + register: cmdline + failed_when: cmdline is not failed or '/usr/sbin/ansible_test_service' in cmdline.stdout + when: ansible_distribution.lower().endswith('bsd') + +- name: assert that the service was stopped + assert: + that: + - "stop_result.state == 'stopped'" + - "stop_result is changed" + +- name: disable the ansible test service + service: name=ansible_test enabled=no + register: disable_result + +- name: assert that the service was disabled + assert: + that: + - "disable_result.enabled == false" + - "disable_result is changed" + +- name: try to enable a broken service + service: name=ansible_broken_test enabled=yes + register: broken_enable_result + ignore_errors: True + +- name: assert that the broken test failed + assert: + that: + - "broken_enable_result is failed" + +- name: remove the test daemon script + file: path=/usr/sbin/ansible_test_service state=absent + register: remove_result + +- name: assert that the test daemon script was removed + assert: + that: + - "remove_result.path == '/usr/sbin/ansible_test_service'" + - "remove_result.state == 'absent'" + +- name: the module must fail when a service is not found + service: + name: 'nonexisting' + state: stopped + register: result + ignore_errors: yes + when: ansible_distribution != 'FreeBSD' + +- assert: + 
that: + - result is failed + - result is search("Could not find the requested service nonexisting") + when: ansible_distribution != 'FreeBSD' + +- name: the module must fail in check_mode as well when a service is not found + service: + name: 'nonexisting' + state: stopped + register: result + check_mode: yes + ignore_errors: yes + when: ansible_distribution != 'FreeBSD' + +- assert: + that: + - result is failed + - result is search("Could not find the requested service nonexisting") + when: ansible_distribution != 'FreeBSD' diff --git a/test/integration/targets/service/tasks/upstart_cleanup.yml b/test/integration/targets/service/tasks/upstart_cleanup.yml new file mode 100644 index 00000000..683fb104 --- /dev/null +++ b/test/integration/targets/service/tasks/upstart_cleanup.yml @@ -0,0 +1,17 @@ +- vars: + upstart_files: + - /etc/init/ansible_test.conf + - /etc/init/ansible_test.override + - /etc/init/ansible_test_broken.conf + block: + - name: remove upstart init files + file: + path: '{{ item }}' + state: absent + loop: '{{ upstart_files }}' + + - name: assert that upstart init files were removed + raw: 'test -e {{ item }}' + loop: '{{ upstart_files }}' + register: file_exists + failed_when: file_exists is not failed diff --git a/test/integration/targets/service/tasks/upstart_setup.yml b/test/integration/targets/service/tasks/upstart_setup.yml new file mode 100644 index 00000000..e9607bb0 --- /dev/null +++ b/test/integration/targets/service/tasks/upstart_setup.yml @@ -0,0 +1,19 @@ +- name: install the upstart init file + copy: src=ansible.upstart dest=/etc/init/ansible_test.conf mode=0644 + register: install_upstart_result + +- name: install an upstart init file that will fail (manual in .conf) + copy: src=ansible-broken.upstart dest=/etc/init/ansible_broken_test.conf mode=0644 + register: install_upstart_broken_result + +- name: assert that the upstart init file was installed + assert: + that: + - "install_upstart_result.dest == '/etc/init/ansible_test.conf'" + - "install_upstart_result.state == 'file'" + - "install_upstart_result.mode == '0644'" + - "install_upstart_result.checksum == '5c314837b6c4dd6c68d1809653a2974e9078e02a'" + - "install_upstart_broken_result.dest == '/etc/init/ansible_broken_test.conf'" + - "install_upstart_broken_result.state == 'file'" + - "install_upstart_broken_result.mode == '0644'" + - "install_upstart_broken_result.checksum == 'e66497894f2b2bf71e1380a196cc26089cc24a10'" diff --git a/test/integration/targets/service/templates/main.yml b/test/integration/targets/service/templates/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/service_facts/aliases b/test/integration/targets/service_facts/aliases new file mode 100644 index 00000000..cc0aa0d9 --- /dev/null +++ b/test/integration/targets/service_facts/aliases @@ -0,0 +1,5 @@ +shippable/posix/group3 +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/test/integration/targets/service_facts/files/ansible.systemd b/test/integration/targets/service_facts/files/ansible.systemd new file mode 100644 index 00000000..3466f25a --- /dev/null +++ b/test/integration/targets/service_facts/files/ansible.systemd @@ -0,0 +1,11 @@ +[Unit] +Description=Ansible Test Service + +[Service] +ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork" +ExecReload=/bin/true +Type=forking +PIDFile=/var/run/ansible_test_service.pid + +[Install] +WantedBy=multi-user.target diff --git a/test/integration/targets/service_facts/files/ansible_test_service.py 
b/test/integration/targets/service_facts/files/ansible_test_service.py
new file mode 100644
index 00000000..19f1e291
--- /dev/null
+++ b/test/integration/targets/service_facts/files/ansible_test_service.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# this is mostly based off of the code found here:
+# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import resource
+import signal
+import time
+
+UMASK = 0
+WORKDIR = "/"
+MAXFD = 1024
+
+if (hasattr(os, "devnull")):
+    REDIRECT_TO = os.devnull
+else:
+    REDIRECT_TO = "/dev/null"
+
+
+def createDaemon():
+    try:
+        pid = os.fork()
+    except OSError as e:
+        raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+    if (pid == 0):
+        os.setsid()
+
+        try:
+            pid = os.fork()
+        except OSError as e:
+            raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+        if (pid == 0):
+            os.chdir(WORKDIR)
+            os.umask(UMASK)
+        else:
+            f = open('/var/run/ansible_test_service.pid', 'w')
+            f.write("%d\n" % pid)
+            f.close()
+            os._exit(0)
+    else:
+        os._exit(0)
+
+    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+    if (maxfd == resource.RLIM_INFINITY):
+        maxfd = MAXFD
+
+    for fd in range(0, maxfd):
+        try:
+            os.close(fd)
+        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
+            pass
+
+    os.open(REDIRECT_TO, os.O_RDWR)
+    os.dup2(0, 1)
+    os.dup2(0, 2)
+
+    return (0)
+
+
+if __name__ == "__main__":
+
+    signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+    retCode = createDaemon()
+
+    while True:
+        time.sleep(1000)
diff --git a/test/integration/targets/service_facts/tasks/main.yml b/test/integration/targets/service_facts/tasks/main.yml
new file mode 100644
index 00000000..5a08fad3
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/main.yml
@@ -0,0 +1,25 @@
+# Test playbook for the service_facts module
+# Copyright: (c) 2017, Adam Miller
+# Copyright: (c) 2020, Abhijeet Kasurde
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Gather service facts
+  service_facts:
+
+- name: check that ansible_facts.services exists
+  assert:
+    that: ansible_facts.services is defined
+
+- name: Test disabled service facts (https://github.com/ansible/ansible/issues/69144)
+  block:
+    - name: display value of ansible_service_mgr
+      debug:
+        msg: 'ansible_service_mgr: {{ ansible_service_mgr }}'
+
+    - name: setup test service script
+      include_tasks: 'systemd_setup.yml'
+
+    - name: execute tests
+      import_tasks: tests.yml
+
+  when: (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or ansible_distribution == 'Fedora' or (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or ansible_os_family == 'Suse'
diff --git a/test/integration/targets/service_facts/tasks/systemd_cleanup.yml b/test/integration/targets/service_facts/tasks/systemd_cleanup.yml
new file mode 100644
index 00000000..b68530b9
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/systemd_cleanup.yml
@@ -0,0 +1,32 @@
+- name: remove the systemd unit file
+  file:
+    path: /usr/lib/systemd/system/ansible_test.service
+    state: absent
+  register: remove_systemd_result
+
+- name: assert that the systemd unit file was removed
+  assert:
+    that:
+      - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'"
+      -
"remove_systemd_result.state == 'absent'" + +- name: remove python systemd test script file + file: + path: /usr/sbin/ansible_test_service + state: absent + register: remove_systemd_binary_result + +- name: assert that python systemd test script file was removed + assert: + that: + - "remove_systemd_binary_result.path == '/usr/sbin/ansible_test_service'" + - "remove_systemd_binary_result.state == 'absent'" + +- name: make sure systemd is reloaded + shell: systemctl daemon-reload + register: restart_systemd_result + +- name: assert that systemd was reloaded + assert: + that: + - "restart_systemd_result.rc == 0" diff --git a/test/integration/targets/service_facts/tasks/systemd_setup.yml b/test/integration/targets/service_facts/tasks/systemd_setup.yml new file mode 100644 index 00000000..85eeed0c --- /dev/null +++ b/test/integration/targets/service_facts/tasks/systemd_setup.yml @@ -0,0 +1,26 @@ +- name: install the test daemon script + copy: + src: ansible_test_service.py + dest: /usr/sbin/ansible_test_service + mode: '755' + +- name: rewrite shebang in the test daemon script + lineinfile: + path: /usr/sbin/ansible_test_service + line: "#!{{ ansible_python_interpreter | realpath }}" + insertbefore: BOF + firstmatch: yes + +- name: install the systemd unit file + copy: + src: ansible.systemd + dest: /etc/systemd/system/ansible_test.service + mode: '0644' + register: install_systemd_result + +- name: assert that the systemd unit file was installed + assert: + that: + - "install_systemd_result.dest == '/etc/systemd/system/ansible_test.service'" + - "install_systemd_result.state == 'file'" + - "install_systemd_result.mode == '0644'" diff --git a/test/integration/targets/service_facts/tasks/tests.yml b/test/integration/targets/service_facts/tasks/tests.yml new file mode 100644 index 00000000..495b71fb --- /dev/null +++ b/test/integration/targets/service_facts/tasks/tests.yml @@ -0,0 +1,36 @@ +- name: start the ansible test service + service: + name: ansible_test + enabled: yes + state: started + register: enable_result + +- name: assert that the service was enabled and changes reported + assert: + that: + - "enable_result.enabled == true" + - "enable_result is changed" + +- name: disable the ansible test service + service: + name: ansible_test + state: stopped + enabled: no + register: start_result + +- name: assert that the service was stopped + assert: + that: + - "start_result.state == 'stopped'" + - "start_result is changed" + +- name: Populate service facts + service_facts: + +- name: get ansible_test service's state + debug: + var: services['ansible_test.service'].state + +- name: ansible_test service's running state should be \"inactive\" + assert: + that: "services['ansible_test.service'].state == 'inactive'" diff --git a/test/integration/targets/set_fact/aliases b/test/integration/targets/set_fact/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/set_fact/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/set_fact/incremental.yml b/test/integration/targets/set_fact/incremental.yml new file mode 100644 index 00000000..3f7aa6c4 --- /dev/null +++ b/test/integration/targets/set_fact/incremental.yml @@ -0,0 +1,35 @@ +- name: test set_fact incremental https://github.com/ansible/ansible/issues/38271 + hosts: testhost + gather_facts: no + tasks: + - name: Generate inline loop for set_fact + set_fact: + dig_list: "{{ dig_list + [ item ] }}" + loop: + - two + - three + - four + vars: + dig_list: + - one + 
+    - name: verify cumulative set fact worked
+      assert:
+        that:
+          - dig_list == ['one', 'two', 'three', 'four']
+
+    - name: Generate inline loop for set_fact (FQCN)
+      ansible.builtin.set_fact:
+        dig_list_fqcn: "{{ dig_list_fqcn + [ item ] }}"
+      loop:
+        - two
+        - three
+        - four
+      vars:
+        dig_list_fqcn:
+          - one
+
+    - name: verify cumulative set fact worked (FQCN)
+      assert:
+        that:
+          - dig_list_fqcn == ['one', 'two', 'three', 'four']
diff --git a/test/integration/targets/set_fact/inventory b/test/integration/targets/set_fact/inventory
new file mode 100644
index 00000000..b0c00d32
--- /dev/null
+++ b/test/integration/targets/set_fact/inventory
@@ -0,0 +1,3 @@
+[testgroup]
+testhost ansible_connection=local # no connection is actually established with this host
+localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/set_fact/nowarn_clean_facts.yml b/test/integration/targets/set_fact/nowarn_clean_facts.yml
new file mode 100644
index 00000000..74f908d0
--- /dev/null
+++ b/test/integration/targets/set_fact/nowarn_clean_facts.yml
@@ -0,0 +1,10 @@
+- name: Test no warnings ref "http://github.com/ansible/ansible/issues/37535"
+  hosts: testhost
+  gather_facts: false
+  tasks:
+    - name: set ssh jump host args
+      set_fact:
+        ansible_ssh_common_args: "-o ProxyCommand='ssh -W %h:%p -q root@localhost'"
+    - name: set ssh jump host args (FQCN)
+      ansible.builtin.set_fact:
+        ansible_ssh_common_args: "-o ProxyCommand='ssh -W %h:%p -q root@localhost'"
diff --git a/test/integration/targets/set_fact/runme.sh b/test/integration/targets/set_fact/runme.sh
new file mode 100755
index 00000000..364798a1
--- /dev/null
+++ b/test/integration/targets/set_fact/runme.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -eux
+
+MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
+trap 'rm -rf "${MYTMPDIR}"' EXIT
+
+# ensure we can incrementally set fact via loop, injection or not
+ANSIBLE_INJECT_FACT_VARS=0 ansible-playbook -i inventory incremental.yml
+ANSIBLE_INJECT_FACT_VARS=1 ansible-playbook -i inventory incremental.yml
+
+# ensure we don't have spurious warnings due to clean_facts
+ansible-playbook -i inventory nowarn_clean_facts.yml | grep '[WARNING]: Removed restricted key from module data: ansible_ssh_common_args' && exit 1
+
+# test cached feature
+export ANSIBLE_CACHE_PLUGIN=jsonfile ANSIBLE_CACHE_PLUGIN_CONNECTION="${MYTMPDIR}" ANSIBLE_CACHE_PLUGIN_PREFIX=prefix_
+ansible-playbook -i inventory "$@" set_fact_cached_1.yml
+ansible-playbook -i inventory "$@" set_fact_cached_2.yml
+
+# check contents of the fact cache directory before flushing it
+if [[ "$(find "${MYTMPDIR}" -type f)" != $MYTMPDIR/prefix_* ]]; then
+    echo "Unexpected cache file"
+    exit 1
+fi
+
+ansible-playbook -i inventory --flush-cache "$@" set_fact_no_cache.yml
+
+# Test boolean conversions in set_fact
+ansible-playbook -v set_fact_bool_conv.yml
+ANSIBLE_JINJA2_NATIVE=1 ansible-playbook -v set_fact_bool_conv_jinja2_native.yml
diff --git a/test/integration/targets/set_fact/set_fact_bool_conv.yml b/test/integration/targets/set_fact/set_fact_bool_conv.yml
new file mode 100644
index 00000000..8df249be
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_bool_conv.yml
@@ -0,0 +1,35 @@
+- hosts: localhost
+  gather_facts: false
+  vars:
+    string_var: "no"
+  tasks:
+    - set_fact:
+        this_is_string: "yes"
+        this_is_not_string: yes
+        this_is_also_string: "{{ string_var }}"
+        this_is_another_string: !!str "{% set thing = '' + string_var + '' %}{{ thing }}"
this_is_more_strings: '{{ string_var + "" }}' + + - assert: + that: + - string_var == 'no' + - this_is_string == True + - this_is_not_string == True + - this_is_also_string == False + - this_is_another_string == False + - this_is_more_strings == False + + - ansible.builtin.set_fact: + this_is_string_fqcn: "yes" + this_is_not_string_fqcn: yes + this_is_also_string_fqcn: "{{ string_var }}" + this_is_another_string_fqcn: !!str "{% set thing = '' + string_var + '' %}{{ thing }}" + this_is_more_strings_fqcn: '{{ string_var + "" }}' + + - assert: + that: + - this_is_string_fqcn == True + - this_is_not_string_fqcn == True + - this_is_also_string_fqcn == False + - this_is_another_string_fqcn == False + - this_is_more_strings_fqcn == False diff --git a/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml b/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml new file mode 100644 index 00000000..2642599f --- /dev/null +++ b/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml @@ -0,0 +1,35 @@ +- hosts: localhost + gather_facts: false + vars: + string_var: "no" + tasks: + - set_fact: + this_is_string: "yes" + this_is_not_string: yes + this_is_also_string: "{{ string_var }}" + this_is_another_string: !!str "{% set thing = '' + string_var + '' %}{{ thing }}" + this_is_more_strings: '{{ string_var + "" }}' + + - assert: + that: + - string_var == 'no' + - this_is_string == 'yes' + - this_is_not_string == True + - this_is_also_string == 'no' + - this_is_another_string == 'no' + - this_is_more_strings == 'no' + + - ansible.builtin.set_fact: + this_is_string_fqcn: "yes" + this_is_not_string_fqcn: yes + this_is_also_string_fqcn: "{{ string_var }}" + this_is_another_string_fqcn: !!str "{% set thing = '' + string_var + '' %}{{ thing }}" + this_is_more_strings_fqcn: '{{ string_var + "" }}' + + - assert: + that: + - this_is_string_fqcn == 'yes' + - this_is_not_string_fqcn == True + - this_is_also_string_fqcn == 'no' + - this_is_another_string_fqcn == 'no' + - this_is_more_strings_fqcn == 'no' diff --git a/test/integration/targets/set_fact/set_fact_cached_1.yml b/test/integration/targets/set_fact/set_fact_cached_1.yml new file mode 100644 index 00000000..01c9f1e0 --- /dev/null +++ b/test/integration/targets/set_fact/set_fact_cached_1.yml @@ -0,0 +1,324 @@ +--- +- name: the first play + hosts: localhost + tasks: + - name: show foobar fact before + debug: + var: ansible_foobar + + - name: set a persistent fact foobar + set_fact: + ansible_foobar: 'foobar_from_set_fact_cacheable' + cacheable: true + + - name: show foobar fact after + debug: + var: ansible_foobar + + - name: assert ansible_foobar is correct value + assert: + that: + - ansible_foobar == 'foobar_from_set_fact_cacheable' + + - name: set a non persistent fact that will not be cached + set_fact: + ansible_foobar_not_cached: 'this_should_not_be_cached' + + - name: show ansible_foobar_not_cached fact after being set + debug: + var: ansible_foobar_not_cached + + - name: assert ansible_foobar_not_cached is correct value + assert: + that: + - ansible_foobar_not_cached == 'this_should_not_be_cached' + + - name: set another non persistent fact that will not be cached + set_fact: "cacheable=no fact_not_cached='this_should_not_be_cached!'" + + - name: show fact_not_cached fact after being set + debug: + var: fact_not_cached + + - name: assert fact_not_cached is correct value + assert: + that: + - fact_not_cached == 'this_should_not_be_cached!' 
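At this point in set_fact_cached_1.yml, only the fact set with cacheable: true should have reached the fact cache. With the jsonfile plugin configured in runme.sh, the cache is one JSON document per host, named with the configured prefix. A minimal sketch of a check along those lines, assuming the prefix_ prefix and the localhost host name used by these tests:

import json
import os
import sys

# Directory passed as ANSIBLE_CACHE_PLUGIN_CONNECTION (MYTMPDIR in runme.sh).
cache_dir = sys.argv[1]
# The jsonfile cache plugin writes one <prefix><hostname> file per host; both
# values here are assumptions taken from runme.sh and the test inventory.
cache_file = os.path.join(cache_dir, 'prefix_localhost')

with open(cache_file) as f:
    facts = json.load(f)

# The cacheable fact persists; the two non-cacheable ones must not.
assert facts.get('ansible_foobar') == 'foobar_from_set_fact_cacheable'
assert 'ansible_foobar_not_cached' not in facts
assert 'fact_not_cached' not in facts
print('fact cache contents as expected')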
+ + - name: show foobar fact before (FQCN) + debug: + var: ansible_foobar_fqcn + + - name: set a persistent fact foobar (FQCN) + set_fact: + ansible_foobar_fqcn: 'foobar_fqcn_from_set_fact_cacheable' + cacheable: true + + - name: show foobar fact after (FQCN) + debug: + var: ansible_foobar_fqcn + + - name: assert ansible_foobar_fqcn is correct value (FQCN) + assert: + that: + - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable' + + - name: set a non persistent fact that will not be cached (FQCN) + set_fact: + ansible_foobar_not_cached_fqcn: 'this_should_not_be_cached' + + - name: show ansible_foobar_not_cached_fqcn fact after being set (FQCN) + debug: + var: ansible_foobar_not_cached_fqcn + + - name: assert ansible_foobar_not_cached_fqcn is correct value (FQCN) + assert: + that: + - ansible_foobar_not_cached_fqcn == 'this_should_not_be_cached' + + - name: set another non persistent fact that will not be cached (FQCN) + set_fact: "cacheable=no fact_not_cached_fqcn='this_should_not_be_cached!'" + + - name: show fact_not_cached_fqcn fact after being set (FQCN) + debug: + var: fact_not_cached_fqcn + + - name: assert fact_not_cached_fqcn is correct value (FQCN) + assert: + that: + - fact_not_cached_fqcn == 'this_should_not_be_cached!' + +- name: the second play + hosts: localhost + tasks: + - name: show foobar fact after second play + debug: + var: ansible_foobar + + - name: assert ansible_foobar is correct value + assert: + that: + - ansible_foobar == 'foobar_from_set_fact_cacheable' + + - name: show foobar fact after second play (FQCN) + debug: + var: ansible_foobar_fqcn + + - name: assert ansible_foobar is correct value (FQCN) + assert: + that: + - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable' + +- name: show ansible_nodename and ansible_os_family + hosts: localhost + tasks: + - name: show nodename fact after second play + debug: + var: ansible_nodename + - name: show os_family fact after second play (FQCN) + debug: + var: ansible_os_family + +- name: show ansible_nodename and ansible_os_family overridden with var + hosts: localhost + vars: + ansible_nodename: 'nodename_from_play_vars' + ansible_os_family: 'os_family_from_play_vars' + tasks: + - name: show nodename fact after second play + debug: + var: ansible_nodename + - name: show os_family fact after second play (FQCN) + debug: + var: ansible_os_family + +- name: verify ansible_nodename from vars overrides the fact + hosts: localhost + vars: + ansible_nodename: 'nodename_from_play_vars' + ansible_os_family: 'os_family_from_play_vars' + tasks: + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_play_vars' + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + - name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_play_vars' + +- name: set_fact ansible_nodename and ansible_os_family + hosts: localhost + tasks: + - name: set a persistent fact nodename + set_fact: + ansible_nodename: 'nodename_from_set_fact_cacheable' + + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_set_fact_cacheable' + + - name: set a persistent fact os_family (FQCN) + ansible.builtin.set_fact: + ansible_os_family: 'os_family_from_set_fact_cacheable' + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + 
- name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_set_fact_cacheable' + +- name: verify that set_fact ansible_xxx non_cacheable overrides ansible_xxx in vars + hosts: localhost + vars: + ansible_nodename: 'nodename_from_play_vars' + ansible_os_family: 'os_family_from_play_vars' + tasks: + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_set_fact_cacheable' + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + - name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_set_fact_cacheable' + +- name: verify that set_fact_cacheable in previous play overrides ansible_xxx in vars + hosts: localhost + vars: + ansible_nodename: 'nodename_from_play_vars' + ansible_os_family: 'os_family_from_play_vars' + tasks: + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_set_fact_cacheable' + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + - name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_set_fact_cacheable' + +- name: set_fact ansible_nodename and ansible_os_family cacheable + hosts: localhost + tasks: + - name: set a persistent fact nodename + set_fact: + ansible_nodename: 'nodename_from_set_fact_cacheable' + cacheable: true + + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_set_fact_cacheable' + + - name: set a persistent fact os_family (FQCN) + ansible.builtin.set_fact: + ansible_os_family: 'os_family_from_set_fact_cacheable' + cacheable: true + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + - name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_set_fact_cacheable' + + +- name: verify that set_fact_cacheable in previous play overrides ansible_xxx in vars + hosts: localhost + vars: + ansible_nodename: 'nodename_from_play_vars' + ansible_os_family: 'os_family_from_play_vars' + tasks: + - name: show nodename fact + debug: + var: ansible_nodename + + - name: assert ansible_nodename is correct value + assert: + that: + - ansible_nodename == 'nodename_from_set_fact_cacheable' + + - name: show os_family fact (FQCN) + debug: + var: ansible_os_family + + - name: assert ansible_os_family is correct value (FQCN) + assert: + that: + - ansible_os_family == 'os_family_from_set_fact_cacheable' + +- name: the fourth play + hosts: localhost + vars: + ansible_foobar: 'foobar_from_play_vars' + ansible_foobar_fqcn: 'foobar_fqcn_from_play_vars' + tasks: + - name: show example fact + debug: + var: ansible_example + + - name: set a persistent fact example + set_fact: + ansible_example: 'foobar_from_set_fact_cacheable' + cacheable: true + + - name: assert ansible_example is correct value + assert: + that: + - ansible_example == 'foobar_from_set_fact_cacheable' + + - name: show example fact (FQCN) + debug: + var: ansible_example_fqcn + + - name: set a persistent fact example (FQCN) + set_fact: + ansible_example_fqcn: 'foobar_fqcn_from_set_fact_cacheable' + cacheable: true + + - name: assert ansible_example_fqcn is correct value (FQCN) + assert: + 
that: + - ansible_example_fqcn == 'foobar_fqcn_from_set_fact_cacheable' diff --git a/test/integration/targets/set_fact/set_fact_cached_2.yml b/test/integration/targets/set_fact/set_fact_cached_2.yml new file mode 100644 index 00000000..7df92244 --- /dev/null +++ b/test/integration/targets/set_fact/set_fact_cached_2.yml @@ -0,0 +1,57 @@ +--- +- name: A second playbook run with fact caching enabled + hosts: localhost + tasks: + - name: show ansible_foobar fact + debug: + var: ansible_foobar + + - name: assert ansible_foobar is correct value when read from cache + assert: + that: + - ansible_foobar == 'foobar_from_set_fact_cacheable' + + - name: show ansible_foobar_not_cached fact + debug: + var: ansible_foobar_not_cached + + - name: assert ansible_foobar_not_cached is not cached + assert: + that: + - ansible_foobar_not_cached is undefined + + - name: show fact_not_cached fact + debug: + var: fact_not_cached + + - name: assert fact_not_cached is not cached + assert: + that: + - fact_not_cached is undefined + + - name: show ansible_foobar_fqcn fact (FQCN) + debug: + var: ansible_foobar_fqcn + + - name: assert ansible_foobar_fqcn is correct value when read from cache (FQCN) + assert: + that: + - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable' + + - name: show ansible_foobar_fqcn_not_cached fact (FQCN) + debug: + var: ansible_foobar_fqcn_not_cached + + - name: assert ansible_foobar_fqcn_not_cached is not cached (FQCN) + assert: + that: + - ansible_foobar_fqcn_not_cached is undefined + + - name: show fact_not_cached_fqcn fact (FQCN) + debug: + var: fact_not_cached_fqcn + + - name: assert fact_not_cached_fqcn is not cached (FQCN) + assert: + that: + - fact_not_cached_fqcn is undefined diff --git a/test/integration/targets/set_fact/set_fact_no_cache.yml b/test/integration/targets/set_fact/set_fact_no_cache.yml new file mode 100644 index 00000000..f5a99792 --- /dev/null +++ b/test/integration/targets/set_fact/set_fact_no_cache.yml @@ -0,0 +1,39 @@ +--- +- name: Running with fact caching enabled but with cache flushed + hosts: localhost + tasks: + - name: show ansible_foobar fact + debug: + var: ansible_foobar + + - name: assert ansible_foobar is correct value + assert: + that: + - ansible_foobar is undefined + + - name: show ansible_foobar_not_cached fact + debug: + var: ansible_foobar_not_cached + + - name: assert ansible_foobar_not_cached is not cached + assert: + that: + - ansible_foobar_not_cached is undefined + + - name: show ansible_foobar fact (FQCN) + debug: + var: ansible_foobar_fqcn + + - name: assert ansible_foobar is correct value (FQCN) + assert: + that: + - ansible_foobar_fqcn is undefined + + - name: show ansible_foobar_not_cached fact (FQCN) + debug: + var: ansible_foobar_fqcn_not_cached + + - name: assert ansible_foobar_not_cached is not cached (FQCN) + assert: + that: + - ansible_foobar_fqcn_not_cached is undefined diff --git a/test/integration/targets/setup_cron/defaults/main.yml b/test/integration/targets/setup_cron/defaults/main.yml new file mode 100644 index 00000000..e4b0123d --- /dev/null +++ b/test/integration/targets/setup_cron/defaults/main.yml @@ -0,0 +1 @@ +remote_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" diff --git a/test/integration/targets/setup_cron/tasks/main.yml b/test/integration/targets/setup_cron/tasks/main.yml new file mode 100644 index 00000000..93dcefa5 --- /dev/null +++ b/test/integration/targets/setup_cron/tasks/main.yml @@ -0,0 +1,70 @@ +- name: Include distribution specific variables + include_vars: "{{ lookup('first_found', search) }}" + 
vars: + search: + files: + - '{{ ansible_distribution | lower }}.yml' + - '{{ ansible_os_family | lower }}.yml' + - '{{ ansible_system | lower }}.yml' + - default.yml + paths: + - vars + +- name: install cron package + package: + name: '{{ cron_pkg }}' + when: cron_pkg | default(false, true) + register: cron_package_installed + until: cron_package_installed is success + +- when: faketime_pkg | default(false, true) + block: + - name: install cron and faketime packages + package: + name: '{{ faketime_pkg }}' + register: faketime_package_installed + until: faketime_package_installed is success + + - name: Find libfaketime path + shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1' + args: + warn: false + register: libfaketime_path + + - when: ansible_service_mgr == 'systemd' + block: + - name: create directory for cron drop-in file + file: + path: '/etc/systemd/system/{{ cron_service }}.service.d' + state: directory + owner: root + group: root + mode: 0755 + + - name: Use faketime with cron service + copy: + content: |- + [Service] + Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} + Environment="FAKETIME=+0y x10" + Environment=RANDOM_DELAY=0 + dest: '/etc/systemd/system/{{ cron_service }}.service.d/faketime.conf' + owner: root + group: root + mode: 0644 + + - when: ansible_system == 'FreeBSD' + name: Use faketime with cron service + copy: + content: |- + cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"' + dest: '/etc/rc.conf.d/cron' + owner: root + group: wheel + mode: 0644 + +- name: enable cron service + service: + daemon-reload: "{{ (ansible_service_mgr == 'systemd') | ternary(true, omit) }}" + name: '{{ cron_service }}' + state: restarted diff --git a/test/integration/targets/setup_cron/vars/debian.yml b/test/integration/targets/setup_cron/vars/debian.yml new file mode 100644 index 00000000..cd04871c --- /dev/null +++ b/test/integration/targets/setup_cron/vars/debian.yml @@ -0,0 +1,3 @@ +cron_pkg: cron +cron_service: cron +list_pkg_files: dpkg -L diff --git a/test/integration/targets/setup_cron/vars/default.yml b/test/integration/targets/setup_cron/vars/default.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/setup_cron/vars/fedora.yml b/test/integration/targets/setup_cron/vars/fedora.yml new file mode 100644 index 00000000..b80a51b5 --- /dev/null +++ b/test/integration/targets/setup_cron/vars/fedora.yml @@ -0,0 +1,3 @@ +cron_pkg: cronie +cron_service: crond +list_pkg_files: rpm -ql diff --git a/test/integration/targets/setup_cron/vars/freebsd.yml b/test/integration/targets/setup_cron/vars/freebsd.yml new file mode 100644 index 00000000..41ed4493 --- /dev/null +++ b/test/integration/targets/setup_cron/vars/freebsd.yml @@ -0,0 +1,3 @@ +cron_pkg: +cron_service: cron +list_pkg_files: pkg info --list-files diff --git a/test/integration/targets/setup_cron/vars/redhat.yml b/test/integration/targets/setup_cron/vars/redhat.yml new file mode 100644 index 00000000..2dff13de --- /dev/null +++ b/test/integration/targets/setup_cron/vars/redhat.yml @@ -0,0 +1,4 @@ +cron_pkg: cronie +cron_service: crond +faketime_pkg: +list_pkg_files: rpm -ql diff --git a/test/integration/targets/setup_cron/vars/suse.yml b/test/integration/targets/setup_cron/vars/suse.yml new file mode 100644 index 00000000..cd3677a6 --- /dev/null +++ b/test/integration/targets/setup_cron/vars/suse.yml @@ -0,0 +1,3 @@ +cron_pkg: cron +cron_service: cron +list_pkg_files: rpm -ql diff --git 
a/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0 b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0 new file mode 100644 index 00000000..4206fbab --- /dev/null +++ b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0 @@ -0,0 +1,10 @@ +Section: misc +Priority: optional +Standards-Version: 2.3.3 + +Package: foo +Version: 1.0.0 +Section: system +Maintainer: John Doe +Architecture: all +Description: Dummy package diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1 b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1 new file mode 100644 index 00000000..021f4d52 --- /dev/null +++ b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1 @@ -0,0 +1,10 @@ +Section: misc +Priority: optional +Standards-Version: 2.3.3 + +Package: foo +Version: 1.0.1 +Section: system +Maintainer: John Doe +Architecture: all +Description: Dummy package diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0 b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0 new file mode 100644 index 00000000..0da0348f --- /dev/null +++ b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0 @@ -0,0 +1,11 @@ +Section: misc +Priority: optional +Standards-Version: 2.3.3 + +Package: foobar +Version: 1.0.0 +Section: system +Depends: foo +Maintainer: John Doe +Architecture: all +Description: Dummy package diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1 b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1 new file mode 100644 index 00000000..b9fa8303 --- /dev/null +++ b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1 @@ -0,0 +1,10 @@ +Section: misc +Priority: optional +Standards-Version: 2.3.3 + +Package: foobar +Version: 1.0.1 +Section: system +Maintainer: John Doe +Architecture: all +Description: Dummy package diff --git a/test/integration/targets/setup_deb_repo/meta/main.yml b/test/integration/targets/setup_deb_repo/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/setup_deb_repo/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/setup_deb_repo/tasks/main.yml b/test/integration/targets/setup_deb_repo/tasks/main.yml new file mode 100644 index 00000000..49f68a2c --- /dev/null +++ b/test/integration/targets/setup_deb_repo/tasks/main.yml @@ -0,0 +1,56 @@ +- block: + - name: Install needed packages + apt: + name: "{{ item }}" + with_items: + - dpkg-dev + - equivs + - libfile-fcntllock-perl # to silence warning by equivs-build + + - set_fact: + repodir: /tmp/repo/ + + - name: Create repo dir + file: + path: "{{ repodir }}" + state: directory + mode: 0755 + + - name: Copy package specs to remote + copy: + src: "{{ item }}" + dest: "{{ remote_tmp_dir }}/{{ item | basename }}" + with_fileglob: + - "files/package_specs/*" + + - name: Create deb files + shell: "equivs-build {{ remote_tmp_dir }}/{{ item | basename }}" + args: + chdir: "{{ repodir }}" + with_fileglob: + - "files/package_specs/*" + + - name: Create repo + shell: dpkg-scanpackages --multiversion . 
/dev/null | gzip -9c > Packages.gz + args: + chdir: "{{ repodir }}" + + # Can't use apt_repository as it doesn't expose a trusted=yes option + - name: Install the repo + copy: + content: deb [trusted=yes] file:{{ repodir }} ./ + dest: /etc/apt/sources.list.d/file_tmp_repo.list + + # Need to uncomment the deb-src for the universe component for build-dep state + - name: Ensure deb-src for the universe component + lineinfile: + path: /etc/apt/sources.list + backrefs: True + regexp: ^#\s*deb-src http://archive\.ubuntu\.com/ubuntu/ (\w*){{ item }} universe$ + line: deb-src http://archive.ubuntu.com/ubuntu \1{{ item }} universe + state: present + with_items: + - '' + - -updates + + when: ansible_distribution in ['Ubuntu', 'Debian'] diff --git a/test/integration/targets/setup_epel/tasks/main.yml b/test/integration/targets/setup_epel/tasks/main.yml new file mode 100644 index 00000000..c279810e --- /dev/null +++ b/test/integration/targets/setup_epel/tasks/main.yml @@ -0,0 +1,5 @@ +- name: Install EPEL + yum: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm + disable_gpg_check: true + when: ansible_facts.distribution in ['RedHat', 'CentOS'] diff --git a/test/integration/targets/setup_gnutar/handlers/main.yml b/test/integration/targets/setup_gnutar/handlers/main.yml new file mode 100644 index 00000000..d3fa7c27 --- /dev/null +++ b/test/integration/targets/setup_gnutar/handlers/main.yml @@ -0,0 +1,6 @@ +- name: uninstall gnu-tar + command: brew uninstall gnu-tar + become: yes + become_user: "{{ brew_stat.stat.pw_name }}" + environment: + HOMEBREW_NO_AUTO_UPDATE: True diff --git a/test/integration/targets/setup_gnutar/tasks/main.yml b/test/integration/targets/setup_gnutar/tasks/main.yml new file mode 100644 index 00000000..b7d841cd --- /dev/null +++ b/test/integration/targets/setup_gnutar/tasks/main.yml @@ -0,0 +1,18 @@ +- when: ansible_facts.distribution == 'MacOSX' + block: + - name: MACOS | Find brew binary + command: which brew + register: brew_which + + - name: MACOS | Get owner of brew binary + stat: + path: "{{ brew_which.stdout }}" + register: brew_stat + + - command: brew install gnu-tar + become: yes + become_user: "{{ brew_stat.stat.pw_name }}" + environment: + HOMEBREW_NO_AUTO_UPDATE: True + notify: + - uninstall gnu-tar diff --git a/test/integration/targets/setup_nobody/handlers/main.yml b/test/integration/targets/setup_nobody/handlers/main.yml new file mode 100644 index 00000000..2d02efbb --- /dev/null +++ b/test/integration/targets/setup_nobody/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: remove nobody user + user: + name: nobody + state: absent diff --git a/test/integration/targets/setup_nobody/tasks/main.yml b/test/integration/targets/setup_nobody/tasks/main.yml new file mode 100644 index 00000000..cc0e4fe8 --- /dev/null +++ b/test/integration/targets/setup_nobody/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: create nobody user + user: + name: nobody + create_home: no + state: present + notify: remove nobody user diff --git a/test/integration/targets/setup_paramiko/aliases b/test/integration/targets/setup_paramiko/aliases new file mode 100644 index 00000000..c49be254 --- /dev/null +++ b/test/integration/targets/setup_paramiko/aliases @@ -0,0 +1 @@ +needs/target/setup_remote_tmp_dir diff --git a/test/integration/targets/setup_paramiko/constraints.txt b/test/integration/targets/setup_paramiko/constraints.txt new file mode 100644 index 00000000..c502ba0d --- /dev/null +++ 
b/test/integration/targets/setup_paramiko/constraints.txt @@ -0,0 +1 @@ +cryptography >= 2.5, < 3.4 diff --git a/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml b/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml new file mode 100644 index 00000000..0c7b9e82 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 2 on CentOS 6 + yum: + name: python-paramiko diff --git a/test/integration/targets/setup_paramiko/install-Darwin-python-3.yml b/test/integration/targets/setup_paramiko/install-Darwin-python-3.yml new file mode 100644 index 00000000..8926fe33 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-Darwin-python-3.yml @@ -0,0 +1,9 @@ +- name: Setup remote constraints + include_tasks: setup-remote-constraints.yml +- name: Install Paramiko for Python 3 on MacOS + pip: # no homebrew package manager in core, just use pip + name: paramiko + extra_args: "-c {{ remote_constraints }}" + environment: + # Not sure why this fixes the test, but it does. + SETUPTOOLS_USE_DISTUTILS: stdlib diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml new file mode 100644 index 00000000..dec5b548 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 2 on FreeBSD 11 + pkgng: + name: py27-paramiko diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml new file mode 100644 index 00000000..eb01d00f --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml @@ -0,0 +1,12 @@ +- name: Downgrade to pip version 18.1 to work around a PEP 517 virtualenv bug + # pip 19.0.0 added support for PEP 517 + # versions as recent as 19.0.3 fail to install paramiko in a virtualenv due to a BackendUnavailable exception + # installation without a virtualenv succeeds + pip: + name: pip==18.1 +- name: Setup remote constraints + include_tasks: setup-remote-constraints.yml +- name: Install Paramiko for Python 3 on FreeBSD 11 + pip: # no py36-paramiko package exists for FreeBSD 11 + name: paramiko + extra_args: "-c {{ remote_constraints }}" diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml new file mode 100644 index 00000000..29e78969 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 2 on FreeBSD 12 + pkgng: + name: py27-paramiko diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml new file mode 100644 index 00000000..2aa7b500 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 3 on FreeBSD 12 + pkgng: + name: py36-paramiko diff --git a/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml b/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml new file mode 100644 index 00000000..19fd3f63 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml @@ -0,0 +1,6 @@ +- name: Setup remote constraints + 
include_tasks: setup-remote-constraints.yml +- name: Install Paramiko for Python 3 on RHEL 8 + pip: # no python3-paramiko package exists for RHEL 8 + name: paramiko + extra_args: "-c {{ remote_constraints }}" diff --git a/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml b/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml new file mode 100644 index 00000000..8f760740 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 2 on Ubuntu 16 + apt: + name: python-paramiko diff --git a/test/integration/targets/setup_paramiko/install-fail.yml b/test/integration/targets/setup_paramiko/install-fail.yml new file mode 100644 index 00000000..b4ba4640 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-fail.yml @@ -0,0 +1,7 @@ +- name: Install Paramiko + fail: + msg: "Install of Paramiko on distribution '{{ ansible_distribution }}' with major version '{{ ansible_distribution_major_version }}' + with package manager '{{ ansible_pkg_mgr }}' on Python {{ ansible_python.version.major }} has not been implemented. + Use native OS packages if available, otherwise use pip. + Be sure to uninstall automatically installed dependencies when possible. + Do not implement a generic fallback to pip, as that would allow distributions not yet configured to go undetected." diff --git a/test/integration/targets/setup_paramiko/install-python-2.yml b/test/integration/targets/setup_paramiko/install-python-2.yml new file mode 100644 index 00000000..be337a16 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-python-2.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 2 + package: + name: python2-paramiko diff --git a/test/integration/targets/setup_paramiko/install-python-3.yml b/test/integration/targets/setup_paramiko/install-python-3.yml new file mode 100644 index 00000000..ac2a1a2b --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-python-3.yml @@ -0,0 +1,3 @@ +- name: Install Paramiko for Python 3 + package: + name: python3-paramiko diff --git a/test/integration/targets/setup_paramiko/install.yml b/test/integration/targets/setup_paramiko/install.yml new file mode 100644 index 00000000..e98abe33 --- /dev/null +++ b/test/integration/targets/setup_paramiko/install.yml @@ -0,0 +1,18 @@ +- hosts: localhost + tasks: + - name: Detect Paramiko + detect_paramiko: + register: detect_paramiko + - name: Persist Result + copy: + content: "{{ detect_paramiko }}" + dest: "{{ lookup('env', 'OUTPUT_DIR') }}/detect-paramiko.json" + - name: Install Paramiko + when: not detect_paramiko.found + include_tasks: "{{ item }}" + with_first_found: + - "install-{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml" + - "install-{{ ansible_os_family }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml" + - "install-{{ ansible_os_family }}-python-{{ ansible_python.version.major }}.yml" + - "install-python-{{ ansible_python.version.major }}.yml" + - "install-fail.yml" diff --git a/test/integration/targets/setup_paramiko/inventory b/test/integration/targets/setup_paramiko/inventory new file mode 100644 index 00000000..8618c725 --- /dev/null +++ b/test/integration/targets/setup_paramiko/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git 
a/test/integration/targets/setup_paramiko/library/detect_paramiko.py b/test/integration/targets/setup_paramiko/library/detect_paramiko.py new file mode 100644 index 00000000..e3a81582 --- /dev/null +++ b/test/integration/targets/setup_paramiko/library/detect_paramiko.py @@ -0,0 +1,31 @@ +#!/usr/bin/python +"""Ansible module to detect the presence of both the normal and Ansible-specific versions of Paramiko.""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + +try: + import paramiko +except ImportError: + paramiko = None + +try: + import ansible_paramiko +except ImportError: + ansible_paramiko = None + + +def main(): + module = AnsibleModule(argument_spec={}) + module.exit_json(**dict( + found=bool(paramiko or ansible_paramiko), + paramiko=bool(paramiko), + ansible_paramiko=bool(ansible_paramiko), + )) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/setup_paramiko/setup-remote-constraints.yml b/test/integration/targets/setup_paramiko/setup-remote-constraints.yml new file mode 100644 index 00000000..a86d4777 --- /dev/null +++ b/test/integration/targets/setup_paramiko/setup-remote-constraints.yml @@ -0,0 +1,12 @@ +- name: Setup remote temporary directory + include_role: + name: setup_remote_tmp_dir + +- name: Record constraints.txt path on remote host + set_fact: + remote_constraints: "{{ remote_tmp_dir }}/constraints.txt" + +- name: Copy constraints.txt to remote host + copy: + src: "constraints.txt" + dest: "{{ remote_constraints }}" diff --git a/test/integration/targets/setup_paramiko/setup.sh b/test/integration/targets/setup_paramiko/setup.sh new file mode 100644 index 00000000..316320c3 --- /dev/null +++ b/test/integration/targets/setup_paramiko/setup.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Usage: source ../setup_paramiko/setup.sh + +set -eux + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh # for pip installs, if needed, otherwise unused +ANSIBLE_ROLES_PATH=../ ansible-playbook ../setup_paramiko/install.yml -i ../setup_paramiko/inventory "$@" +trap 'ansible-playbook ../setup_paramiko/uninstall.yml -i ../setup_paramiko/inventory "$@"' EXIT diff --git a/test/integration/targets/setup_paramiko/uninstall-Darwin-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-Darwin-python-3.yml new file mode 100644 index 00000000..69a68e42 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-Darwin-python-3.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 3 on MacOS + pip: + name: paramiko + state: absent diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml new file mode 100644 index 00000000..d27f831c --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 2 on FreeBSD 11 + pkgng: + name: py27-paramiko + state: absent diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml new file mode 100644 index 00000000..33f292e8 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 3 on FreeBSD 11 + pip: # no py36-paramiko package exists for FreeBSD 11 + name: paramiko + state: absent diff --git 
a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml new file mode 100644 index 00000000..79352487 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 2 on FreeBSD 12 + pkgng: + name: py27-paramiko + state: absent diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml new file mode 100644 index 00000000..46d26ca3 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 3 on FreeBSD 12 + pkgng: + name: py36-paramiko + state: absent diff --git a/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml new file mode 100644 index 00000000..d3a9493e --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 3 on RHEL 8 + pip: # no python3-paramiko package exists for RHEL 8 + name: paramiko + state: absent diff --git a/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml new file mode 100644 index 00000000..507d94cc --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml @@ -0,0 +1,5 @@ +- name: Uninstall Paramiko for Python 2 using apt + apt: + name: python-paramiko + state: absent + autoremove: yes diff --git a/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml new file mode 100644 index 00000000..d51fc92e --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml @@ -0,0 +1,5 @@ +- name: Uninstall Paramiko for Python 3 using apt + apt: + name: python3-paramiko + state: absent + autoremove: yes diff --git a/test/integration/targets/setup_paramiko/uninstall-dnf.yml b/test/integration/targets/setup_paramiko/uninstall-dnf.yml new file mode 100644 index 00000000..ff0d39ce --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-dnf.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko using dnf history undo + command: dnf history undo last --assumeyes + args: + warn: no diff --git a/test/integration/targets/setup_paramiko/uninstall-fail.yml b/test/integration/targets/setup_paramiko/uninstall-fail.yml new file mode 100644 index 00000000..bc5e12f1 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-fail.yml @@ -0,0 +1,7 @@ +- name: Uninstall Paramiko + fail: + msg: "Uninstall of Paramiko on distribution '{{ ansible_distribution }}' with major version '{{ ansible_distribution_major_version }}' + with package manager '{{ ansible_pkg_mgr }}' on Python {{ ansible_python.version.major }} has not been implemented. + Use native OS packages if available, otherwise use pip. + Be sure to uninstall automatically installed dependencies when possible. + Do not implement a generic fallback to pip, as that would allow distributions not yet configured to go undetected." 
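The install.yml and uninstall.yml playbooks above select their task file with with_first_found, walking from the most specific candidate (distribution plus major version plus Python version) down to a package-manager fallback and finally the explicit *-fail.yml. A minimal sketch of that resolution order, mirroring the uninstall.yml chain (install.yml is similar but ends in a generic install-python-N.yml instead of package-manager files); the fact values and the working directory are assumed purely for illustration:

import os.path

def first_found(prefix, distribution, os_family, major, pkg_mgr, py):
    # Candidates in the same order as uninstall.yml; the first existing file wins.
    candidates = [
        '%s-%s-%s-python-%s.yml' % (prefix, distribution, major, py),
        '%s-%s-%s-python-%s.yml' % (prefix, os_family, major, py),
        '%s-%s-python-%s.yml' % (prefix, os_family, py),
        '%s-%s-python-%s.yml' % (prefix, pkg_mgr, py),
        '%s-%s.yml' % (prefix, pkg_mgr),
        '%s-fail.yml' % prefix,
    ]
    for name in candidates:
        if os.path.exists(name):  # assumes cwd is the setup_paramiko target dir
            return name
    raise RuntimeError('no candidate task file found')

# Example: on a CentOS 7 host with yum and Python 2 (assumed values), none of
# the distribution- or family-specific files exist, so the chain falls through
# to uninstall-yum.yml.
print(first_found('uninstall', 'CentOS', 'RedHat', '7', 'yum', 2))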
diff --git a/test/integration/targets/setup_paramiko/uninstall-yum.yml b/test/integration/targets/setup_paramiko/uninstall-yum.yml new file mode 100644 index 00000000..f293d229 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-yum.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko using yum history undo + command: yum history undo last --assumeyes + args: + warn: no diff --git a/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml new file mode 100644 index 00000000..6bdb3307 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 2 using zypper + command: zypper --quiet --non-interactive remove --clean-deps python2-paramiko + args: + warn: no diff --git a/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml new file mode 100644 index 00000000..cb0db941 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml @@ -0,0 +1,4 @@ +- name: Uninstall Paramiko for Python 3 using zypper + command: zypper --quiet --non-interactive remove --clean-deps python3-paramiko + args: + warn: no diff --git a/test/integration/targets/setup_paramiko/uninstall.yml b/test/integration/targets/setup_paramiko/uninstall.yml new file mode 100644 index 00000000..48ff68e6 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall.yml @@ -0,0 +1,20 @@ +- hosts: localhost + vars: + detect_paramiko: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/detect-paramiko.json") | from_json }}' + tasks: + - name: Uninstall Paramiko and Verify Results + when: not detect_paramiko.found + block: + - name: Uninstall Paramiko + include_tasks: "{{ item }}" + with_first_found: + - "uninstall-{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml" + - "uninstall-{{ ansible_os_family }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml" + - "uninstall-{{ ansible_os_family }}-python-{{ ansible_python.version.major }}.yml" + - "uninstall-{{ ansible_pkg_mgr }}-python-{{ ansible_python.version.major }}.yml" + - "uninstall-{{ ansible_pkg_mgr }}.yml" + - "uninstall-fail.yml" + - name: Verify Paramiko was uninstalled + detect_paramiko: + register: detect_paramiko + failed_when: detect_paramiko.found diff --git a/test/integration/targets/setup_passlib/tasks/main.yml b/test/integration/targets/setup_passlib/tasks/main.yml new file mode 100644 index 00000000..e4cd0d0b --- /dev/null +++ b/test/integration/targets/setup_passlib/tasks/main.yml @@ -0,0 +1,4 @@ +- name: Install passlib + pip: + name: passlib + state: present diff --git a/test/integration/targets/setup_pexpect/files/constraints.txt b/test/integration/targets/setup_pexpect/files/constraints.txt new file mode 100644 index 00000000..c78ecdad --- /dev/null +++ b/test/integration/targets/setup_pexpect/files/constraints.txt @@ -0,0 +1,2 @@ +pexpect == 4.8.0 +ptyprocess < 0.7.0 ; python_version < '2.7' # ptyprocess >= 0.7.0 not compatible with Python 2.6 diff --git a/test/integration/targets/setup_pexpect/meta/main.yml b/test/integration/targets/setup_pexpect/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/setup_pexpect/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git 
a/test/integration/targets/setup_pexpect/tasks/main.yml b/test/integration/targets/setup_pexpect/tasks/main.yml new file mode 100644 index 00000000..690fe441 --- /dev/null +++ b/test/integration/targets/setup_pexpect/tasks/main.yml @@ -0,0 +1,10 @@ +- name: Copy constraints file + copy: + src: constraints.txt + dest: "{{ remote_tmp_dir }}/pexpect-constraints.txt" + +- name: Install pexpect + pip: + name: pexpect + extra_args: '--constraint "{{ remote_tmp_dir }}/pexpect-constraints.txt"' + state: present diff --git a/test/integration/targets/setup_remote_constraints/aliases b/test/integration/targets/setup_remote_constraints/aliases new file mode 100644 index 00000000..18cc100a --- /dev/null +++ b/test/integration/targets/setup_remote_constraints/aliases @@ -0,0 +1 @@ +needs/file/test/lib/ansible_test/_data/requirements/constraints.txt diff --git a/test/integration/targets/setup_remote_constraints/meta/main.yml b/test/integration/targets/setup_remote_constraints/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/setup_remote_constraints/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/setup_remote_constraints/tasks/main.yml b/test/integration/targets/setup_remote_constraints/tasks/main.yml new file mode 100644 index 00000000..eee09ccf --- /dev/null +++ b/test/integration/targets/setup_remote_constraints/tasks/main.yml @@ -0,0 +1,8 @@ +- name: record constraints.txt path on remote host + set_fact: + remote_constraints: "{{ remote_tmp_dir }}/constraints.txt" + +- name: copy constraints.txt to remote host + copy: + src: "{{ role_path }}/../../../lib/ansible_test/_data/requirements/constraints.txt" + dest: "{{ remote_constraints }}" diff --git a/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml new file mode 100644 index 00000000..229037c8 --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml @@ -0,0 +1,5 @@ +- name: delete temporary directory + include_tasks: default-cleanup.yml + +- name: delete temporary directory (windows) + include_tasks: windows-cleanup.yml diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml new file mode 100644 index 00000000..39872d74 --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml @@ -0,0 +1,5 @@ +- name: delete temporary directory + file: + path: "{{ remote_tmp_dir }}" + state: absent + no_log: yes diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml new file mode 100644 index 00000000..1e0f51b8 --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml @@ -0,0 +1,11 @@ +- name: create temporary directory + tempfile: + state: directory + suffix: .test + register: remote_tmp_dir + notify: + - delete temporary directory + +- name: record temporary directory + set_fact: + remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml new file mode 100644 index 00000000..f8df391b --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml @@ -0,0 +1,10 @@ +- name: make sure we have the ansible_os_family and ansible_distribution_version facts + setup: + 
gather_subset: distribution + when: ansible_facts == {} + +- include_tasks: "{{ lookup('first_found', files)}}" + vars: + files: + - "{{ ansible_os_family | lower }}.yml" + - "default.yml" diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml new file mode 100644 index 00000000..1936b610 --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml @@ -0,0 +1,4 @@ +- name: delete temporary directory (windows) + win_file: + path: "{{ remote_tmp_dir }}" + state: absent diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml new file mode 100644 index 00000000..afedc4eb --- /dev/null +++ b/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml @@ -0,0 +1,11 @@ +- name: create temporary directory + win_tempfile: + state: directory + suffix: .test + register: remote_tmp_dir + notify: + - delete temporary directory (windows) + +- name: record temporary directory + set_fact: + remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/test/integration/targets/setup_rpm_repo/aliases b/test/integration/targets/setup_rpm_repo/aliases new file mode 100644 index 00000000..65e83152 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/aliases @@ -0,0 +1 @@ +needs/target/setup_epel diff --git a/test/integration/targets/setup_rpm_repo/defaults/main.yml b/test/integration/targets/setup_rpm_repo/defaults/main.yml new file mode 100644 index 00000000..19c033b9 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/defaults/main.yml @@ -0,0 +1 @@ +install_repos: yes diff --git a/test/integration/targets/setup_rpm_repo/files/comps.xml b/test/integration/targets/setup_rpm_repo/files/comps.xml new file mode 100644 index 00000000..e9391829 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/files/comps.xml @@ -0,0 +1,36 @@ + + + + customgroup + Custom Group + + false + true + 1024 + + dinginessentail + + + + + customenvgroup + Custom Environment Group + + false + false + 1024 + + landsidescalping + + + + + customenvgroup-environment + Custom Environment Group + + 1024 + + customenvgroup + + + diff --git a/test/integration/targets/setup_rpm_repo/files/create-repo.py b/test/integration/targets/setup_rpm_repo/files/create-repo.py new file mode 100644 index 00000000..2033fdf8 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/files/create-repo.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + + +import sys +from collections import namedtuple + +try: + from rpmfluff import SimpleRpmBuild + from rpmfluff import YumRepoBuild +except ImportError: + from rpmfluff.rpmbuild import SimpleRpmBuild + from rpmfluff.yumrepobuild import YumRepoBuild + +try: + from rpmfluff import can_use_rpm_weak_deps +except ImportError: + try: + from rpmfluff.utils import can_use_rpm_weak_deps + except ImportError: + can_use_rpm_weak_deps = None + +RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch', 'recommends']) + + +SPECS = [ + RPM('dinginessentail', '1.0', '1', None, None), + RPM('dinginessentail', '1.0', '2', '1', None), + RPM('dinginessentail', '1.1', '1', '1', None), + RPM('dinginessentail-olive', '1.0', '1', None, None), + RPM('dinginessentail-olive', '1.1', '1', None, None), + RPM('landsidescalping', '1.0', '1', None, None), + RPM('landsidescalping', '1.1', '1', None, None), + RPM('dinginessentail-with-weak-dep', '1.0', '1', None, ['dinginessentail-weak-dep']), + 
RPM('dinginessentail-weak-dep', '1.0', '1', None, None), +] + + +def main(): + try: + arch = sys.argv[1] + except IndexError: + arch = 'x86_64' + + pkgs = [] + for spec in SPECS: + pkg = SimpleRpmBuild(spec.name, spec.version, spec.release, [arch]) + pkg.epoch = spec.epoch + + if spec.recommends: + # Skip packages that require weak deps but an older version of RPM is being used + if not can_use_rpm_weak_deps or not can_use_rpm_weak_deps(): + continue + + for recommend in spec.recommends: + pkg.add_recommends(recommend) + + pkgs.append(pkg) + + repo = YumRepoBuild(pkgs) + repo.make(arch) + + for pkg in pkgs: + pkg.clean() + + print(repo.repoDir) + + +if __name__ == "__main__": + main() diff --git a/test/integration/targets/setup_rpm_repo/handlers/main.yml b/test/integration/targets/setup_rpm_repo/handlers/main.yml new file mode 100644 index 00000000..a0af3c92 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/handlers/main.yml @@ -0,0 +1,5 @@ +- name: remove repos + yum_repository: + state: absent + name: "{{ item }}" + loop: "{{ repos }}" diff --git a/test/integration/targets/setup_rpm_repo/library/create_repo.py b/test/integration/targets/setup_rpm_repo/library/create_repo.py new file mode 100644 index 00000000..6dc1e457 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/library/create_repo.py @@ -0,0 +1,94 @@ +#!/usr/bin/python + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import tempfile + +from collections import namedtuple + +from ansible.module_utils.basic import AnsibleModule + +try: + from rpmfluff import SimpleRpmBuild + from rpmfluff import YumRepoBuild +except ImportError: + from rpmfluff.rpmbuild import SimpleRpmBuild + from rpmfluff.yumrepobuild import YumRepoBuild + +try: + from rpmfluff import can_use_rpm_weak_deps +except ImportError: + try: + from rpmfluff.utils import can_use_rpm_weak_deps + except ImportError: + can_use_rpm_weak_deps = None + +RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch', 'recommends']) + + +SPECS = [ + RPM('dinginessentail', '1.0', '1', None, None), + RPM('dinginessentail', '1.0', '2', '1', None), + RPM('dinginessentail', '1.1', '1', '1', None), + RPM('dinginessentail-olive', '1.0', '1', None, None), + RPM('dinginessentail-olive', '1.1', '1', None, None), + RPM('landsidescalping', '1.0', '1', None, None), + RPM('landsidescalping', '1.1', '1', None, None), + RPM('dinginessentail-with-weak-dep', '1.0', '1', None, ['dinginessentail-weak-dep']), + RPM('dinginessentail-weak-dep', '1.0', '1', None, None), +] + + +def create_repo(arch='x86_64'): + pkgs = [] + for spec in SPECS: + pkg = SimpleRpmBuild(spec.name, spec.version, spec.release, [arch]) + pkg.epoch = spec.epoch + + if spec.recommends: + # Skip packages that require weak deps but an older version of RPM is being used + if not can_use_rpm_weak_deps or not can_use_rpm_weak_deps(): + continue + + for recommend in spec.recommends: + pkg.add_recommends(recommend) + + pkgs.append(pkg) + + repo = YumRepoBuild(pkgs) + repo.make(arch) + + for pkg in pkgs: + pkg.clean() + + return repo.repoDir + + +def main(): + module = AnsibleModule( + argument_spec={ + 'arch': {'required': True}, + 'tempdir': {'type': 'path'}, + } + ) + + arch = module.params['arch'] + tempdir = module.params['tempdir'] + + # Save current temp dir so we can set it back later + original_tempdir = tempfile.tempdir + tempfile.tempdir = tempdir + + try: + repo_dir = create_repo(arch) + finally: + tempfile.tempdir = original_tempdir + + 
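# Return the path of the generated repo; the play registers this result and points yum_repository's baseurl at repo_dir. +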
module.exit_json(repo_dir=repo_dir, tmpfile=tempfile.gettempdir()) + + +if __name__ == "__main__": + main() diff --git a/test/integration/targets/setup_rpm_repo/meta/main.yml b/test/integration/targets/setup_rpm_repo/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/setup_rpm_repo/tasks/main.yml b/test/integration/targets/setup_rpm_repo/tasks/main.yml new file mode 100644 index 00000000..a6766f20 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/tasks/main.yml @@ -0,0 +1,102 @@ +- block: + - name: Install epel repo which is missing on rhel-7 and is needed for rpmfluff + include_role: + name: setup_epel + when: + - ansible_distribution in ['RedHat', 'CentOS'] + - ansible_distribution_major_version is version('7', '==') + + - name: Include distribution specific variables + include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_version }}.yml" + - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + - default.yml + paths: + - "{{ role_path }}/vars" + + - name: Install rpmfluff and deps + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ rpm_repo_packages }}" + + - name: Install rpmfluff from pip on RHEL 8 and later + pip: + name: rpmfluff + when: + - ansible_facts.distribution in ['RedHat', 'CentOS'] + - ansible_facts.distribution_major_version is version('8', '>=') + + - set_fact: + repos: + - "fake-{{ ansible_architecture }}" + - "fake-i686" + - "fake-ppc64" + changed_when: yes + notify: remove repos + + - name: Create RPMs and put them into a repo + create_repo: + arch: "{{ ansible_architecture }}" + tempdir: "{{ remote_tmp_dir }}" + register: repo + + - set_fact: + repodir: "{{ repo.repo_dir }}" + + - name: Install the repo + yum_repository: + name: "fake-{{ ansible_architecture }}" + description: "fake-{{ ansible_architecture }}" + baseurl: "file://{{ repodir }}" + gpgcheck: no + when: install_repos | bool + + - name: Copy comps.xml file + copy: + src: comps.xml + dest: "{{ repodir }}" + register: repodir_comps + + - name: Register comps.xml on repo + command: createrepo -g {{ repodir_comps.dest | quote }} {{ repodir | quote }} + + - name: Create RPMs and put them into a repo (i686) + create_repo: + arch: i686 + tempdir: "{{ remote_tmp_dir }}" + register: repo_i686 + + - set_fact: + repodir_i686: "{{ repo_i686.repo_dir }}" + + - name: Install the repo (i686) + yum_repository: + name: "fake-i686" + description: "fake-i686" + baseurl: "file://{{ repodir_i686 }}" + gpgcheck: no + when: install_repos | bool + + - name: Create RPMs and put them into a repo (ppc64) + create_repo: + arch: ppc64 + tempdir: "{{ remote_tmp_dir }}" + register: repo_ppc64 + + - set_fact: + repodir_ppc64: "{{ repo_ppc64.repo_dir }}" + + - name: Install the repo (ppc64) + yum_repository: + name: "fake-ppc64" + description: "fake-ppc64" + baseurl: "file://{{ repodir_ppc64 }}" + gpgcheck: no + when: install_repos | bool + + when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] diff --git a/test/integration/targets/setup_rpm_repo/vars/Fedora.yml b/test/integration/targets/setup_rpm_repo/vars/Fedora.yml new file mode 100644 index 00000000..004f42bc --- /dev/null +++ 
b/test/integration/targets/setup_rpm_repo/vars/Fedora.yml @@ -0,0 +1,4 @@ +rpm_repo_packages: + - "{{ 'python' ~ rpm_repo_python_major_version ~ '-rpmfluff' }}" + - createrepo + - rpm-build diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml new file mode 100644 index 00000000..6edee17d --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml @@ -0,0 +1,5 @@ +rpm_repo_packages: + - rpm-build + - python-rpmfluff + - createrepo_c + - createrepo diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml new file mode 100644 index 00000000..6edee17d --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml @@ -0,0 +1,5 @@ +rpm_repo_packages: + - rpm-build + - python-rpmfluff + - createrepo_c + - createrepo diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml new file mode 100644 index 00000000..84849e23 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml @@ -0,0 +1,4 @@ +rpm_repo_packages: + - rpm-build + - createrepo_c + - createrepo diff --git a/test/integration/targets/setup_rpm_repo/vars/main.yml b/test/integration/targets/setup_rpm_repo/vars/main.yml new file mode 100644 index 00000000..8e924fce --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/vars/main.yml @@ -0,0 +1 @@ +rpm_repo_python_major_version: "{{ ansible_facts.python_version.split('.')[0] }}" diff --git a/test/integration/targets/setup_win_printargv/files/PrintArgv.cs b/test/integration/targets/setup_win_printargv/files/PrintArgv.cs new file mode 100644 index 00000000..5ca3a8a0 --- /dev/null +++ b/test/integration/targets/setup_win_printargv/files/PrintArgv.cs @@ -0,0 +1,13 @@ +using System; +// This has been compiled to an exe and uploaded to S3 bucket for argv test + +namespace PrintArgv +{ + class Program + { + static void Main(string[] args) + { + Console.WriteLine(string.Join(System.Environment.NewLine, args)); + } + } +} diff --git a/test/integration/targets/setup_win_printargv/meta/main.yml b/test/integration/targets/setup_win_printargv/meta/main.yml new file mode 100644 index 00000000..e3dd5fb1 --- /dev/null +++ b/test/integration/targets/setup_win_printargv/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/setup_win_printargv/tasks/main.yml b/test/integration/targets/setup_win_printargv/tasks/main.yml new file mode 100644 index 00000000..5f671924 --- /dev/null +++ b/test/integration/targets/setup_win_printargv/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: download the PrintArgv.exe binary to temp location + win_get_url: + url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_win_printargv/PrintArgv.exe + dest: '{{ remote_tmp_dir }}\PrintArgv.exe' + +- name: set fact containing PrintArgv binary path + set_fact: + win_printargv_path: '{{ remote_tmp_dir }}\PrintArgv.exe' diff --git a/test/integration/targets/shell/action_plugins/test_shell.py b/test/integration/targets/shell/action_plugins/test_shell.py new file mode 100644 index 00000000..6e66ed07 --- /dev/null +++ b/test/integration/targets/shell/action_plugins/test_shell.py @@ -0,0 +1,19 @@ +# This file is part of Ansible + +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more 
python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + result['shell'] = self._connection._shell.SHELL_FAMILY + return result diff --git a/test/integration/targets/shell/aliases b/test/integration/targets/shell/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/shell/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/shell/connection_plugins/test_connection_default.py b/test/integration/targets/shell/connection_plugins/test_connection_default.py new file mode 100644 index 00000000..52b027d0 --- /dev/null +++ b/test/integration/targets/shell/connection_plugins/test_connection_default.py @@ -0,0 +1,44 @@ +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +connection: test_connection_default +short_description: test connection plugin used in tests +description: +- This is a test connection plugin used for shell testing +author: ansible (@core) +version_added: historical +options: +''' + +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + + ''' test connection ''' + + transport = 'test_connection_default' + + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + + def transport(self): + pass + + def _connect(self): + pass + + def exec_command(self, cmd, in_data=None, sudoable=True): + pass + + def put_file(self, in_path, out_path): + pass + + def fetch_file(self, in_path, out_path): + pass + + def close(self): + pass diff --git a/test/integration/targets/shell/connection_plugins/test_connection_override.py b/test/integration/targets/shell/connection_plugins/test_connection_override.py new file mode 100644 index 00000000..56d531c4 --- /dev/null +++ b/test/integration/targets/shell/connection_plugins/test_connection_override.py @@ -0,0 +1,45 @@ +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +connection: test_connection_override +short_description: test connection plugin used in tests +description: +- This is a test connection plugin used for shell testing +author: ansible (@core) +version_added: historical +options: +''' + +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + + ''' test connection ''' + + transport = 'test_connection_override' + + def __init__(self, *args, **kwargs): + self._shell_type = 'powershell' # Set a shell type that is not sh + super(Connection, self).__init__(*args, **kwargs) + + def transport(self): + pass + + def _connect(self): + pass + + def exec_command(self, cmd, in_data=None, sudoable=True): + pass + + def put_file(self, in_path, out_path): + pass + + def fetch_file(self, in_path, out_path): + pass + + def close(self): + pass diff --git a/test/integration/targets/shell/tasks/main.yml b/test/integration/targets/shell/tasks/main.yml new file mode 100644 index 00000000..d6f2a2b5 --- /dev/null +++
b/test/integration/targets/shell/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: get shell when shell_type is not defined + test_shell: + register: shell_type_default + failed_when: shell_type_default.shell != 'sh' + vars: + ansible_connection: test_connection_default + +- name: get shell when shell_type is not defined but is overridden + test_shell: + register: shell_type_default_override + failed_when: shell_type_default_override.shell != item + vars: + ansible_connection: test_connection_default + ansible_shell_type: '{{ item }}' + with_items: + - powershell + - sh + +- name: get shell when shell_type is defined + test_shell: + register: shell_type_defined + failed_when: shell_type_defined.shell != 'powershell' + vars: + ansible_connection: test_connection_override + +- name: get shell when shell_type is defined but is overridden + test_shell: + register: shell_type_defined_override + failed_when: shell_type_defined_override.shell != item + vars: + ansible_connection: test_connection_override + ansible_shell_type: '{{ item }}' + with_items: + - powershell + - sh diff --git a/test/integration/targets/slurp/aliases b/test/integration/targets/slurp/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/slurp/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/slurp/files/bar.bin b/test/integration/targets/slurp/files/bar.bin new file mode 100644 index 00000000..38d4d8a4 Binary files /dev/null and b/test/integration/targets/slurp/files/bar.bin differ diff --git a/test/integration/targets/slurp/tasks/main.yml b/test/integration/targets/slurp/tasks/main.yml new file mode 100644 index 00000000..4f3556fa --- /dev/null +++ b/test/integration/targets/slurp/tasks/main.yml @@ -0,0 +1,98 @@ +# test code for the slurp module. Based on win_slurp test cases +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
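+ +# slurp always returns file content base64-encoded (encoding: base64), so the checks below decode it with the b64decode filter before comparing against the original text.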
+ +- name: Create a UTF-8 file to test with + copy: + content: 'We are at the café' + dest: '{{ output_dir }}/foo.txt' + +- name: test slurping an existing file + slurp: + src: '{{ output_dir }}/foo.txt' + register: slurp_existing + +- name: check slurp existing result + assert: + that: + - 'slurp_existing.content' + - 'slurp_existing.encoding == "base64"' + - 'slurp_existing is not changed' + - 'slurp_existing is not failed' + - 'slurp_existing.content | b64decode == "We are at the café"' + +- name: Create a binary file to test with + copy: + src: bar.bin + dest: '{{ output_dir }}/bar.bin' + +- name: test slurping a binary file + slurp: + path: '{{ output_dir }}/bar.bin' + register: slurp_binary + no_log: true + +- name: check slurp result of binary + assert: + that: + - "slurp_binary.content" + - "slurp_binary.encoding == 'base64'" + - "slurp_binary is not changed" + - "slurp_binary is not failed" + +- name: test slurping a non-existent file + slurp: + src: '{{ output_dir }}/i_do_not_exist' + register: slurp_missing + ignore_errors: true + +- name: check slurp missing result + assert: + that: + - "slurp_missing is failed" + - "slurp_missing.msg" + - "slurp_missing is not changed" + +- name: Create a directory to test with + file: + path: '{{ output_dir }}/baz/' + state: directory + +- name: test slurping a directory + slurp: + src: '{{ output_dir }}/baz' + register: slurp_dir + ignore_errors: true + +- name: check slurp directory result + assert: + that: + - "slurp_dir is failed" + - "slurp_dir.msg" + - "slurp_dir is not changed" + +- name: test slurp with missing argument + action: slurp + register: slurp_no_args + ignore_errors: true + +- name: check slurp with missing argument result + assert: + that: + - "slurp_no_args is failed" + - "slurp_no_args.msg" + - "slurp_no_args is not changed" diff --git a/test/integration/targets/special_vars/aliases b/test/integration/targets/special_vars/aliases new file mode 100644 index 00000000..2d9e6788 --- /dev/null +++ b/test/integration/targets/special_vars/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +needs/target/include_parent_role_vars diff --git a/test/integration/targets/special_vars/meta/main.yml b/test/integration/targets/special_vars/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/special_vars/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/special_vars/tasks/main.yml b/test/integration/targets/special_vars/tasks/main.yml new file mode 100644 index 00000000..0e71f1dc --- /dev/null +++ b/test/integration/targets/special_vars/tasks/main.yml @@ -0,0 +1,100 @@ +# test code for the template module +# (c) 2015, Brian Coca + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
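+ +# templates/foo.j2 writes each template_* special variable into special_vars.yaml, and include_vars loads that file back so every value can be asserted as a fact.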
+ +- name: verify ansible_managed + template: src=foo.j2 dest={{output_dir}}/special_vars.yaml + +- name: read the file into facts + include_vars: "{{output_dir}}/special_vars.yaml" + + +- name: verify all test vars are defined + assert: + that: + - 'item in hostvars[inventory_hostname].keys()' + with_items: + - test_template_host + - test_template_path + - test_template_mtime + - test_template_uid + - test_template_fullpath + - test_template_run_date + - test_ansible_managed + +- name: ensure that role_name exists in role_names, ansible_play_role_names, ansible_role_names, and not in ansible_dependent_role_names + assert: + that: + - "role_name in role_names" + - "role_name in ansible_play_role_names" + - "role_name in ansible_role_names" + - "role_name not in ansible_dependent_role_names" + +- name: ensure that our dependency (prepare_tests) exists in ansible_role_names and ansible_dependent_role_names, but not in role_names or ansible_play_role_names + assert: + that: + - "'prepare_tests' in ansible_role_names" + - "'prepare_tests' in ansible_dependent_role_names" + - "'prepare_tests' not in role_names" + - "'prepare_tests' not in ansible_play_role_names" + +- name: ensure that ansible_role_names is the sum of ansible_play_role_names and ansible_dependent_role_names + assert: + that: + - "(ansible_play_role_names + ansible_dependent_role_names)|unique|sort|list == ansible_role_names|sort|list" + +- name: check that ansible_parent_role_names is normally unset when not included/imported (before including other roles) + assert: + that: + - "ansible_parent_role_names is undefined" + - "ansible_parent_role_paths is undefined" + +- name: ansible_parent_role_names - test functionality by including another role + include_role: + name: include_parent_role_vars + tasks_from: included_by_other_role.yml + +- name: check that ansible_parent_role_names is normally unset when not included/imported (after including other role) + assert: + that: + - "ansible_parent_role_names is undefined" + - "ansible_parent_role_paths is undefined" + +- name: ansible_parent_role_names - test functionality by importing another role + import_role: + name: include_parent_role_vars + tasks_from: included_by_other_role.yml + +- name: check that ansible_parent_role_names is normally unset when not included/imported (after importing other role) + assert: + that: + - "ansible_parent_role_names is undefined" + - "ansible_parent_role_paths is undefined" + +- name: ansible_parent_role_names - test functionality by including another role + include_role: + name: include_parent_role_vars + +- name: check that ansible_parent_role_names is normally unset when not included/imported (after both import and include) + assert: + that: + - "ansible_parent_role_names is undefined" + - "ansible_parent_role_paths is undefined" + +- name: ansible_parent_role_names - test functionality by importing another role + import_role: + name: include_parent_role_vars diff --git a/test/integration/targets/special_vars/templates/foo.j2 b/test/integration/targets/special_vars/templates/foo.j2 new file mode 100644 index 00000000..0f6db2a1 --- /dev/null +++ b/test/integration/targets/special_vars/templates/foo.j2 @@ -0,0 +1,7 @@ +test_template_host: "{{template_host}}" +test_template_path: "{{template_path}}" +test_template_mtime: "{{template_mtime}}" +test_template_uid: "{{template_uid}}" +test_template_fullpath: "{{template_fullpath}}" +test_template_run_date: "{{template_run_date}}" +test_ansible_managed: "{{ansible_managed}}" diff --git
a/test/integration/targets/special_vars/vars/main.yml b/test/integration/targets/special_vars/vars/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/stat/aliases b/test/integration/targets/stat/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/stat/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/stat/files/foo.txt b/test/integration/targets/stat/files/foo.txt new file mode 100644 index 00000000..3e96db9b --- /dev/null +++ b/test/integration/targets/stat/files/foo.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/test/integration/targets/stat/meta/main.yml b/test/integration/targets/stat/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/stat/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/stat/tasks/main.yml b/test/integration/targets/stat/tasks/main.yml new file mode 100644 index 00000000..bd6b1e89 --- /dev/null +++ b/test/integration/targets/stat/tasks/main.yml @@ -0,0 +1,157 @@ +# test code for the stat module +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
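+ +# The checksum asserted below, 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed, is the SHA-1 of the exact content written here ('hello world'); stat reports SHA-1 checksums by default.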
+ +- name: make a new file + copy: dest={{output_dir}}/foo.txt mode=0644 content="hello world" + +- name: check stat of file + stat: path={{output_dir}}/foo.txt + register: stat_result + +- debug: var=stat_result + +- assert: + that: + - "'changed' in stat_result" + - "stat_result.changed == false" + - "'stat' in stat_result" + - "'atime' in stat_result.stat" + - "'ctime' in stat_result.stat" + - "'dev' in stat_result.stat" + - "'exists' in stat_result.stat" + - "'gid' in stat_result.stat" + - "'inode' in stat_result.stat" + - "'isblk' in stat_result.stat" + - "'ischr' in stat_result.stat" + - "'isdir' in stat_result.stat" + - "'isfifo' in stat_result.stat" + - "'isgid' in stat_result.stat" + - "'isreg' in stat_result.stat" + - "'issock' in stat_result.stat" + - "'isuid' in stat_result.stat" + - "'checksum' in stat_result.stat" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" + - "'mode' in stat_result.stat" + - "'mtime' in stat_result.stat" + - "'nlink' in stat_result.stat" + - "'pw_name' in stat_result.stat" + - "'rgrp' in stat_result.stat" + - "'roth' in stat_result.stat" + - "'rusr' in stat_result.stat" + - "'size' in stat_result.stat" + - "'uid' in stat_result.stat" + - "'wgrp' in stat_result.stat" + - "'woth' in stat_result.stat" + - "'wusr' in stat_result.stat" + - "'xgrp' in stat_result.stat" + - "'xoth' in stat_result.stat" + - "'xusr' in stat_result.stat" + +- name: make a symlink + file: + src: "{{ output_dir }}/foo.txt" + path: "{{ output_dir }}/foo-link" + state: link + +- name: check stat of a symlink with follow off + stat: + path: "{{ output_dir }}/foo-link" + register: stat_result + +- debug: var=stat_result + +- assert: + that: + - "'changed' in stat_result" + - "stat_result.changed == false" + - "'stat' in stat_result" + - "'atime' in stat_result.stat" + - "'ctime' in stat_result.stat" + - "'dev' in stat_result.stat" + - "'exists' in stat_result.stat" + - "'gid' in stat_result.stat" + - "'inode' in stat_result.stat" + - "'isblk' in stat_result.stat" + - "'ischr' in stat_result.stat" + - "'isdir' in stat_result.stat" + - "'isfifo' in stat_result.stat" + - "'isgid' in stat_result.stat" + - "'isreg' in stat_result.stat" + - "'issock' in stat_result.stat" + - "'isuid' in stat_result.stat" + - "'islnk' in stat_result.stat" + - "'mode' in stat_result.stat" + - "'mtime' in stat_result.stat" + - "'nlink' in stat_result.stat" + - "'pw_name' in stat_result.stat" + - "'rgrp' in stat_result.stat" + - "'roth' in stat_result.stat" + - "'rusr' in stat_result.stat" + - "'size' in stat_result.stat" + - "'uid' in stat_result.stat" + - "'wgrp' in stat_result.stat" + - "'woth' in stat_result.stat" + - "'wusr' in stat_result.stat" + - "'xgrp' in stat_result.stat" + - "'xoth' in stat_result.stat" + - "'xusr' in stat_result.stat" + +- name: check stat of a symlink with follow on + stat: + path: "{{ output_dir }}/foo-link" + follow: True + register: stat_result + +- debug: var=stat_result + +- assert: + that: + - "'changed' in stat_result" + - "stat_result.changed == false" + - "'stat' in stat_result" + - "'atime' in stat_result.stat" + - "'ctime' in stat_result.stat" + - "'dev' in stat_result.stat" + - "'exists' in stat_result.stat" + - "'gid' in stat_result.stat" + - "'inode' in stat_result.stat" + - "'isblk' in stat_result.stat" + - "'ischr' in stat_result.stat" + - "'isdir' in stat_result.stat" + - "'isfifo' in stat_result.stat" + - "'isgid' in stat_result.stat" + - "'isreg' in stat_result.stat" + - "'issock' in stat_result.stat" + - "'isuid' in 
stat_result.stat" + - "'checksum' in stat_result.stat" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" + - "'mode' in stat_result.stat" + - "'mtime' in stat_result.stat" + - "'nlink' in stat_result.stat" + - "'pw_name' in stat_result.stat" + - "'rgrp' in stat_result.stat" + - "'roth' in stat_result.stat" + - "'rusr' in stat_result.stat" + - "'size' in stat_result.stat" + - "'uid' in stat_result.stat" + - "'wgrp' in stat_result.stat" + - "'woth' in stat_result.stat" + - "'wusr' in stat_result.stat" + - "'xgrp' in stat_result.stat" + - "'xoth' in stat_result.stat" + - "'xusr' in stat_result.stat" diff --git a/test/integration/targets/strategy_linear/aliases b/test/integration/targets/strategy_linear/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/strategy_linear/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/strategy_linear/inventory b/test/integration/targets/strategy_linear/inventory new file mode 100644 index 00000000..4a34c320 --- /dev/null +++ b/test/integration/targets/strategy_linear/inventory @@ -0,0 +1,3 @@ +[local] +testhost ansible_connection=local +testhost2 ansible_connection=local diff --git a/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml b/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml new file mode 100644 index 00000000..51efd43e --- /dev/null +++ b/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml @@ -0,0 +1,6 @@ +- name: Include tasks + include_tasks: "tasks.yml" + +- name: Mark role as finished + set_fact: + role1_complete: True diff --git a/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml b/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml new file mode 100644 index 00000000..b7a46aa0 --- /dev/null +++ b/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml @@ -0,0 +1,7 @@ +- name: Call role2 + include_role: + name: role2 + +- name: Call role2 again + include_role: + name: role2 diff --git a/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml b/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml new file mode 100644 index 00000000..81e041e1 --- /dev/null +++ b/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml @@ -0,0 +1,7 @@ +- block: + - block: + - name: Nested task 1 + debug: msg="Nested task 1" + + - name: Nested task 2 + debug: msg="Nested task 2" diff --git a/test/integration/targets/strategy_linear/runme.sh b/test/integration/targets/strategy_linear/runme.sh new file mode 100755 index 00000000..41639f3c --- /dev/null +++ b/test/integration/targets/strategy_linear/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_include_file_noop.yml -i inventory "$@" diff --git a/test/integration/targets/strategy_linear/test_include_file_noop.yml b/test/integration/targets/strategy_linear/test_include_file_noop.yml new file mode 100644 index 00000000..9dbf83da --- /dev/null +++ b/test/integration/targets/strategy_linear/test_include_file_noop.yml @@ -0,0 +1,16 @@ +- hosts: + - testhost + - testhost2 + gather_facts: no + vars: + secondhost: testhost2 + tasks: + - name: Call the first role only on one host + include_role: + name: role1 + when: inventory_hostname is match(secondhost) + + - name: Make sure nothing else runs until role1 finishes + assert: + that: + - "'role1_complete' in hostvars[secondhost]" diff --git a/test/integration/targets/subversion/aliases 
b/test/integration/targets/subversion/aliases new file mode 100644 index 00000000..1dd2724e --- /dev/null +++ b/test/integration/targets/subversion/aliases @@ -0,0 +1,7 @@ +setup/always/setup_passlib +shippable/posix/group2 +skip/aix +skip/osx +skip/macos +destructive +needs/root diff --git a/test/integration/targets/subversion/roles/subversion/defaults/main.yml b/test/integration/targets/subversion/roles/subversion/defaults/main.yml new file mode 100644 index 00000000..af5ea026 --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/defaults/main.yml @@ -0,0 +1,10 @@ +--- +apache_port: 11386 # cannot use 80 as httptester overrides this +output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" +subversion_test_dir: '{{ output_dir }}/svn-test' +subversion_server_dir: /tmp/ansible-svn # cannot use a path in the home dir without userdir or granting exec permission to the apache user +subversion_repo_name: ansible-test-repo +subversion_repo_url: http://127.0.0.1:{{ apache_port }}/svn/{{ subversion_repo_name }} +subversion_repo_auth_url: http://127.0.0.1:{{ apache_port }}/svnauth/{{ subversion_repo_name }} +subversion_username: subsvn_user''' +subversion_password: Password123! diff --git a/test/integration/targets/subversion/roles/subversion/files/create_repo.sh b/test/integration/targets/subversion/roles/subversion/files/create_repo.sh new file mode 100644 index 00000000..cc7f4074 --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/files/create_repo.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +svnadmin create "$1" +svn mkdir "file://$PWD/$1/trunk" -m "make trunk" +svn mkdir "file://$PWD/$1/tags" -m "make tags" +svn mkdir "file://$PWD/$1/branches" -m "make branches" diff --git a/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml b/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml new file mode 100644 index 00000000..9be43b4c --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml @@ -0,0 +1,8 @@ +--- +- name: stop apache after tests + shell: "kill -9 $(cat '{{ subversion_server_dir }}/apache.pid')" + +- name: remove tmp subversion server dir + file: + path: '{{ subversion_server_dir }}' + state: absent diff --git a/test/integration/targets/subversion/roles/subversion/tasks/main.yml b/test/integration/targets/subversion/roles/subversion/tasks/main.yml new file mode 100644 index 00000000..0d6acb8a --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: setup subversion server + import_tasks: setup.yml + tags: setup + +- name: verify that subversion is installed so this test can continue + shell: which svn + tags: always + +- name: run tests + import_tasks: tests.yml + tags: tests + +- name: run warning + import_tasks: warnings.yml + tags: warnings + +- name: clean up + import_tasks: cleanup.yml + tags: cleanup diff --git a/test/integration/targets/subversion/roles/subversion/tasks/setup.yml b/test/integration/targets/subversion/roles/subversion/tasks/setup.yml new file mode 100644 index 00000000..5c9c5cb5 --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/setup.yml @@ -0,0 +1,63 @@ +--- +- name: clean out the checkout dir + file: + path: '{{ subversion_test_dir }}' + state: '{{ item }}' + loop: + - absent + - directory + +- name: install SVN pre-reqs + package: + name: '{{ subversion_packages }}' + state: present + +- name: upgrade SVN pre-reqs + package: + name: '{{ upgrade_packages }}' + state: latest + 
when: + - upgrade_packages | default([]) + +- name: create SVN home folder + file: + path: '{{ subversion_server_dir }}' + state: directory + +- name: setup selinux when enabled + include_tasks: setup_selinux.yml + when: ansible_selinux.status == "enabled" + +- name: template out configuration file + template: + src: subversion.conf.j2 + dest: '{{ subversion_server_dir }}/subversion.conf' + +- name: create a test repository + script: create_repo.sh {{ subversion_repo_name }} + args: + chdir: '{{ subversion_server_dir }}' + creates: '{{ subversion_server_dir }}/{{ subversion_repo_name }}' + +- name: apply ownership for all SVN directories + file: + path: '{{ subversion_server_dir }}' + owner: '{{ apache_user }}' + group: '{{ apache_group }}' + recurse: True + +- name: add test user to htpasswd for Subversion site + htpasswd: + path: '{{ subversion_server_dir }}/svn-auth-users' + name: '{{ subversion_username }}' + password: '{{ subversion_password }}' + state: present + +- name: start test Apache SVN site - non Red Hat + command: apachectl -k start -f {{ subversion_server_dir }}/subversion.conf + when: not ansible_os_family == 'RedHat' + +# On Red Hat based OSes, we can't use apachectl to start up our own instance, so just use the raw httpd +- name: start test Apache SVN site - Red Hat + command: httpd -k start -f {{ subversion_server_dir }}/subversion.conf + when: ansible_os_family == 'RedHat' diff --git a/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml b/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml new file mode 100644 index 00000000..a9ffa712 --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml @@ -0,0 +1,11 @@ +- name: set SELinux security context for SVN folder + sefcontext: + target: '{{ subversion_server_dir }}(/.*)?' + setype: '{{ item }}' + state: present + with_items: + - httpd_sys_content_t + - httpd_sys_rw_content_t + +- name: apply new SELinux context to filesystem + command: restorecon -irv {{ subversion_server_dir | quote }} diff --git a/test/integration/targets/subversion/roles/subversion/tasks/tests.yml b/test/integration/targets/subversion/roles/subversion/tasks/tests.yml new file mode 100644 index 00000000..8421f9de --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/tests.yml @@ -0,0 +1,133 @@ +# test code for the svn module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
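+ +# The module reports before/after state as lists of 'Revision: N' and 'URL: ...' strings (see the FIXME sample below), which is why the assertions index into subverted.after.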
+ +# checks out every branch so using a small repo + +- name: initial checkout + subversion: + repo: '{{ subversion_repo_url }}' + dest: '{{ subversion_test_dir }}/svn' + register: subverted + +- name: check if dir was checked out + stat: + path: '{{ subversion_test_dir }}/svn' + register: subverted_result + +# FIXME: the before/after logic here should be fixed to make them hashes, see GitHub 6078 +# looks like this: { +# "after": [ +# "Revision: 9", +# "URL: https://github.com/jimi-c/test_role" +# ], +# "before": null, +# "changed": true, +# "item": "" +# } +- name: verify information about the initial clone + assert: + that: + - "'after' in subverted" + - "subverted.after.1 == 'URL: ' ~ subversion_repo_url" + - "not subverted.before" + - "subverted.changed" + - subverted_result.stat.exists + +- name: repeated checkout + subversion: + repo: '{{ subversion_repo_url }}' + dest: '{{ subversion_test_dir }}/svn' + register: subverted2 + +- name: verify on a reclone things are marked unchanged + assert: + that: + - "not subverted2.changed" + +- name: check for tags + stat: path={{ subversion_test_dir }}/svn/tags + register: tags + +- name: check for trunk + stat: path={{ subversion_test_dir }}/svn/trunk + register: trunk + +- name: check for branches + stat: path={{ subversion_test_dir }}/svn/branches + register: branches + +- name: assert presence of tags/trunk/branches + assert: + that: + - "tags.stat.isdir" + - "trunk.stat.isdir" + - "branches.stat.isdir" + +- name: remove checked out repo + file: + path: '{{ subversion_test_dir }}/svn' + state: absent + +- name: checkout with quotes in username + subversion: + repo: '{{ subversion_repo_auth_url }}' + dest: '{{ subversion_test_dir }}/svn' + username: '{{ subversion_username }}' + password: '{{ subversion_password }}' + register: subverted3 + +- name: get result of checkout with quotes in username + stat: + path: '{{ subversion_test_dir }}/svn' + register: subverted3_result + +- name: assert checkout with quotes in username + assert: + that: + - subverted3 is changed + - subverted3_result.stat.exists + - subverted3_result.stat.isdir + +- name: checkout with export + subversion: + repo: '{{ subversion_repo_url }}' + dest: '{{ subversion_test_dir }}/svn-export' + export: True + register: subverted4 + +- name: check for tags + stat: path={{ subversion_test_dir }}/svn-export/tags + register: export_tags + +- name: check for trunk + stat: path={{ subversion_test_dir }}/svn-export/trunk + register: export_trunk + +- name: check for branches + stat: path={{ subversion_test_dir }}/svn-export/branches + register: export_branches + +- name: assert presence of tags/trunk/branches in export + assert: + that: + - "export_tags.stat.isdir" + - "export_trunk.stat.isdir" + - "export_branches.stat.isdir" + - "subverted4.changed" + +# TBA: test for additional options or URL variants welcome diff --git a/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml b/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml new file mode 100644 index 00000000..50ebd441 --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml @@ -0,0 +1,7 @@ +--- +- name: checkout using a password to test for a warning when using svn lt 1.10.0 + subversion: + repo: '{{ subversion_repo_auth_url }}' + dest: '{{ subversion_test_dir }}/svn' + username: '{{ subversion_username }}' + password: '{{ subversion_password }}' diff --git a/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2 
b/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2 new file mode 100644 index 00000000..07e7083a --- /dev/null +++ b/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2 @@ -0,0 +1,67 @@ +{% if ansible_os_family == "Debian" %} + +{# On Ubuntu 16.04 we can include the default config, other versions require explicit config #} +{% if ansible_distribution_version == "16.04" %} +Include /etc/apache2/apache2.conf + +{% else %} +Timeout 300 +KeepAlive On +MaxKeepAliveRequests 100 +KeepAliveTimeout 5 +User ${APACHE_RUN_USER} +Group ${APACHE_RUN_GROUP} +HostnameLookups Off +LogLevel warn +LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined +LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined +LogFormat "%h %l %u %t \"%r\" %>s %O" common +LogFormat "%{Referer}i -> %U" referer +LogFormat "%{User-agent}i" agent + +IncludeOptional mods-enabled/*.load +IncludeOptional mods-enabled/*.conf +IncludeOptional conf-enabled/*.conf +IncludeOptional sites-enabled/*conf + +<Directory /> + Require all denied +</Directory> + +{% endif %} + +{% elif ansible_os_family == "FreeBSD" %} +Include /usr/local/etc/apache24/httpd.conf +LoadModule dav_module libexec/apache24/mod_dav.so +LoadModule dav_svn_module libexec/apache24/mod_dav_svn.so +LoadModule authz_svn_module libexec/apache24/mod_authz_svn.so +{% elif ansible_os_family == "Suse" %} +Include /etc/apache2/httpd.conf +LoadModule dav_module /usr/lib64/apache2/mod_dav.so +LoadModule dav_svn_module /usr/lib64/apache2/mod_dav_svn.so +{% elif ansible_os_family == "RedHat" %} +Include /etc/httpd/conf/httpd.conf +{% endif %} + +PidFile {{ subversion_server_dir }}/apache.pid +Listen 127.0.0.1:{{ apache_port }} +ErrorLog {{ subversion_server_dir }}/apache2-error.log + +<Location /svn> + DAV svn + SVNParentPath {{ subversion_server_dir }} +{% if ansible_distribution == "CentOS" and ansible_distribution_version.startswith("6") %} + Allow from all +{% else %} + Require all granted +{% endif %} +</Location> + +<Location /svnauth> + DAV svn + SVNParentPath {{ subversion_server_dir }} + AuthType Basic + AuthName "Subversion repositories" + AuthUserFile {{ subversion_server_dir }}/svn-auth-users + Require valid-user +</Location> diff --git a/test/integration/targets/subversion/runme.sh b/test/integration/targets/subversion/runme.sh new file mode 100755 index 00000000..f505e581 --- /dev/null +++ b/test/integration/targets/subversion/runme.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -eu + +cleanup() { + echo "Cleanup" + ansible-playbook runme.yml -e "output_dir=${OUTPUT_DIR}" "$@" --tags cleanup + echo "Done" +} + +trap cleanup INT TERM EXIT + +export ANSIBLE_ROLES_PATH=roles/ + +# Ensure subversion is set up +ansible-playbook runme.yml "$@" -v --tags setup + +# Test functionality +ansible-playbook runme.yml "$@" -v --tags tests + +# Test a warning is displayed for versions < 1.10.0 when a password is provided +ansible-playbook runme.yml "$@" --tags warnings 2>&1 | tee out.txt + +version="$(svn --version -q)" +secure=$(python -c "from distutils.version import LooseVersion; print(LooseVersion('$version') >= LooseVersion('1.10.0'))") + +if [[ "${secure}" = "False" ]] && [[ "$(grep -c 'To securely pass credentials, upgrade svn to version 1.10.0' out.txt)" -eq 1 ]]; then + echo "Found the expected warning" +elif [[ "${secure}" = "False" ]]; then + echo "Expected a warning" + exit 1 +fi diff --git a/test/integration/targets/subversion/runme.yml b/test/integration/targets/subversion/runme.yml new file mode 100644 index
00000000..c67d7b89 --- /dev/null +++ b/test/integration/targets/subversion/runme.yml @@ -0,0 +1,15 @@ +--- +- hosts: localhost + tasks: + - name: load OS specific vars + include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_os_family }}.yml' + paths: '../vars' + tags: always + + - include_role: + name: subversion + tags: always diff --git a/test/integration/targets/subversion/vars/Debian.yml b/test/integration/targets/subversion/vars/Debian.yml new file mode 100644 index 00000000..bf7c2084 --- /dev/null +++ b/test/integration/targets/subversion/vars/Debian.yml @@ -0,0 +1,6 @@ +--- +subversion_packages: +- subversion +- libapache2-svn +apache_user: www-data +apache_group: www-data diff --git a/test/integration/targets/subversion/vars/FreeBSD.yml b/test/integration/targets/subversion/vars/FreeBSD.yml new file mode 100644 index 00000000..153f5235 --- /dev/null +++ b/test/integration/targets/subversion/vars/FreeBSD.yml @@ -0,0 +1,7 @@ +--- +subversion_packages: +- apache24 +- mod_dav_svn +- subversion +apache_user: www +apache_group: www diff --git a/test/integration/targets/subversion/vars/RedHat.yml b/test/integration/targets/subversion/vars/RedHat.yml new file mode 100644 index 00000000..3e3f9109 --- /dev/null +++ b/test/integration/targets/subversion/vars/RedHat.yml @@ -0,0 +1,10 @@ +--- +subversion_packages: +- mod_dav_svn +- subversion +upgrade_packages: +# prevent sqlite from being out-of-sync with the version subversion was compiled with +- subversion +- sqlite +apache_user: apache +apache_group: apache diff --git a/test/integration/targets/subversion/vars/Suse.yml b/test/integration/targets/subversion/vars/Suse.yml new file mode 100644 index 00000000..eab906ec --- /dev/null +++ b/test/integration/targets/subversion/vars/Suse.yml @@ -0,0 +1,6 @@ +--- +subversion_packages: +- subversion +- subversion-server +apache_user: wwwrun +apache_group: www diff --git a/test/integration/targets/subversion/vars/Ubuntu-18.yml b/test/integration/targets/subversion/vars/Ubuntu-18.yml new file mode 100644 index 00000000..dfe131b0 --- /dev/null +++ b/test/integration/targets/subversion/vars/Ubuntu-18.yml @@ -0,0 +1,6 @@ +--- +subversion_packages: +- subversion +- libapache2-mod-svn +apache_user: www-data +apache_group: www-data diff --git a/test/integration/targets/subversion/vars/Ubuntu-20.yml b/test/integration/targets/subversion/vars/Ubuntu-20.yml new file mode 100644 index 00000000..dfe131b0 --- /dev/null +++ b/test/integration/targets/subversion/vars/Ubuntu-20.yml @@ -0,0 +1,6 @@ +--- +subversion_packages: +- subversion +- libapache2-mod-svn +apache_user: www-data +apache_group: www-data diff --git a/test/integration/targets/systemd/aliases b/test/integration/targets/systemd/aliases new file mode 100644 index 00000000..f8e28c7e --- /dev/null +++ b/test/integration/targets/systemd/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/systemd/defaults/main.yml b/test/integration/targets/systemd/defaults/main.yml new file mode 100644 index 00000000..33063b86 --- /dev/null +++ b/test/integration/targets/systemd/defaults/main.yml @@ -0,0 +1 @@ +fake_service: nonexisting diff --git a/test/integration/targets/systemd/handlers/main.yml b/test/integration/targets/systemd/handlers/main.yml new file mode 100644 index 00000000..8643a2a0 --- /dev/null +++ b/test/integration/targets/systemd/handlers/main.yml @@ -0,0 +1,4 @@ +- name: remove unit file + file: + path: 
/etc/systemd/system/sleeper@.service + state: absent diff --git a/test/integration/targets/systemd/meta/main.yml b/test/integration/targets/systemd/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/systemd/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/systemd/tasks/main.yml b/test/integration/targets/systemd/tasks/main.yml new file mode 100644 index 00000000..96781eb8 --- /dev/null +++ b/test/integration/targets/systemd/tasks/main.yml @@ -0,0 +1,121 @@ +# Test code for the systemd module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +## +## systemctl +## + +- name: End if this system does not use systemd + meta: end_host + when: ansible_facts.service_mgr != 'systemd' + +- name: Include distribution specific variables + include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + - default.yml + paths: + - vars + +- name: get a list of running services + shell: systemctl | fgrep 'running' | awk '{print $1}' | sed 's/\.service//g' | fgrep -v '.'
| egrep ^[a-z] + register: running_names +- debug: var=running_names + +- name: check running state + systemd: + name: "{{ running_names.stdout_lines|random }}" + state: started + register: systemd_test0 +- debug: var=systemd_test0 +- name: validate results for test0 + assert: + that: + - 'systemd_test0.changed is defined' + - 'systemd_test0.name is defined' + - 'systemd_test0.state is defined' + - 'systemd_test0.status is defined' + - 'not systemd_test0.changed' + - 'systemd_test0.state == "started"' + +- name: the module must fail when a service is not found + systemd: + name: '{{ fake_service }}' + state: stopped + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - 'result is search("Could not find the requested service {{ fake_service }}")' + +- name: the module must fail in check_mode as well when a service is not found + systemd: + name: '{{ fake_service }}' + state: stopped + register: result + check_mode: yes + ignore_errors: yes + +- assert: + that: + - result is failed + - 'result is search("Could not find the requested service {{ fake_service }}")' + +- name: check that the module works even when systemd is offline (eg in chroot) + systemd: + name: "{{ running_names.stdout_lines|random }}" + state: started + environment: + SYSTEMD_OFFLINE: 1 + +- name: Disable ssh 1 + systemd: + name: '{{ ssh_service }}' + enabled: false + register: systemd_disable_ssh_1 + +- name: Disable ssh 2 + systemd: + name: '{{ ssh_service }}' + enabled: false + register: systemd_disable_ssh_2 + +- name: Enable ssh 1 + systemd: + name: '{{ ssh_service }}' + enabled: true + register: systemd_enable_ssh_1 + +- name: Enable ssh 2 + systemd: + name: '{{ ssh_service }}' + enabled: true + register: systemd_enable_ssh_2 + +- assert: + that: + - systemd_disable_ssh_2 is not changed + - systemd_enable_ssh_1 is changed + - systemd_enable_ssh_2 is not changed + +- import_tasks: test_unit_template.yml diff --git a/test/integration/targets/systemd/tasks/test_unit_template.yml b/test/integration/targets/systemd/tasks/test_unit_template.yml new file mode 100644 index 00000000..47cb1c78 --- /dev/null +++ b/test/integration/targets/systemd/tasks/test_unit_template.yml @@ -0,0 +1,50 @@ +- name: Copy service file + template: + src: sleeper@.service + dest: /etc/systemd/system/sleeper@.service + owner: root + group: root + mode: '0644' + notify: remove unit file + +- name: Reload systemd + systemd: + daemon_reload: yes + +- name: Start and enable service using unit template + systemd: + name: sleeper@100.service + state: started + enabled: yes + register: template_test_1 + +- name: Start and enable service using unit template again + systemd: + name: sleeper@100.service + state: started + enabled: yes + register: template_test_2 + +- name: Stop and disable service using unit template + systemd: + name: sleeper@100.service + state: stopped + enabled: no + register: template_test_3 + +- name: Stop and disable service using unit template again + systemd: + name: sleeper@100.service + state: stopped + enabled: no + register: template_test_4 + +- name: Assert unit template test results + assert: + that: + - template_test_1 is changed + - template_test_1 is success + - template_test_2 is not changed + - template_test_2 is success + - template_test_3 is changed + - template_test_4 is not changed diff --git a/test/integration/targets/systemd/templates/sleeper@.service b/test/integration/targets/systemd/templates/sleeper@.service new file mode 100644 index 00000000..8b47982a --- /dev/null +++
b/test/integration/targets/systemd/templates/sleeper@.service @@ -0,0 +1,8 @@ +[Unit] +Description=Basic service to use as a template + +[Service] +ExecStart={{ sleep_bin_path }} %i + +[Install] +WantedBy=multi-user.target diff --git a/test/integration/targets/systemd/vars/Debian.yml b/test/integration/targets/systemd/vars/Debian.yml new file mode 100644 index 00000000..9760744d --- /dev/null +++ b/test/integration/targets/systemd/vars/Debian.yml @@ -0,0 +1,2 @@ +ssh_service: ssh +sleep_bin_path: /bin/sleep diff --git a/test/integration/targets/systemd/vars/default.yml b/test/integration/targets/systemd/vars/default.yml new file mode 100644 index 00000000..57491ff0 --- /dev/null +++ b/test/integration/targets/systemd/vars/default.yml @@ -0,0 +1,2 @@ +ssh_service: sshd +sleep_bin_path: /usr/bin/sleep diff --git a/test/integration/targets/tags/aliases b/test/integration/targets/tags/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/tags/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/tags/runme.sh b/test/integration/targets/tags/runme.sh new file mode 100755 index 00000000..949fbd5f --- /dev/null +++ b/test/integration/targets/tags/runme.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -eu + +# Using set -x for this test causes the Shippable console to stop receiving updates and the job to time out for macOS. +# Once that issue is resolved the set -x option can be added above. + +# Run these using en_US.UTF-8 because list-tasks is a user output function and so it tailors its output to the +# user's locale. For unicode tags, this means replacing non-ascii chars with "?" + +COMMAND=(ansible-playbook -i ../../inventory test_tags.yml -v --list-tasks) + +export LC_ALL=en_US.UTF-8 + +# Run everything by default +[ "$("${COMMAND[@]}" | grep -F Task_with | xargs)" = \ +"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [くらとみ] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ] + +# Run the exact tags, and always +[ "$("${COMMAND[@]}" --tags tag | grep -F Task_with | xargs)" = \ +"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ] + +# Skip one tag +[ "$("${COMMAND[@]}" --skip-tags tag | grep -F Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [くらとみ] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ] + +# Skip a unicode tag +[ "$("${COMMAND[@]}" --skip-tags 'くらとみ' | grep -F Task_with | xargs)" = \ +"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ] + +# Run just a unicode tag and always +[ "$("${COMMAND[@]}" --tags 'くらとみ' | grep -F Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [くらとみ]" ] + +# Run a tag from a list of tags and always +[ "$("${COMMAND[@]}" --tags café | grep -F Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_list_of_tags TAGS: [café, press]" ] + +# Run tag with never +[ "$("${COMMAND[@]}" --tags donever | grep -F Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_never_tag TAGS: [donever, never]" ] + +# Run csv tags +[ "$("${COMMAND[@]}" --tags tag1 | grep -F 
Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_csv_tags TAGS: [tag1, tag2]" ] + +# Run templated tags +[ "$("${COMMAND[@]}" --tags tag3 | grep -F Task_with | xargs)" = \ +"Task_with_always_tag TAGS: [always] Task_with_templated_tags TAGS: [tag3]" ] diff --git a/test/integration/targets/tags/test_tags.yml b/test/integration/targets/tags/test_tags.yml new file mode 100644 index 00000000..76ac5ba4 --- /dev/null +++ b/test/integration/targets/tags/test_tags.yml @@ -0,0 +1,33 @@ +--- +- name: verify tags work as expected + hosts: testhost + gather_facts: False + vars: + the_tags: + - tag3 + tasks: + - name: Task_with_tag + debug: msg= + tags: tag + - name: Task_with_always_tag + debug: msg= + tags: always + - name: Task_with_unicode_tag + debug: msg= + tags: くらとみ + - name: Task_with_list_of_tags + debug: msg= + tags: + - café + - press + - name: Task_without_tag + debug: msg= + - name: Task_with_never_tag + debug: msg=NEVER + tags: ['never', 'donever'] + - name: Task_with_csv_tags + debug: msg=csv + tags: tag1,tag2 + - name: Task_with_templated_tags + debug: msg=templated + tags: "{{ the_tags }}" diff --git a/test/integration/targets/task_ordering/aliases b/test/integration/targets/task_ordering/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/task_ordering/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/task_ordering/meta/main.yml b/test/integration/targets/task_ordering/meta/main.yml new file mode 100644 index 00000000..1810d4be --- /dev/null +++ b/test/integration/targets/task_ordering/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/test/integration/targets/task_ordering/tasks/main.yml b/test/integration/targets/task_ordering/tasks/main.yml new file mode 100644 index 00000000..4a7828bf --- /dev/null +++ b/test/integration/targets/task_ordering/tasks/main.yml @@ -0,0 +1,15 @@ +- set_fact: + temppath: "{{ remote_tmp_dir }}/output.txt" + +- include: taskorder-include.yml + with_items: + - 1 + - 2 + - 3 + +- slurp: + src: "{{ temppath }}" + register: tempout + +- assert: + that: tempout.content | b64decode == "one.1.two.1.three.1.four.1.one.2.two.2.three.2.four.2.one.3.two.3.three.3.four.3." diff --git a/test/integration/targets/task_ordering/tasks/taskorder-include.yml b/test/integration/targets/task_ordering/tasks/taskorder-include.yml new file mode 100644 index 00000000..228e897e --- /dev/null +++ b/test/integration/targets/task_ordering/tasks/taskorder-include.yml @@ -0,0 +1,10 @@ +# This test ensures that included tasks are run in order. +# There have been regressions where included tasks and +# nested blocks ran out of order... + +- shell: printf one.{{ item }}. >> {{ temppath }} +- block: + - shell: printf two.{{ item }}. >> {{ temppath }} + - block: + - shell: printf three.{{ item }}. >> {{ temppath }} +- shell: printf four.{{ item }}. 
>> {{ temppath }} diff --git a/test/integration/targets/tasks/aliases b/test/integration/targets/tasks/aliases new file mode 100644 index 00000000..a6dafcf8 --- /dev/null +++ b/test/integration/targets/tasks/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/test/integration/targets/tasks/tasks/main.yml b/test/integration/targets/tasks/tasks/main.yml new file mode 100644 index 00000000..f6ac1114 --- /dev/null +++ b/test/integration/targets/tasks/tasks/main.yml @@ -0,0 +1,4 @@ +# make sure tasks with an undefined variable in the name are gracefully handled +- name: "Task name with undefined variable: {{ not_defined }}" + debug: + msg: Hello diff --git a/test/integration/targets/template/6653.yml b/test/integration/targets/template/6653.yml new file mode 100644 index 00000000..970478f9 --- /dev/null +++ b/test/integration/targets/template/6653.yml @@ -0,0 +1,10 @@ +- hosts: localhost + gather_facts: no + vars: + mylist: + - alpha + - bravo + tasks: + - name: Should not fail on undefined variable + set_fact: + template_result: "{{ lookup('template', '6653.j2') }}" diff --git a/test/integration/targets/template/72262.yml b/test/integration/targets/template/72262.yml new file mode 100644 index 00000000..33c610d4 --- /dev/null +++ b/test/integration/targets/template/72262.yml @@ -0,0 +1,6 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Should not fail on undefined variable + set_fact: + template_result: "{{ lookup('template', '72262.j2') }}" diff --git a/test/integration/targets/template/72615.yml b/test/integration/targets/template/72615.yml new file mode 100644 index 00000000..9a6eb941 --- /dev/null +++ b/test/integration/targets/template/72615.yml @@ -0,0 +1,26 @@ +- hosts: localhost + gather_facts: no + vars: + foo: "top-level-foo" + tasks: + - set_fact: + template_result: "{{ lookup('template', '72615.j2') }}" + + - assert: + that: + - "'template-level-bar' in template_result" + - "'template-nested-level-bar' in template_result" + + - assert: + that: + - "'top-level-foo' not in template_result" + - "'template-level-foo' in template_result" + - "'template-nested-level-foo' in template_result" + when: lookup('pipe', ansible_python_interpreter ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.9', '>=') + + - assert: + that: + - "'top-level-foo' in template_result" + - "'template-level-foo' not in template_result" + - "'template-nested-level-foo' not in template_result" + when: lookup('pipe', ansible_python_interpreter ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.9', '<') diff --git a/test/integration/targets/template/aliases b/test/integration/targets/template/aliases new file mode 100644 index 00000000..f0c24d20 --- /dev/null +++ b/test/integration/targets/template/aliases @@ -0,0 +1,3 @@ +needs/root +shippable/posix/group5 +skip/aix diff --git a/test/integration/targets/template/ansible_managed.cfg b/test/integration/targets/template/ansible_managed.cfg new file mode 100644 index 00000000..3626429f --- /dev/null +++ b/test/integration/targets/template/ansible_managed.cfg @@ -0,0 +1,2 @@ +[defaults] +ansible_managed=ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} diff --git a/test/integration/targets/template/ansible_managed.yml b/test/integration/targets/template/ansible_managed.yml new file mode 100644 index 00000000..2bd7c2c4 --- /dev/null +++ b/test/integration/targets/template/ansible_managed.yml @@ -0,0 +1,14 @@ +--- +- hosts: testhost + gather_facts: False + tasks: + - set_fact: + 
output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+    - file:
+        path: '{{ output_dir }}/café.txt'
+        state: 'absent'
+    # Smoketest that ansible_managed with non-ascii chars works:
+    # https://github.com/ansible/ansible/issues/27262
+    - template:
+        src: 'templates/café.j2'
+        dest: '{{ output_dir }}/café.txt'
diff --git a/test/integration/targets/template/corner_cases.yml b/test/integration/targets/template/corner_cases.yml
new file mode 100644
index 00000000..48782f79
--- /dev/null
+++ b/test/integration/targets/template/corner_cases.yml
@@ -0,0 +1,51 @@
+- name: test templating corner cases
+  hosts: localhost
+  gather_facts: false
+  vars:
+    empty_list: []
+    dont: I SHOULD NOT BE TEMPLATED
+    other: I WORK
+  tasks:
+    - name: 'ensure we are not interpolating data from outside of j2 delimiters'
+      assert:
+        that:
+          - '"I SHOULD NOT BE TEMPLATED" not in adjacent'
+          - globals1 == "[[], globals()]"
+          - globals2 == "[[], globals]"
+      vars:
+        adjacent: "{{ empty_list }} + [dont]"
+        globals1: "[{{ empty_list }}, globals()]"
+        globals2: "[{{ empty_list }}, globals]"
+
+    - name: 'ensure we can add lists'
+      assert:
+        that:
+          - (empty_list + [other]) == [other]
+          - (empty_list + [other, other]) == [other, other]
+          - (dont_exist|default([]) + [other]) == [other]
+          - ([other] + [empty_list, other]) == [other, [], other]
+
+    - name: 'ensure comments go away and we still do not interpolate in the string'
+      assert:
+        that:
+          - 'comm1 == " + [dont]"'
+          - 'comm2 == " #} + [dont]"'
+      vars:
+        comm1: '{# {{nothing}} {# #} + [dont]'
+        comm2: "{# {{nothing}} {# #} #} + [dont]"
+
+    - name: test additions with facts, set them up
+      set_fact:
+        inames: []
+        iname: "{{ prefix ~ '-options' }}"
+        iname_1: "{{ prefix ~ '-options-1' }}"
+      vars:
+        prefix: 'bo'
+
+    - name: add the facts
+      set_fact:
+        inames: '{{ inames + [iname, iname_1] }}'
+
+    - assert:
+        that:
+          - inames == ['bo-options', 'bo-options-1']
diff --git a/test/integration/targets/template/custom_tasks/tasks/main.yml b/test/integration/targets/template/custom_tasks/tasks/main.yml
new file mode 100644
index 00000000..182f7cca
--- /dev/null
+++ b/test/integration/targets/template/custom_tasks/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- set_fact:
+    output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- template:
+    src: test
+    dest: "{{ output_dir }}/templated_test"
+  register: custom_template_result
+
+- debug:
+    msg: "{{ custom_template_result }}"
+
+- assert:
+    that:
+      - custom_template_result.changed
diff --git a/test/integration/targets/template/custom_tasks/templates/test b/test/integration/targets/template/custom_tasks/templates/test
new file mode 100644
index 00000000..d033f125
--- /dev/null
+++ b/test/integration/targets/template/custom_tasks/templates/test
@@ -0,0 +1 @@
+Sample Text
diff --git a/test/integration/targets/template/custom_template.yml b/test/integration/targets/template/custom_template.yml
new file mode 100644
index 00000000..e5c7aac8
--- /dev/null
+++ b/test/integration/targets/template/custom_template.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+  gather_facts: yes
+  roles:
+    - { role: custom_tasks }
diff --git a/test/integration/targets/template/files/encoding_1252_utf-8.expected b/test/integration/targets/template/files/encoding_1252_utf-8.expected
new file mode 100644
index 00000000..0d3cc352
--- /dev/null
+++ b/test/integration/targets/template/files/encoding_1252_utf-8.expected
@@ -0,0 +1 @@
+windows-1252 Special Characters: €‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
diff --git
a/test/integration/targets/template/files/encoding_1252_windows-1252.expected b/test/integration/targets/template/files/encoding_1252_windows-1252.expected new file mode 100644 index 00000000..7fb94a7b --- /dev/null +++ b/test/integration/targets/template/files/encoding_1252_windows-1252.expected @@ -0,0 +1 @@ +windows-1252 Special Characters: diff --git a/test/integration/targets/template/files/foo-py26.txt b/test/integration/targets/template/files/foo-py26.txt new file mode 100644 index 00000000..76b0bb56 --- /dev/null +++ b/test/integration/targets/template/files/foo-py26.txt @@ -0,0 +1,9 @@ +templated_var_loaded + +{ + "bool": true, + "multi_part": "1Foo", + "null_type": null, + "number": 5, + "string_num": "5" +} diff --git a/test/integration/targets/template/files/foo.dos.txt b/test/integration/targets/template/files/foo.dos.txt new file mode 100644 index 00000000..b716eca0 --- /dev/null +++ b/test/integration/targets/template/files/foo.dos.txt @@ -0,0 +1,3 @@ +BEGIN +templated_var_loaded +END diff --git a/test/integration/targets/template/files/foo.txt b/test/integration/targets/template/files/foo.txt new file mode 100644 index 00000000..58af3be8 --- /dev/null +++ b/test/integration/targets/template/files/foo.txt @@ -0,0 +1,9 @@ +templated_var_loaded + +{ + "bool": true, + "multi_part": "1Foo", + "null_type": null, + "number": 5, + "string_num": "5" +} diff --git a/test/integration/targets/template/files/foo.unix.txt b/test/integration/targets/template/files/foo.unix.txt new file mode 100644 index 00000000..d33849f2 --- /dev/null +++ b/test/integration/targets/template/files/foo.unix.txt @@ -0,0 +1,3 @@ +BEGIN +templated_var_loaded +END diff --git a/test/integration/targets/template/files/import_as.expected b/test/integration/targets/template/files/import_as.expected new file mode 100644 index 00000000..fc6ea021 --- /dev/null +++ b/test/integration/targets/template/files/import_as.expected @@ -0,0 +1,3 @@ +hello world import as +WIBBLE +Goodbye diff --git a/test/integration/targets/template/files/import_as_with_context.expected b/test/integration/targets/template/files/import_as_with_context.expected new file mode 100644 index 00000000..7099a47a --- /dev/null +++ b/test/integration/targets/template/files/import_as_with_context.expected @@ -0,0 +1,2 @@ +hello world as qux with context +WIBBLE diff --git a/test/integration/targets/template/files/import_with_context.expected b/test/integration/targets/template/files/import_with_context.expected new file mode 100644 index 00000000..5323655a --- /dev/null +++ b/test/integration/targets/template/files/import_with_context.expected @@ -0,0 +1,3 @@ +hello world with context +WIBBLE +Goodbye diff --git a/test/integration/targets/template/files/lstrip_blocks_false.expected b/test/integration/targets/template/files/lstrip_blocks_false.expected new file mode 100644 index 00000000..12600012 --- /dev/null +++ b/test/integration/targets/template/files/lstrip_blocks_false.expected @@ -0,0 +1,4 @@ + hello world + hello world + hello world + diff --git a/test/integration/targets/template/files/lstrip_blocks_true.expected b/test/integration/targets/template/files/lstrip_blocks_true.expected new file mode 100644 index 00000000..1b11f8b2 --- /dev/null +++ b/test/integration/targets/template/files/lstrip_blocks_true.expected @@ -0,0 +1,3 @@ +hello world +hello world +hello world diff --git a/test/integration/targets/template/files/trim_blocks_false.expected b/test/integration/targets/template/files/trim_blocks_false.expected new file mode 100644 index 
00000000..283cefc8 --- /dev/null +++ b/test/integration/targets/template/files/trim_blocks_false.expected @@ -0,0 +1,4 @@ + +Hello world + +Goodbye diff --git a/test/integration/targets/template/files/trim_blocks_true.expected b/test/integration/targets/template/files/trim_blocks_true.expected new file mode 100644 index 00000000..03acd5d3 --- /dev/null +++ b/test/integration/targets/template/files/trim_blocks_true.expected @@ -0,0 +1,2 @@ +Hello world +Goodbye diff --git a/test/integration/targets/template/filter_plugins.yml b/test/integration/targets/template/filter_plugins.yml new file mode 100644 index 00000000..c3e97a54 --- /dev/null +++ b/test/integration/targets/template/filter_plugins.yml @@ -0,0 +1,9 @@ +- hosts: localhost + gather_facts: no + tasks: + - debug: + msg: "force templating in delegate_to before we hit the second one with a filter" + delegate_to: "{{ 'localhost' }}" + + - include_role: + name: role_filter diff --git a/test/integration/targets/template/meta/main.yml b/test/integration/targets/template/meta/main.yml new file mode 100644 index 00000000..06d4fd29 --- /dev/null +++ b/test/integration/targets/template/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_nobody diff --git a/test/integration/targets/template/role_filter/filter_plugins/myplugin.py b/test/integration/targets/template/role_filter/filter_plugins/myplugin.py new file mode 100644 index 00000000..44935ab0 --- /dev/null +++ b/test/integration/targets/template/role_filter/filter_plugins/myplugin.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python + + +class FilterModule(object): + def filters(self): + return {'parse_ip': self.parse_ip} + + def parse_ip(self, ip): + return ip diff --git a/test/integration/targets/template/role_filter/tasks/main.yml b/test/integration/targets/template/role_filter/tasks/main.yml new file mode 100644 index 00000000..7d962a29 --- /dev/null +++ b/test/integration/targets/template/role_filter/tasks/main.yml @@ -0,0 +1,3 @@ +- name: test + command: echo hello + delegate_to: "{{ '127.0.0.1' | parse_ip }}" diff --git a/test/integration/targets/template/runme.sh b/test/integration/targets/template/runme.sh new file mode 100755 index 00000000..cb00df75 --- /dev/null +++ b/test/integration/targets/template/runme.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=../ ansible-playbook template.yml -i ../../inventory -v "$@" + +# Test for #35571 +ansible testhost -i testhost, -m debug -a 'msg={{ hostvars["localhost"] }}' -e "vars1={{ undef }}" -e "vars2={{ vars1 }}" + +# Test for https://github.com/ansible/ansible/issues/27262 +ansible-playbook ansible_managed.yml -c ansible_managed.cfg -i ../../inventory -v "$@" + +# Test for #42585 +ANSIBLE_ROLES_PATH=../ ansible-playbook custom_template.yml -i ../../inventory -v "$@" + + +# Test for several corner cases #57188 +ansible-playbook corner_cases.yml -v "$@" + +# Test for #57351 +ansible-playbook filter_plugins.yml -v "$@" + +# https://github.com/ansible/ansible/issues/68699 +ansible-playbook unused_vars_include.yml -v "$@" + +# https://github.com/ansible/ansible/issues/55152 +ansible-playbook undefined_var_info.yml -v "$@" + +# https://github.com/ansible/ansible/issues/72615 +ansible-playbook 72615.yml -v "$@" + +# https://github.com/ansible/ansible/issues/6653 +ansible-playbook 6653.yml -v "$@" + +# https://github.com/ansible/ansible/issues/72262 +ansible-playbook 72262.yml -v "$@" diff --git a/test/integration/targets/template/tasks/backup_test.yml 
b/test/integration/targets/template/tasks/backup_test.yml
new file mode 100644
index 00000000..eb4eff17
--- /dev/null
+++ b/test/integration/targets/template/tasks/backup_test.yml
@@ -0,0 +1,60 @@
+# https://github.com/ansible/ansible/issues/24408
+
+- set_fact:
+    t_username: templateuser1
+    t_groupname: templateuser1
+
+- name: create the test group
+  group:
+    name: "{{ t_groupname }}"
+
+- name: create the test user
+  user:
+    name: "{{ t_username }}"
+    group: "{{ t_groupname }}"
+    createhome: no
+
+- name: set the dest file
+  set_fact:
+    t_dest: "{{ output_dir + '/tfile_dest.txt' }}"
+
+- name: create the old file
+  file:
+    path: "{{ t_dest }}"
+    state: touch
+    mode: 0777
+    owner: "{{ t_username }}"
+    group: "{{ t_groupname }}"
+
+- name: failsafe attr change in case the underlying system does not support it
+  shell: chattr =j "{{ t_dest }}"
+  ignore_errors: True
+
+- name: run the template
+  template:
+    src: foo.j2
+    dest: "{{ t_dest }}"
+    backup: True
+  register: t_backup_res
+
+- name: check the data for the backup
+  stat:
+    path: "{{ t_backup_res.backup_file }}"
+  register: t_backup_stats
+
+- name: validate result of preserved backup
+  assert:
+    that:
+      - 't_backup_stats.stat.mode == "0777"'
+      - 't_backup_stats.stat.pw_name == t_username'
+      - 't_backup_stats.stat.gr_name == t_groupname'
+
+- name: cleanup the user
+  user:
+    name: "{{ t_username }}"
+    state: absent
+
+- name: cleanup the group
+  group:
+    name: "{{ t_groupname }}"
+    state: absent
diff --git a/test/integration/targets/template/tasks/main.yml b/test/integration/targets/template/tasks/main.yml
new file mode 100644
index 00000000..da803436
--- /dev/null
+++ b/test/integration/targets/template/tasks/main.yml
@@ -0,0 +1,719 @@
+# test code for the template module
+# (c) 2014, Michael DeHaan
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+ +- set_fact: + output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" + +- name: show python interpreter + debug: + msg: "{{ ansible_python['executable'] }}" + +- name: show jinja2 version + debug: + msg: "{{ lookup('pipe', '{{ ansible_python[\"executable\"] }} -c \"import jinja2; print(jinja2.__version__)\"') }}" + +- name: get default group + shell: id -gn + register: group + +- name: fill in a basic template + template: src=foo.j2 dest={{output_dir}}/foo.templated mode=0644 + register: template_result + +- assert: + that: + - "'changed' in template_result" + - "'dest' in template_result" + - "'group' in template_result" + - "'gid' in template_result" + - "'md5sum' in template_result" + - "'checksum' in template_result" + - "'owner' in template_result" + - "'size' in template_result" + - "'src' in template_result" + - "'state' in template_result" + - "'uid' in template_result" + +- name: verify that the file was marked as changed + assert: + that: + - "template_result.changed == true" + +# Basic template with non-ascii names +- name: Check that non-ascii source and dest work + template: + src: 'café.j2' + dest: '{{ output_dir }}/café.txt' + register: template_results + +- name: Check that the resulting file exists + stat: + path: '{{ output_dir }}/café.txt' + register: stat_results + +- name: Check that template created the right file + assert: + that: + - 'template_results is changed' + - 'stat_results.stat["exists"]' + +# test for import with context on jinja-2.9 See https://github.com/ansible/ansible/issues/20494 +- name: fill in a template using import with context ala issue 20494 + template: src=import_with_context.j2 dest={{output_dir}}/import_with_context.templated mode=0644 + register: template_result + +- name: copy known good import_with_context.expected into place + copy: src=import_with_context.expected dest={{output_dir}}/import_with_context.expected + +- name: compare templated file to known good import_with_context + shell: diff -uw {{output_dir}}/import_with_context.templated {{output_dir}}/import_with_context.expected + register: diff_result + +- name: verify templated import_with_context matches known good + assert: + that: + - 'diff_result.stdout == ""' + - "diff_result.rc == 0" + +# test for nested include https://github.com/ansible/ansible/issues/34886 +- name: test if parent variables are defined in nested include + template: src=for_loop.j2 dest={{output_dir}}/for_loop.templated mode=0644 + +- name: save templated output + shell: "cat {{output_dir}}/for_loop.templated" + register: for_loop_out +- debug: var=for_loop_out +- name: verify variables got templated + assert: + that: + - '"foo" in for_loop_out.stdout' + - '"bar" in for_loop_out.stdout' + - '"bam" in for_loop_out.stdout' + +# test for 'import as' on jinja-2.9 See https://github.com/ansible/ansible/issues/20494 +- name: fill in a template using import as ala fails2 case in issue 20494 + template: src=import_as.j2 dest={{output_dir}}/import_as.templated mode=0644 + register: import_as_template_result + +- name: copy known good import_as.expected into place + copy: src=import_as.expected dest={{output_dir}}/import_as.expected + +- name: compare templated file to known good import_as + shell: diff -uw {{output_dir}}/import_as.templated {{output_dir}}/import_as.expected + register: import_as_diff_result + +- name: verify templated import_as matches known good + assert: + that: + - 'import_as_diff_result.stdout == ""' + - "import_as_diff_result.rc == 0" + +# test for 'import as with context' on jinja-2.9 See 
https://github.com/ansible/ansible/issues/20494 +- name: fill in a template using import as with context ala fails2 case in issue 20494 + template: src=import_as_with_context.j2 dest={{output_dir}}/import_as_with_context.templated mode=0644 + register: import_as_with_context_template_result + +- name: copy known good import_as_with_context.expected into place + copy: src=import_as_with_context.expected dest={{output_dir}}/import_as_with_context.expected + +- name: compare templated file to known good import_as_with_context + shell: diff -uw {{output_dir}}/import_as_with_context.templated {{output_dir}}/import_as_with_context.expected + register: import_as_with_context_diff_result + +- name: verify templated import_as_with_context matches known good + assert: + that: + - 'import_as_with_context_diff_result.stdout == ""' + - "import_as_with_context_diff_result.rc == 0" + +# VERIFY trim_blocks + +- name: Render a template with "trim_blocks" set to False + template: + src: trim_blocks.j2 + dest: "{{output_dir}}/trim_blocks_false.templated" + trim_blocks: False + register: trim_blocks_false_result + +- name: Get checksum of known good trim_blocks_false.expected + stat: + path: "{{role_path}}/files/trim_blocks_false.expected" + register: trim_blocks_false_good + +- name: Verify templated trim_blocks_false matches known good using checksum + assert: + that: + - "trim_blocks_false_result.checksum == trim_blocks_false_good.stat.checksum" + +- name: Render a template with "trim_blocks" set to True + template: + src: trim_blocks.j2 + dest: "{{output_dir}}/trim_blocks_true.templated" + trim_blocks: True + register: trim_blocks_true_result + +- name: Get checksum of known good trim_blocks_true.expected + stat: + path: "{{role_path}}/files/trim_blocks_true.expected" + register: trim_blocks_true_good + +- name: Verify templated trim_blocks_true matches known good using checksum + assert: + that: + - "trim_blocks_true_result.checksum == trim_blocks_true_good.stat.checksum" + +# VERIFY lstrip_blocks + +- name: Check support for lstrip_blocks in Jinja2 + shell: "{{ ansible_python.executable }} -c 'import jinja2; jinja2.defaults.LSTRIP_BLOCKS'" + register: lstrip_block_support + ignore_errors: True + +- name: Render a template with "lstrip_blocks" set to False + template: + src: lstrip_blocks.j2 + dest: "{{output_dir}}/lstrip_blocks_false.templated" + lstrip_blocks: False + register: lstrip_blocks_false_result + +- name: Get checksum of known good lstrip_blocks_false.expected + stat: + path: "{{role_path}}/files/lstrip_blocks_false.expected" + register: lstrip_blocks_false_good + +- name: Verify templated lstrip_blocks_false matches known good using checksum + assert: + that: + - "lstrip_blocks_false_result.checksum == lstrip_blocks_false_good.stat.checksum" + +- name: Render a template with "lstrip_blocks" set to True + template: + src: lstrip_blocks.j2 + dest: "{{output_dir}}/lstrip_blocks_true.templated" + lstrip_blocks: True + register: lstrip_blocks_true_result + ignore_errors: True + +- name: Verify exception is thrown if Jinja2 does not support lstrip_blocks but lstrip_blocks is used + assert: + that: + - "lstrip_blocks_true_result.failed" + - 'lstrip_blocks_true_result.msg is search(">=2.7")' + when: "lstrip_block_support is failed" + +- name: Get checksum of known good lstrip_blocks_true.expected + stat: + path: "{{role_path}}/files/lstrip_blocks_true.expected" + register: lstrip_blocks_true_good + when: "lstrip_block_support is successful" + +- name: Verify templated lstrip_blocks_true matches 
known good using checksum + assert: + that: + - "lstrip_blocks_true_result.checksum == lstrip_blocks_true_good.stat.checksum" + when: "lstrip_block_support is successful" + +# VERIFY CONTENTS + +- name: check what python version ansible is running on + command: "{{ ansible_python.executable }} -c 'import distutils.sysconfig ; print(distutils.sysconfig.get_python_version())'" + register: pyver + delegate_to: localhost + +- name: copy known good into place + copy: src=foo.txt dest={{output_dir}}/foo.txt + +- name: compare templated file to known good + shell: diff -uw {{output_dir}}/foo.templated {{output_dir}}/foo.txt + register: diff_result + +- name: verify templated file matches known good + assert: + that: + - 'diff_result.stdout == ""' + - "diff_result.rc == 0" + +# VERIFY MODE + +- name: set file mode + file: path={{output_dir}}/foo.templated mode=0644 + register: file_result + +- name: ensure file mode did not change + assert: + that: + - "file_result.changed != True" + +# VERIFY dest as a directory does not break file attributes +# Note: expanduser is needed to go down the particular codepath that was broken before +- name: setup directory for test + file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group={{ group.stdout }} + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group={{ group.stdout }} + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group={{ group.stdout }} + register: file_result + +- name: check that the file has the correct attributes + stat: path={{output_dir | expanduser}}/template-dir/foo.j2 + register: file_attrs + +- assert: + that: + - "file_attrs.stat.uid == 0" + - "file_attrs.stat.pw_name == 'root'" + - "file_attrs.stat.mode == '0600'" + +- name: check that the containing directory did not change attributes + stat: path={{output_dir | expanduser}}/template-dir/ + register: dir_attrs + +- assert: + that: + - "dir_attrs.stat.uid != 0" + - "dir_attrs.stat.pw_name == 'nobody'" + - "dir_attrs.stat.mode == '0755'" + +- name: Check that template to a directory where the directory does not end with a / is allowed + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir mode=0600 owner=root group={{ group.stdout }} + +- name: make a symlink to the templated file + file: + path: '{{ output_dir }}/foo.symlink' + src: '{{ output_dir }}/foo.templated' + state: link + +- name: check that templating the symlink results in the file being templated + template: + src: foo.j2 + dest: '{{output_dir}}/foo.symlink' + mode: 0600 + follow: True + register: template_result + +- assert: + that: + - "template_result.changed == True" + +- name: check that the file has the correct attributes + stat: path={{output_dir | expanduser}}/template-dir/foo.j2 + register: file_attrs + +- assert: + that: + - "file_attrs.stat.mode == '0600'" + +- name: check that templating the symlink again makes no changes + template: + src: foo.j2 + dest: '{{output_dir}}/foo.symlink' + mode: 0600 + follow: True + register: template_result + +- assert: + that: + - "template_result.changed == False" + +# Test strange filenames + +- name: Create a temp dir for filename tests + file: + state: directory + dest: '{{ output_dir }}/filename-tests' + +- name: create a file with an unusual filename + template: + src: foo.j2 + dest: "{{ output_dir }}/filename-tests/foo 
t'e~m\plated"
+  register: template_result
+
+- assert:
+    that:
+      - "template_result.changed == True"
+
+- name: check that the unusual filename was created
+  command: "ls {{ output_dir }}/filename-tests/"
+  register: unusual_results
+
+- assert:
+    that:
+      - "\"foo t'e~m\\plated\" in unusual_results.stdout_lines"
+      - "{{unusual_results.stdout_lines| length}} == 1"
+
+- name: check that the unusual filename can be checked for changes
+  template:
+    src: foo.j2
+    dest: "{{ output_dir }}/filename-tests/foo t'e~m\\plated"
+  register: template_result
+
+- assert:
+    that:
+      - "template_result.changed == False"
+
+
+# check_mode
+
+- name: fill in a basic template in check mode
+  template: src=short.j2 dest={{output_dir}}/short.templated
+  register: template_result
+  check_mode: True
+
+- name: check file exists
+  stat: path={{output_dir}}/short.templated
+  register: templated
+
+- name: verify that the file was marked as changed in check mode but was not created
+  assert:
+    that:
+      - "not templated.stat.exists"
+      - "template_result is changed"
+
+- name: fill in a basic template
+  template: src=short.j2 dest={{output_dir}}/short.templated
+
+- name: fill in a basic template in check mode
+  template: src=short.j2 dest={{output_dir}}/short.templated
+  register: template_result
+  check_mode: True
+
+- name: verify that the file was marked as not changed in check mode
+  assert:
+    that:
+      - "template_result is not changed"
+      - "'templated_var_loaded' in lookup('file', output_dir + '/short.templated')"
+
+- name: change var for the template
+  set_fact:
+    templated_var: "changed"
+
+- name: fill in a basic template with changed var in check mode
+  template: src=short.j2 dest={{output_dir}}/short.templated
+  register: template_result
+  check_mode: True
+
+- name: verify that the file was marked as changed in check mode but the content was not changed
+  assert:
+    that:
+      - "'templated_var_loaded' in lookup('file', output_dir + '/short.templated')"
+      - "template_result is changed"
+
+# Create a template using a child template, to ensure that variables
+# are passed properly from the parent to subtemplate context (issue #20063)
+
+- name: test parent and subtemplate creation of context
+  template: src=parent.j2 dest={{output_dir}}/parent_and_subtemplate.templated
+  register: template_result
+
+- stat: path={{output_dir}}/parent_and_subtemplate.templated
+
+- name: verify that the parent and subtemplate creation worked
+  assert:
+    that:
+      - "template_result is changed"
+
+#
+# template module can overwrite a file that's been hard linked
+# https://github.com/ansible/ansible/issues/10834
+#
+
+- name: ensure test dir is absent
+  file:
+    path: '{{ output_dir | expanduser }}/hlink_dir'
+    state: absent
+
+- name: create test dir
+  file:
+    path: '{{ output_dir | expanduser }}/hlink_dir'
+    state: directory
+
+- name: template out test file to system 1
+  template:
+    src: foo.j2
+    dest: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+
+- name: make hard link
+  file:
+    src: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+    dest: '{{ output_dir | expanduser }}/hlink_dir/test_file_hlink'
+    state: hard
+
+- name: template out test file to system 2
+  template:
+    src: foo.j2
+    dest: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+  register: hlink_result
+
+- name: check that the files are still hardlinked
+  stat:
+    path: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+  register: orig_file
+
+- name: check that the files are still hardlinked
+  stat:
+    path: '{{ output_dir | expanduser }}/hlink_dir/test_file_hlink'
+  register: hlink_file
+
+# We've done nothing at this point to update the content of the file so it should still be hardlinked
+- assert:
+    that:
+      - "hlink_result.changed == False"
+      - "orig_file.stat.inode == hlink_file.stat.inode"
+
+- name: change var for the template
+  set_fact:
+    templated_var: "templated_var_loaded"
+
+# UNIX TEMPLATE
+- name: fill in a basic template (Unix)
+  template:
+    src: foo2.j2
+    dest: '{{ output_dir }}/foo.unix.templated'
+  register: template_result
+
+- name: verify that the file was marked as changed (Unix)
+  assert:
+    that:
+      - 'template_result is changed'
+
+- name: fill in a basic template again (Unix)
+  template:
+    src: foo2.j2
+    dest: '{{ output_dir }}/foo.unix.templated'
+  register: template_result2
+
+- name: verify that the template was not changed (Unix)
+  assert:
+    that:
+      - 'template_result2 is not changed'
+
+# VERIFY UNIX CONTENTS
+- name: copy known good into place (Unix)
+  copy:
+    src: foo.unix.txt
+    dest: '{{ output_dir }}/foo.unix.txt'
+
+- name: Dump templated file (Unix)
+  command: hexdump -C {{ output_dir }}/foo.unix.templated
+
+- name: Dump expected file (Unix)
+  command: hexdump -C {{ output_dir }}/foo.unix.txt
+
+- name: compare templated file to known good (Unix)
+  command: diff -u {{ output_dir }}/foo.unix.templated {{ output_dir }}/foo.unix.txt
+  register: diff_result
+
+- name: verify templated file matches known good (Unix)
+  assert:
+    that:
+      - 'diff_result.stdout == ""'
+      - "diff_result.rc == 0"
+
+# DOS TEMPLATE
+- name: fill in a basic template (DOS)
+  template:
+    src: foo2.j2
+    dest: '{{ output_dir }}/foo.dos.templated'
+    newline_sequence: '\r\n'
+  register: template_result
+
+- name: verify that the file was marked as changed (DOS)
+  assert:
+    that:
+      - 'template_result is changed'
+
+- name: fill in a basic template again (DOS)
+  template:
+    src: foo2.j2
+    dest: '{{ output_dir }}/foo.dos.templated'
+    newline_sequence: '\r\n'
+  register: template_result2
+
+- name: verify that the template was not changed (DOS)
+  assert:
+    that:
+      - 'template_result2 is not changed'
+
+# VERIFY DOS CONTENTS
+- name: copy known good into place (DOS)
+  copy:
+    src: foo.dos.txt
+    dest: '{{ output_dir }}/foo.dos.txt'
+
+- name: Dump templated file (DOS)
+  command: hexdump -C {{ output_dir }}/foo.dos.templated
+
+- name: Dump expected file (DOS)
+  command: hexdump -C {{ output_dir }}/foo.dos.txt
+
+- name: compare templated file to known good (DOS)
+  command: diff -u {{ output_dir }}/foo.dos.templated {{ output_dir }}/foo.dos.txt
+  register: diff_result
+
+- name: verify templated file matches known good (DOS)
+  assert:
+    that:
+      - 'diff_result.stdout == ""'
+      - "diff_result.rc == 0"
+
+# VERIFY UNIX CONTENTS (again)
+- name: copy known good into place (Unix)
+  copy:
+    src: foo.unix.txt
+    dest: '{{ output_dir }}/foo.unix.txt'
+
+- name: Dump templated file (Unix)
+  command: hexdump -C {{ output_dir }}/foo.unix.templated
+
+- name: Dump expected file (Unix)
+  command: hexdump -C {{ output_dir }}/foo.unix.txt
+
+- name: compare templated file to known good (Unix)
+  command: diff -u {{ output_dir }}/foo.unix.templated {{ output_dir }}/foo.unix.txt
+  register: diff_result
+
+- name: verify templated file matches known good (Unix)
+  assert:
+    that:
+      - 'diff_result.stdout == ""'
+      - "diff_result.rc == 0"
+
+# Check that mode=preserve works with template
+- name: Create a template which has strange permissions
+  copy:
+    content: !unsafe '{{ ansible_managed }}\n'
+    dest: '{{ output_dir }}/foo-template.j2'
+    mode: 0547
+
delegate_to: localhost + +- name: Use template with mode=preserve + template: + src: '{{ output_dir }}/foo-template.j2' + dest: '{{ output_dir }}/foo-templated.txt' + mode: 'preserve' + register: template_results + +- name: Get permissions from the templated file + stat: + path: '{{ output_dir }}/foo-templated.txt' + register: stat_results + +- name: Check that the resulting file has the correct permissions + assert: + that: + - 'template_results is changed' + - 'template_results.mode == "0547"' + - 'stat_results.stat["mode"] == "0547"' + +# Test output_encoding +- name: Prepare the list of encodings we want to check, including empty string for defaults + set_fact: + template_encoding_1252_encodings: ['', 'utf-8', 'windows-1252'] + +- name: Copy known good encoding_1252_*.expected into place + copy: + src: 'encoding_1252_{{ item | default("utf-8", true) }}.expected' + dest: '{{ output_dir }}/encoding_1252_{{ item }}.expected' + loop: '{{ template_encoding_1252_encodings }}' + +- name: Generate the encoding_1252_* files from templates using various encoding combinations + template: + src: 'encoding_1252.j2' + dest: '{{ output_dir }}/encoding_1252_{{ item }}.txt' + output_encoding: '{{ item }}' + loop: '{{ template_encoding_1252_encodings }}' + +- name: Compare the encoding_1252_* templated files to known good + command: diff -u {{ output_dir }}/encoding_1252_{{ item }}.expected {{ output_dir }}/encoding_1252_{{ item }}.txt + register: encoding_1252_diff_result + loop: '{{ template_encoding_1252_encodings }}' + +- name: Check that nested undefined values return Undefined + vars: + dict_var: + bar: {} + list_var: + - foo: {} + assert: + that: + - dict_var is defined + - dict_var.bar is defined + - dict_var.bar.baz is not defined + - dict_var.bar.baz | default('DEFAULT') == 'DEFAULT' + - dict_var.bar.baz.abc is not defined + - dict_var.bar.baz.abc | default('DEFAULT') == 'DEFAULT' + - dict_var.baz is not defined + - dict_var.baz.abc is not defined + - dict_var.baz.abc | default('DEFAULT') == 'DEFAULT' + - list_var.0 is defined + - list_var.1 is not defined + - list_var.0.foo is defined + - list_var.0.foo.bar is not defined + - list_var.0.foo.bar | default('DEFAULT') == 'DEFAULT' + - list_var.1.foo is not defined + - list_var.1.foo | default('DEFAULT') == 'DEFAULT' + - dict_var is defined + - dict_var['bar'] is defined + - dict_var['bar']['baz'] is not defined + - dict_var['bar']['baz'] | default('DEFAULT') == 'DEFAULT' + - dict_var['bar']['baz']['abc'] is not defined + - dict_var['bar']['baz']['abc'] | default('DEFAULT') == 'DEFAULT' + - dict_var['baz'] is not defined + - dict_var['baz']['abc'] is not defined + - dict_var['baz']['abc'] | default('DEFAULT') == 'DEFAULT' + - list_var[0] is defined + - list_var[1] is not defined + - list_var[0]['foo'] is defined + - list_var[0]['foo']['bar'] is not defined + - list_var[0]['foo']['bar'] | default('DEFAULT') == 'DEFAULT' + - list_var[1]['foo'] is not defined + - list_var[1]['foo'] | default('DEFAULT') == 'DEFAULT' + - dict_var['bar'].baz is not defined + - dict_var['bar'].baz | default('DEFAULT') == 'DEFAULT' + +- template: + src: template_destpath_test.j2 + dest: "{{ output_dir }}/template_destpath.templated" + +- copy: + content: "{{ output_dir}}/template_destpath.templated\n" + dest: "{{ output_dir }}/template_destpath.expected" + +- name: compare templated file to known good template_destpath + shell: diff -uw {{output_dir}}/template_destpath.templated {{output_dir}}/template_destpath.expected + register: diff_result + +- name: verify templated 
template_destpath matches known good + assert: + that: + - 'diff_result.stdout == ""' + - "diff_result.rc == 0" + +- debug: + msg: "{{ 'x' in y }}" + ignore_errors: yes + register: error + +- name: check that proper error message is emitted when in operator is used + assert: + that: "\"'y' is undefined\" in error.msg" + +# aliases file requires root for template tests so this should be safe +- include: backup_test.yml diff --git a/test/integration/targets/template/template.yml b/test/integration/targets/template/template.yml new file mode 100644 index 00000000..d33293be --- /dev/null +++ b/test/integration/targets/template/template.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: yes + roles: + - { role: template } diff --git a/test/integration/targets/template/templates/6653-include.j2 b/test/integration/targets/template/templates/6653-include.j2 new file mode 100644 index 00000000..26443b15 --- /dev/null +++ b/test/integration/targets/template/templates/6653-include.j2 @@ -0,0 +1 @@ +{{ x }} diff --git a/test/integration/targets/template/templates/6653.j2 b/test/integration/targets/template/templates/6653.j2 new file mode 100644 index 00000000..8026a79b --- /dev/null +++ b/test/integration/targets/template/templates/6653.j2 @@ -0,0 +1,4 @@ +{% for x in mylist %} +{{ x }} +{% include '6653-include.j2' with context %} +{% endfor %} diff --git a/test/integration/targets/template/templates/72262-included.j2 b/test/integration/targets/template/templates/72262-included.j2 new file mode 100644 index 00000000..35700cb8 --- /dev/null +++ b/test/integration/targets/template/templates/72262-included.j2 @@ -0,0 +1 @@ +{{ vars.test }} diff --git a/test/integration/targets/template/templates/72262-vars.j2 b/test/integration/targets/template/templates/72262-vars.j2 new file mode 100644 index 00000000..6ef92208 --- /dev/null +++ b/test/integration/targets/template/templates/72262-vars.j2 @@ -0,0 +1 @@ +{% set test = "I'm test variable" %} diff --git a/test/integration/targets/template/templates/72262.j2 b/test/integration/targets/template/templates/72262.j2 new file mode 100644 index 00000000..b72be0d1 --- /dev/null +++ b/test/integration/targets/template/templates/72262.j2 @@ -0,0 +1,3 @@ +{% import '72262-vars.j2' as vars with context %} +{% macro included() %}{% include '72262-included.j2' %}{% endmacro %} +{{ included()|indent }} diff --git a/test/integration/targets/template/templates/72615-macro-nested.j2 b/test/integration/targets/template/templates/72615-macro-nested.j2 new file mode 100644 index 00000000..c47a4992 --- /dev/null +++ b/test/integration/targets/template/templates/72615-macro-nested.j2 @@ -0,0 +1,4 @@ +{% macro print_context_vars_nested(value) %} +foo: {{ foo }} +bar: {{ value }} +{% endmacro %} diff --git a/test/integration/targets/template/templates/72615-macro.j2 b/test/integration/targets/template/templates/72615-macro.j2 new file mode 100644 index 00000000..328c271c --- /dev/null +++ b/test/integration/targets/template/templates/72615-macro.j2 @@ -0,0 +1,8 @@ +{% macro print_context_vars(value) %} +{{ foo }} +{{ value }} +{% set foo = "template-nested-level-foo" %} +{% set bar = "template-nested-level-bar" %} +{% from '72615-macro-nested.j2' import print_context_vars_nested with context %} +{{ print_context_vars_nested(bar) }} +{% endmacro %} diff --git a/test/integration/targets/template/templates/72615.j2 b/test/integration/targets/template/templates/72615.j2 new file mode 100644 index 00000000..b79f88e2 --- /dev/null +++ 
b/test/integration/targets/template/templates/72615.j2 @@ -0,0 +1,4 @@ +{% set foo = "template-level-foo" %} +{% set bar = "template-level-bar" %} +{% from '72615-macro.j2' import print_context_vars with context %} +{{ print_context_vars(bar) }} diff --git a/test/integration/targets/template/templates/bar b/test/integration/targets/template/templates/bar new file mode 100644 index 00000000..2b60207f --- /dev/null +++ b/test/integration/targets/template/templates/bar @@ -0,0 +1 @@ +Goodbye diff --git "a/test/integration/targets/template/templates/caf\303\251.j2" "b/test/integration/targets/template/templates/caf\303\251.j2" new file mode 100644 index 00000000..ef7e08e7 --- /dev/null +++ "b/test/integration/targets/template/templates/caf\303\251.j2" @@ -0,0 +1 @@ +{{ ansible_managed }} diff --git a/test/integration/targets/template/templates/encoding_1252.j2 b/test/integration/targets/template/templates/encoding_1252.j2 new file mode 100644 index 00000000..0d3cc352 --- /dev/null +++ b/test/integration/targets/template/templates/encoding_1252.j2 @@ -0,0 +1 @@ +windows-1252 Special Characters: €‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ diff --git a/test/integration/targets/template/templates/foo.j2 b/test/integration/targets/template/templates/foo.j2 new file mode 100644 index 00000000..22187f91 --- /dev/null +++ b/test/integration/targets/template/templates/foo.j2 @@ -0,0 +1,3 @@ +{{ templated_var }} + +{{ templated_dict | to_nice_json }} diff --git a/test/integration/targets/template/templates/foo2.j2 b/test/integration/targets/template/templates/foo2.j2 new file mode 100644 index 00000000..e6e34852 --- /dev/null +++ b/test/integration/targets/template/templates/foo2.j2 @@ -0,0 +1,3 @@ +BEGIN +{{ templated_var }} +END diff --git a/test/integration/targets/template/templates/foo3.j2 b/test/integration/targets/template/templates/foo3.j2 new file mode 100644 index 00000000..710d55a7 --- /dev/null +++ b/test/integration/targets/template/templates/foo3.j2 @@ -0,0 +1,3 @@ +BEGIN +[% templated_var %] +END diff --git a/test/integration/targets/template/templates/for_loop.j2 b/test/integration/targets/template/templates/for_loop.j2 new file mode 100644 index 00000000..49fa412d --- /dev/null +++ b/test/integration/targets/template/templates/for_loop.j2 @@ -0,0 +1,4 @@ +{% for par_var in parent_vars %} +{% include 'for_loop_include.j2' %} + +{% endfor %} diff --git a/test/integration/targets/template/templates/for_loop_include.j2 b/test/integration/targets/template/templates/for_loop_include.j2 new file mode 100644 index 00000000..b1a0ad7d --- /dev/null +++ b/test/integration/targets/template/templates/for_loop_include.j2 @@ -0,0 +1,3 @@ +{% if par_var is defined %} +{% include 'for_loop_include_nested.j2' %} +{% endif %} diff --git a/test/integration/targets/template/templates/for_loop_include_nested.j2 b/test/integration/targets/template/templates/for_loop_include_nested.j2 new file mode 100644 index 00000000..368bce4b --- /dev/null +++ b/test/integration/targets/template/templates/for_loop_include_nested.j2 @@ -0,0 +1 @@ +{{ par_var }} diff --git a/test/integration/targets/template/templates/import_as.j2 b/test/integration/targets/template/templates/import_as.j2 new file mode 100644 index 00000000..b06f1be8 --- /dev/null +++ b/test/integration/targets/template/templates/import_as.j2 @@ -0,0 +1,4 @@ +{% import 'qux' as qux %} +hello world import as +{{ qux.wibble }} +{% include 'bar' %} diff --git 
a/test/integration/targets/template/templates/import_as_with_context.j2 b/test/integration/targets/template/templates/import_as_with_context.j2 new file mode 100644 index 00000000..3dd806a3 --- /dev/null +++ b/test/integration/targets/template/templates/import_as_with_context.j2 @@ -0,0 +1,3 @@ +{% import 'qux' as qux with context %} +hello world as qux with context +{{ qux.wibble }} diff --git a/test/integration/targets/template/templates/import_with_context.j2 b/test/integration/targets/template/templates/import_with_context.j2 new file mode 100644 index 00000000..104e68b3 --- /dev/null +++ b/test/integration/targets/template/templates/import_with_context.j2 @@ -0,0 +1,4 @@ +{% import 'qux' as qux with context %} +hello world with context +{{ qux.wibble }} +{% include 'bar' %} diff --git a/test/integration/targets/template/templates/lstrip_blocks.j2 b/test/integration/targets/template/templates/lstrip_blocks.j2 new file mode 100644 index 00000000..d572da67 --- /dev/null +++ b/test/integration/targets/template/templates/lstrip_blocks.j2 @@ -0,0 +1,8 @@ +{% set hello_world="hello world" %} +{% for i in [1, 2, 3] %} + {% if loop.first %} +{{hello_world}} + {% else %} +{{hello_world}} + {% endif %} +{% endfor %} diff --git a/test/integration/targets/template/templates/parent.j2 b/test/integration/targets/template/templates/parent.j2 new file mode 100644 index 00000000..99a8e4cc --- /dev/null +++ b/test/integration/targets/template/templates/parent.j2 @@ -0,0 +1,3 @@ +{% for parent_item in parent_vars %} +{% include "subtemplate.j2" %} +{% endfor %} diff --git a/test/integration/targets/template/templates/qux b/test/integration/targets/template/templates/qux new file mode 100644 index 00000000..d8cd22e4 --- /dev/null +++ b/test/integration/targets/template/templates/qux @@ -0,0 +1 @@ +{% set wibble = "WIBBLE" %} diff --git a/test/integration/targets/template/templates/short.j2 b/test/integration/targets/template/templates/short.j2 new file mode 100644 index 00000000..55aab8f1 --- /dev/null +++ b/test/integration/targets/template/templates/short.j2 @@ -0,0 +1 @@ +{{ templated_var }} diff --git a/test/integration/targets/template/templates/subtemplate.j2 b/test/integration/targets/template/templates/subtemplate.j2 new file mode 100644 index 00000000..f359bf20 --- /dev/null +++ b/test/integration/targets/template/templates/subtemplate.j2 @@ -0,0 +1,2 @@ +{{ parent_item }} + diff --git a/test/integration/targets/template/templates/template_destpath_test.j2 b/test/integration/targets/template/templates/template_destpath_test.j2 new file mode 100644 index 00000000..1d21d8cd --- /dev/null +++ b/test/integration/targets/template/templates/template_destpath_test.j2 @@ -0,0 +1 @@ +{{ template_destpath }} diff --git a/test/integration/targets/template/templates/trim_blocks.j2 b/test/integration/targets/template/templates/trim_blocks.j2 new file mode 100644 index 00000000..824a0a03 --- /dev/null +++ b/test/integration/targets/template/templates/trim_blocks.j2 @@ -0,0 +1,4 @@ +{% if True %} +Hello world +{% endif %} +Goodbye diff --git a/test/integration/targets/template/templates/unused_vars_include.j2 b/test/integration/targets/template/templates/unused_vars_include.j2 new file mode 100644 index 00000000..457cbbc0 --- /dev/null +++ b/test/integration/targets/template/templates/unused_vars_include.j2 @@ -0,0 +1 @@ +{{ var_set_in_template }} diff --git a/test/integration/targets/template/templates/unused_vars_template.j2 b/test/integration/targets/template/templates/unused_vars_template.j2 new file mode 
100644 index 00000000..28afc902 --- /dev/null +++ b/test/integration/targets/template/templates/unused_vars_template.j2 @@ -0,0 +1,2 @@ +{% set var_set_in_template=test_var %} +{% include "unused_vars_include.j2" %} diff --git a/test/integration/targets/template/undefined_var_info.yml b/test/integration/targets/template/undefined_var_info.yml new file mode 100644 index 00000000..b96a58db --- /dev/null +++ b/test/integration/targets/template/undefined_var_info.yml @@ -0,0 +1,15 @@ +- hosts: localhost + gather_facts: no + vars: + foo: [] + bar: "{{ foo[0] }}" + tasks: + - debug: + msg: "{{ bar }}" + register: result + ignore_errors: yes + + - assert: + that: + - '"foo[0]" in result.msg' + - '"object has no element 0" in result.msg' diff --git a/test/integration/targets/template/unused_vars_include.yml b/test/integration/targets/template/unused_vars_include.yml new file mode 100644 index 00000000..ff31b70d --- /dev/null +++ b/test/integration/targets/template/unused_vars_include.yml @@ -0,0 +1,8 @@ +- hosts: localhost + gather_facts: no + vars: + test_var: foo + unused_var: "{{ undefined_var }}" + tasks: + - debug: + msg: "{{ lookup('template', 'unused_vars_template.j2') }}" diff --git a/test/integration/targets/template/vars/main.yml b/test/integration/targets/template/vars/main.yml new file mode 100644 index 00000000..9d45cf24 --- /dev/null +++ b/test/integration/targets/template/vars/main.yml @@ -0,0 +1,20 @@ +templated_var: templated_var_loaded + +number_var: 5 +string_num: "5" +bool_var: true +part_1: 1 +part_2: "Foo" +null_type: !!null + +templated_dict: + number: "{{ number_var }}" + string_num: "{{ string_num }}" + null_type: "{{ null_type }}" + bool: "{{ bool_var }}" + multi_part: "{{ part_1 }}{{ part_2 }}" + +parent_vars: +- foo +- bar +- bam diff --git a/test/integration/targets/template_jinja2_latest/aliases b/test/integration/targets/template_jinja2_latest/aliases new file mode 100644 index 00000000..8602d059 --- /dev/null +++ b/test/integration/targets/template_jinja2_latest/aliases @@ -0,0 +1,4 @@ +needs/root +shippable/posix/group2 +needs/target/template +skip/aix diff --git a/test/integration/targets/template_jinja2_latest/main.yml b/test/integration/targets/template_jinja2_latest/main.yml new file mode 100644 index 00000000..aa7d6433 --- /dev/null +++ b/test/integration/targets/template_jinja2_latest/main.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: True + roles: + - { role: template } diff --git a/test/integration/targets/template_jinja2_latest/requirements.txt b/test/integration/targets/template_jinja2_latest/requirements.txt new file mode 100644 index 00000000..49a806fb --- /dev/null +++ b/test/integration/targets/template_jinja2_latest/requirements.txt @@ -0,0 +1,2 @@ +jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later +jinja2 ; python_version >= '2.7' diff --git a/test/integration/targets/template_jinja2_latest/runme.sh b/test/integration/targets/template_jinja2_latest/runme.sh new file mode 100755 index 00000000..d6a09677 --- /dev/null +++ b/test/integration/targets/template_jinja2_latest/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh + +pip install -U -r requirements.txt + +ANSIBLE_ROLES_PATH=../ +export ANSIBLE_ROLES_PATH + +ansible-playbook -i ../../inventory main.yml -v "$@" diff --git a/test/integration/targets/templating_lookups/aliases b/test/integration/targets/templating_lookups/aliases new file mode 100644 index 00000000..f8e28c7e --- 
/dev/null +++ b/test/integration/targets/templating_lookups/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/templating_lookups/runme.sh b/test/integration/targets/templating_lookups/runme.sh new file mode 100755 index 00000000..e958bcfb --- /dev/null +++ b/test/integration/targets/templating_lookups/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH=./ UNICODE_VAR=café ansible-playbook runme.yml "$@" + +ansible-playbook template_lookup_vaulted/playbook.yml --vault-password-file template_lookup_vaulted/test_vault_pass "$@" + +ansible-playbook template_deepcopy/playbook.yml -i template_deepcopy/hosts "$@" + +# https://github.com/ansible/ansible/issues/66943 +ansible-playbook template_lookup_safe_eval_unicode/playbook.yml "$@" diff --git a/test/integration/targets/templating_lookups/runme.yml b/test/integration/targets/templating_lookups/runme.yml new file mode 100644 index 00000000..a27337bb --- /dev/null +++ b/test/integration/targets/templating_lookups/runme.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + roles: + - { role: template_lookups } diff --git a/test/integration/targets/templating_lookups/template_deepcopy/hosts b/test/integration/targets/templating_lookups/template_deepcopy/hosts new file mode 100644 index 00000000..ecd3b966 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_deepcopy/hosts @@ -0,0 +1 @@ +h1 ansible_connection=local host_var=foo diff --git a/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml b/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml new file mode 100644 index 00000000..da55c167 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml @@ -0,0 +1,10 @@ +- hosts: h1 + gather_facts: no + tasks: + - set_fact: + templated_foo: "{{ lookup('template', 'template.in') }}" + + - name: Test that the hostvar was templated correctly + assert: + that: + - templated_foo == "foo\n" diff --git a/test/integration/targets/templating_lookups/template_deepcopy/template.in b/test/integration/targets/templating_lookups/template_deepcopy/template.in new file mode 100644 index 00000000..77de0adf --- /dev/null +++ b/test/integration/targets/templating_lookups/template_deepcopy/template.in @@ -0,0 +1 @@ +{{hostvars['h1'].host_var}} diff --git a/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml new file mode 100644 index 00000000..29e4b615 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml @@ -0,0 +1,8 @@ +- hosts: localhost + gather_facts: no + vars: + original_dict: "{{ lookup('template', 'template.json.j2') }}" + copy_dict: {} + tasks: + - set_fact: + copy_dict: "{{ copy_dict | combine(original_dict) }}" diff --git a/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2 b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2 new file mode 100644 index 00000000..bc31407c --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2 @@ -0,0 +1,4 @@ +{ + "key1": "ascii_value", + "key2": "unicode_value_křížek", +} diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml 
b/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml new file mode 100644 index 00000000..23f32e8f --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml @@ -0,0 +1,13 @@ +# https://github.com/ansible/ansible/issues/34209 +- hosts: localhost + gather_facts: no + vars: + hello_world: Hello World + tasks: + - name: Test that template lookup can handle vaulted templates + set_fact: + vaulted_hello_world: "{{ lookup('template', 'vaulted_hello.j2') }}" + + - assert: + that: + - "vaulted_hello_world|trim == 'Unvaulted Hello World!'" diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2 b/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2 new file mode 100644 index 00000000..a6e98bd8 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2 @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +33623433323331343363343830343365376233386637366264646634663632343963396664393463 +3734626234626639323061643863613164643365363063310a663336663762356135396430353435 +39303930613231336135623761363130653235666433383965306235653963343166633233323638 +6635303662333734300a623063393761376531636535383164333632613839663237336463616436 +62643437623538633335366435346532636666616139386332323034336530356131 diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass b/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass new file mode 100644 index 00000000..9daeafb9 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass @@ -0,0 +1 @@ +test diff --git a/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml b/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml new file mode 100644 index 00000000..da57631a --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml @@ -0,0 +1,31 @@ +- name: Task that fails due to templating error for plugin option + debug: msg="{{ 5 / 0 | int }}" + ignore_errors: true + register: result + +- assert: + that: + - result.failed + - result.exception + +- name: Loop that fails due to templating error in first entry and ignores errors + debug: msg="{{ 5 / item }}" + ignore_errors: true + register: result + loop: [0, 0, 1] + +- debug: var=result + +- assert: + that: + - result.results[0].failed + - result.results[0].exception + - result.results[0].item == 0 + + - result.results[1].failed + - result.results[1].exception + - result.results[1].item == 0 + + - not result.results[2].failed + - result.results[2].exception is undefined + - result.results[2].item == 1 diff --git a/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml b/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml new file mode 100644 index 00000000..f240a234 --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml @@ -0,0 +1,90 @@ +# UNICODE + +# https://github.com/ansible/ansible/issues/65297 +- name: get UNICODE_VAR environment var value + shell: "echo $UNICODE_VAR" + register: unicode_var_value + +- name: verify the UNICODE_VAR is defined + assert: + that: + - "unicode_var_value.stdout" + +- name: use env lookup to get UNICODE_VAR value + set_fact: + test_unicode_val: "{{ lookup('env', 'UNICODE_VAR') }}" + +- debug: 
var=unicode_var_value +- debug: var=test_unicode_val + +- name: compare unicode values + assert: + that: + - "test_unicode_val == unicode_var_value.stdout" + +# LOOKUP TEMPLATING + +- name: use bare interpolation + debug: msg="got {{item}}" + with_items: "{{things1}}" + register: bare_var + +- name: verify that list was interpolated + assert: + that: + - "bare_var.results[0].item == 1" + - "bare_var.results[1].item == 2" + +- name: use list with bare strings in it + debug: msg={{item}} + with_items: + - things2 + - things1 + +- name: use list with undefined var in it + debug: msg={{item}} + with_items: "{{things2}}" + ignore_errors: True + +# BUG #10073 nested template handling + +- name: set variable that clashes + set_fact: + PATH: foobar + +- name: get PATH environment var value + set_fact: + known_var_value: "{{ lookup('pipe', 'echo $PATH') }}" + +- name: do the lookup for env PATH + set_fact: + test_val: "{{ lookup('env', 'PATH') }}" + +- debug: var=test_val + +- name: compare values + assert: + that: + - "test_val != ''" + - "test_val == known_var_value" + +- name: set with_dict + shell: echo "{{ item.key + '=' + item.value }}" + with_dict: "{{ mydict }}" + +# BUG #34144 bad template caching + +- name: generate two random passwords + set_fact: + password1: "{{ lookup('password', '/dev/null length=20') }}" + password2: "{{ lookup('password', '/dev/null length=20') }}" + # If the passwords are generated randomly, the chance that they + # coincide is negligible (< 1e-18 assuming 120 bits of randomness + # per password). + +- name: make sure passwords are not the same + assert: + that: + - password1 != password2 + +- include_tasks: ./errors.yml diff --git a/test/integration/targets/templating_lookups/template_lookups/vars/main.yml b/test/integration/targets/templating_lookups/template_lookups/vars/main.yml new file mode 100644 index 00000000..4c44b1cb --- /dev/null +++ b/test/integration/targets/templating_lookups/template_lookups/vars/main.yml @@ -0,0 +1,9 @@ +mydict: + mykey1: myval1 + mykey2: myval2 +things1: + - 1 + - 2 +things2: + - "{{ foo }}" + - "{{ foob | default('') }}" diff --git a/test/integration/targets/templating_settings/aliases b/test/integration/targets/templating_settings/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/templating_settings/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/templating_settings/dont_warn_register.yml b/test/integration/targets/templating_settings/dont_warn_register.yml new file mode 100644 index 00000000..277ce788 --- /dev/null +++ b/test/integration/targets/templating_settings/dont_warn_register.yml @@ -0,0 +1,6 @@ +- hosts: testhost + gather_facts: false + tasks: + - name: a template in register warns, but a plain string should not + debug: msg=unimportant + register: thisshouldnotwarn diff --git a/test/integration/targets/templating_settings/runme.sh b/test/integration/targets/templating_settings/runme.sh new file mode 100755 index 00000000..2fb202c3 --- /dev/null +++ b/test/integration/targets/templating_settings/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_templating_settings.yml -i ../../inventory -v "$@" +[ "$(ansible-playbook dont_warn_register.yml -i ../../inventory -v "$@" 2>&1| grep -c 'is not templatable, but we found')" == "0" ] diff --git a/test/integration/targets/templating_settings/test_templating_settings.yml b/test/integration/targets/templating_settings/test_templating_settings.yml new file mode 100644
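(Editorial aside on the template_lookups tasks above: the two-password assertion guards against the template-caching regression tracked as BUG #34144, where two evaluations of an identical lookup expression could return a single cached result. A minimal standalone sketch of the property being tested follows; this is a hypothetical playbook, not part of the patch, and only assumes the built-in password lookup shown above.)

- hosts: localhost
  gather_facts: no
  tasks:
    - name: evaluate the same password lookup twice in one task
      set_fact:
        pw_a: "{{ lookup('password', '/dev/null length=20') }}"
        pw_b: "{{ lookup('password', '/dev/null length=20') }}"

    - name: each evaluation must yield a fresh password, not a cached one
      assert:
        that:
          - pw_a != pw_b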
index 00000000..0c024dfd --- /dev/null +++ b/test/integration/targets/templating_settings/test_templating_settings.yml @@ -0,0 +1,14 @@ +--- +- name: 'Test templating in name' + hosts: testhost + vars: + a_list: + - 'part' + - 'of a' + - 'name' + + tasks: + # Note: this only tests that we do not traceback. It doesn't test that the + # name goes through templating correctly + - name: 'Task: {{ a_list | to_json }}' + debug: msg='{{ a_list | to_json }}' diff --git a/test/integration/targets/test_core/aliases b/test/integration/targets/test_core/aliases new file mode 100644 index 00000000..041b0cc7 --- /dev/null +++ b/test/integration/targets/test_core/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/test_core/inventory b/test/integration/targets/test_core/inventory new file mode 100644 index 00000000..0fdd8ae3 --- /dev/null +++ b/test/integration/targets/test_core/inventory @@ -0,0 +1 @@ +unreachable ansible_connection=ssh ansible_host=127.0.0.1 ansible_port=1011 # IANA Reserved port diff --git a/test/integration/targets/test_core/runme.sh b/test/integration/targets/test_core/runme.sh new file mode 100755 index 00000000..c20c1741 --- /dev/null +++ b/test/integration/targets/test_core/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eu + +ANSIBLE_ROLES_PATH=../ ansible-playbook --vault-password-file vault-password runme.yml -i inventory "${@}" diff --git a/test/integration/targets/test_core/runme.yml b/test/integration/targets/test_core/runme.yml new file mode 100644 index 00000000..20a94672 --- /dev/null +++ b/test/integration/targets/test_core/runme.yml @@ -0,0 +1,4 @@ +- hosts: localhost + gather_facts: no + roles: + - test_core diff --git a/test/integration/targets/test_core/tasks/main.yml b/test/integration/targets/test_core/tasks/main.yml new file mode 100644 index 00000000..50c43581 --- /dev/null +++ b/test/integration/targets/test_core/tasks/main.yml @@ -0,0 +1,303 @@ +- name: Failure + set_fact: + hello: world + failed_when: true + ignore_errors: yes + register: intentional_failure + +- name: Success + set_fact: + hello: world + register: intentional_success + +- name: Try failure test on non-dictionary + set_fact: + hello: "{{ 'nope' is failure }}" + ignore_errors: yes + register: misuse_of_failure + +- name: Assert failure tests work + assert: + that: + - intentional_failure is failed # old name + - intentional_failure is failure + - intentional_success is not failure + - misuse_of_failure is failed + +- name: Assert successful tests work + assert: + that: + - intentional_success is succeeded # old name + - intentional_success is success # old name + - intentional_success is successful + - intentional_failure is not successful + +- name: Try reachable host + command: id + register: reachable_host + +- name: Try unreachable host + command: id + delegate_to: unreachable + ignore_unreachable: yes + ignore_errors: yes + register: unreachable_host + +- name: Try reachable test on non-dictionary + set_fact: + hello: "{{ 'nope' is reachable }}" + ignore_errors: yes + register: misuse_of_reachable + +- name: Assert reachable tests work + assert: + that: + - misuse_of_reachable is failed + - reachable_host is reachable + - unreachable_host is not reachable + +- name: Try unreachable test on non-dictionary + set_fact: + hello: "{{ 'nope' is unreachable }}" + ignore_errors: yes + register: misuse_of_unreachable + +- name: Assert unreachable tests work + 
assert: + that: + - misuse_of_unreachable is failed + - reachable_host is not unreachable + - unreachable_host is unreachable + +- name: Make changes + file: + path: dir_for_changed + state: directory + register: directory_created + +- name: Make no changes + file: + path: dir_for_changed + state: directory + register: directory_unchanged + +- name: Try changed test on non-dictionary + set_fact: + hello: "{{ 'nope' is changed }}" + ignore_errors: yes + register: misuse_of_changed + +# providing artificial task results since there are no modules in ansible-base that provide a 'results' list instead of 'changed' +- name: Prepare artificial task results + set_fact: + results_all_changed: + results: + - changed: true + - changed: true + results_some_changed: + results: + - changed: true + - changed: false + results_none_changed: + results: + - changed: false + - changed: false + results_missing_changed: {} + +- name: Assert changed tests work + assert: + that: + - directory_created is changed + - directory_unchanged is not changed + - misuse_of_changed is failed + - results_all_changed is changed + - results_some_changed is changed + - results_none_changed is not changed + - results_missing_changed is not changed + +- name: Skip me + set_fact: + hello: world + when: false + register: skipped_task + +- name: Don't skip me + set_fact: + hello: world + register: executed_task + +- name: Try skipped test on non-dictionary + set_fact: + hello: "{{ 'nope' is skipped }}" + ignore_errors: yes + register: misuse_of_skipped + +- name: Assert skipped tests work + assert: + that: + - skipped_task is skipped + - executed_task is not skipped + - misuse_of_skipped is failure + +- name: Not an async task + set_fact: + hello: world + register: non_async_task + +- name: Complete an async task + command: id + async: 10 + poll: 1 + register: async_completed + +- name: Start an async task without waiting for completion + shell: sleep 3 + async: 10 + poll: 0 + register: async_incomplete + +- name: Try finished test on non-dictionary + set_fact: + hello: "{{ 'nope' is finished }}" + ignore_errors: yes + register: misuse_of_finished + +- name: Assert finished tests work (warning expected) + assert: + that: + - non_async_task is finished + - misuse_of_finished is failed + - async_completed is finished + - async_incomplete is not finished + +- name: Try started test on non-dictionary + set_fact: + hello: "{{ 'nope' is started }}" + ignore_errors: yes + register: misuse_of_started + +- name: Assert started tests work (warning expected) + assert: + that: + - non_async_task is started + - misuse_of_started is failed + - async_completed is started + - async_incomplete is started + +- name: Assert match tests work + assert: + that: + - "'hello' is match('h.ll.')" + - "'hello' is not match('.ll.')" + +- name: Assert search tests work + assert: + that: + - "'hello' is search('.l')" + - "'hello' is not search('nope')" + +- name: Assert regex tests work + assert: + that: + - "'hello' is regex('.l')" + - "'hello' is regex('.L', ignorecase=true)" + - "'hello\nAnsible' is regex('^Ansible', multiline=true)" + - "'hello' is not regex('.L')" + - "'hello\nAnsible' is not regex('^Ansible')" + +- name: Try version tests with bad operator + set_fact: + result: "{{ '1.0' is version('1.0', 'equals') }}" + ignore_errors: yes + register: version_bad_operator + +- name: Try version tests with bad value + set_fact: + result: "{{ '1.0' is version('nope', '==', true) }}" + ignore_errors: yes + register: version_bad_value + +- name: Assert 
version tests work + assert: + that: + - "'1.0' is version_compare('1.0', '==')" # old name + - "'1.0' is version('1.0', '==')" + - "'1.0' is version('2.0', '!=')" + - "'1.0' is version('2.0', '<')" + - "'2.0' is version('1.0', '>')" + - "'1.0' is version('1.0', '<=')" + - "'1.0' is version('1.0', '>=')" + - "'1.0' is version_compare('1.0', '==', true)" # old name + - "'1.0' is version('1.0', '==', true)" + - "'1.0' is version('2.0', '!=', true)" + - "'1.0' is version('2.0', '<', true)" + - "'2.0' is version('1.0', '>', true)" + - "'1.0' is version('1.0', '<=', true)" + - "'1.0' is version('1.0', '>=', true)" + - version_bad_operator is failed + - version_bad_value is failed + +- name: Assert any tests work + assert: + that: + - "[true, false] is any" + - "[false] is not any" + +- name: Assert all tests work + assert: + that: + - "[true] is all" + - "[true, false] is not all" + +- name: Assert truthy tests work + assert: + that: + - '"string" is truthy' + - '"" is not truthy' + - True is truthy + - False is not truthy + - true is truthy + - false is not truthy + - 1 is truthy + - 0 is not truthy + - '[""] is truthy' + - '[] is not truthy' + - '"on" is truthy(convert_bool=True)' + - '"off" is not truthy(convert_bool=True)' + - '"fred" is truthy(convert_bool=True)' + - '{} is not truthy' + - '{"key": "value"} is truthy' + +- name: Assert falsy tests work + assert: + that: + - '"string" is not falsy' + - '"" is falsy' + - True is not falsy + - False is falsy + - true is not falsy + - false is falsy + - 1 is not falsy + - 0 is falsy + - '[""] is not falsy' + - '[] is falsy' + - '"on" is not falsy(convert_bool=True)' + - '"off" is falsy(convert_bool=True)' + - '{} is falsy' + - '{"key": "value"} is not falsy' + +- name: Create vaulted variable for vault_encrypted test + set_fact: + vaulted_value: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 35323961353038346165643738646465376139363061353835303739663538343266303232326635 + 3365353662646236356665323135633630656238316530640a663362363763633436373439663031 + 33663433383037396438656464636433653837376361313638366362333037323961316364363363 + 3835616438623261650a636164376534376661393134326662326362323131373964313961623365 + 3833 + +- name: Assert vault_encrypted tests work + assert: + that: + - vaulted_value is vault_encrypted + - inventory_hostname is not vault_encrypted diff --git a/test/integration/targets/test_core/vault-password b/test/integration/targets/test_core/vault-password new file mode 100644 index 00000000..96973929 --- /dev/null +++ b/test/integration/targets/test_core/vault-password @@ -0,0 +1 @@ +test-vault-password diff --git a/test/integration/targets/test_files/aliases b/test/integration/targets/test_files/aliases new file mode 100644 index 00000000..041b0cc7 --- /dev/null +++ b/test/integration/targets/test_files/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/test_files/tasks/main.yml b/test/integration/targets/test_files/tasks/main.yml new file mode 100644 index 00000000..0d51fc95 --- /dev/null +++ b/test/integration/targets/test_files/tasks/main.yml @@ -0,0 +1,60 @@ +- name: Create a broken symbolic link + file: + src: does_not_exist + dest: link_to_nonexistent_file + state: link + force: yes + follow: no + +- name: Assert directory tests work + assert: + that: + - "'.' is is_dir" # old name + - "'.' 
is directory" + - "'does_not_exist' is not directory" + +- name: Assert file tests work + assert: + that: + - "(role_path + '/aliases') is is_file" # old name + - "(role_path + '/aliases') is file" + - "'does_not_exist' is not file" + +- name: Assert link tests work + assert: + that: + - "'link_to_nonexistent_file' is link" + - "'.' is not link" + +- name: Assert exists tests work + assert: + that: + - "(role_path + '/aliases') is exists" + - "'link_to_nonexistent_file' is not exists" + +- name: Assert link_exists tests work + assert: + that: + - "'link_to_nonexistent_file' is link_exists" + - "'does_not_exist' is not link_exists" + +- name: Assert abs tests work + assert: + that: + - "'/' is is_abs" # old name + - "'/' is abs" + - "'../' is not abs" + +- name: Assert same_file tests work + assert: + that: + - "'/' is is_same_file('/')" # old name + - "'/' is same_file('/')" + - "'/' is not same_file(role_path + '/aliases')" + +- name: Assert mount tests work + assert: + that: + - "'/' is is_mount" # old name + - "'/' is mount" + - "'/does_not_exist' is not mount" diff --git a/test/integration/targets/test_mathstuff/aliases b/test/integration/targets/test_mathstuff/aliases new file mode 100644 index 00000000..041b0cc7 --- /dev/null +++ b/test/integration/targets/test_mathstuff/aliases @@ -0,0 +1,2 @@ +shippable/posix/group5 +skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller diff --git a/test/integration/targets/test_mathstuff/tasks/main.yml b/test/integration/targets/test_mathstuff/tasks/main.yml new file mode 100644 index 00000000..dd379ce2 --- /dev/null +++ b/test/integration/targets/test_mathstuff/tasks/main.yml @@ -0,0 +1,38 @@ +- name: Get Jinja2 version + set_fact: + jinja2_version: >- + {{ lookup('pipe', '{{ ansible_playbook_python }} -c "import jinja2; print(jinja2.__version__)"') }} + +- name: Assert subset tests work + assert: + that: + - "[1] is issubset([1, 2])" # old name + - "[1] is subset([1, 2])" + - "[1] is not subset([2])" + +- name: Assert superset tests work + assert: + that: + - "[1, 2] is issuperset([1])" # old name + - "[1, 2] is superset([1])" + - "[2] is not superset([1])" + +- name: Assert contains tests work + assert: + that: + - "[1] is contains(1)" + - "[1] is not contains(2)" + +- name: Assert nan tests work + assert: + that: + - "'bad' is not nan" + - "1.1 | float is not nan" + +# Jinja2 versions prior to 2.10 will traceback when using: 'nan' | float +- name: Assert nan tests work (Jinja2 2.10+) + assert: + that: + - "'nan' | float is isnan" # old name + - "'nan' | float is nan" + when: jinja2_version is version('2.10', '>=') diff --git a/test/integration/targets/throttle/aliases b/test/integration/targets/throttle/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/throttle/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/throttle/group_vars/all.yml b/test/integration/targets/throttle/group_vars/all.yml new file mode 100644 index 00000000..b04b2aae --- /dev/null +++ b/test/integration/targets/throttle/group_vars/all.yml @@ -0,0 +1,4 @@ +--- +throttledir: '{{ base_throttledir }}/{{ subdir }}' +base_throttledir: "{{ lookup('env', 'OUTPUT_DIR') }}/throttle.dir" +subdir: "{{ test_id if lookup('env', 'SELECTED_STRATEGY') in ['free', 'host_pinned'] else '' }}" diff --git a/test/integration/targets/throttle/inventory b/test/integration/targets/throttle/inventory new file mode 100644 index 00000000..9f062d94 --- /dev/null +++ 
b/test/integration/targets/throttle/inventory @@ -0,0 +1,6 @@ +[localhosts] +testhost[00:11] + +[localhosts:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/integration/targets/throttle/runme.sh b/test/integration/targets/throttle/runme.sh new file mode 100755 index 00000000..0db5098d --- /dev/null +++ b/test/integration/targets/throttle/runme.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -eux + +# https://github.com/ansible/ansible/pull/42528 +SELECTED_STRATEGY='linear' ansible-playbook test_throttle.yml -vv -i inventory --forks 12 "$@" +SELECTED_STRATEGY='free' ansible-playbook test_throttle.yml -vv -i inventory --forks 12 "$@" diff --git a/test/integration/targets/throttle/test_throttle.py b/test/integration/targets/throttle/test_throttle.py new file mode 100755 index 00000000..3ee8424e --- /dev/null +++ b/test/integration/targets/throttle/test_throttle.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import time + +# read the args from sys.argv +throttledir, inventory_hostname, max_throttle = sys.argv[1:] +# format/create additional vars +max_throttle = int(max_throttle) +throttledir = os.path.expanduser(throttledir) +throttlefile = os.path.join(throttledir, inventory_hostname) +try: + # create the file + with(open(throttlefile, 'a')): + os.utime(throttlefile, None) + # count the number of files in the dir + throttlelist = os.listdir(throttledir) + print("tasks: %d/%d" % (len(throttlelist), max_throttle)) + # if we have too many files, fail + if len(throttlelist) > max_throttle: + print(throttlelist) + raise ValueError("Too many concurrent tasks: %d/%d" % (len(throttlelist), max_throttle)) + time.sleep(1.5) +finally: + # remove the file, then wait to make sure it's gone + os.unlink(throttlefile) + while True: + if not os.path.exists(throttlefile): + break + time.sleep(0.1) diff --git a/test/integration/targets/throttle/test_throttle.yml b/test/integration/targets/throttle/test_throttle.yml new file mode 100644 index 00000000..8990ea2f --- /dev/null +++ b/test/integration/targets/throttle/test_throttle.yml @@ -0,0 +1,84 @@ +--- +- hosts: localhosts + gather_facts: false + strategy: linear + run_once: yes + tasks: + - name: Clean base throttledir '{{ base_throttledir }}' + file: + state: absent + path: '{{ base_throttledir }}' + ignore_errors: yes + + - name: Create throttledir '{{ throttledir }}' + file: + state: directory + path: '{{ throttledir }}' + loop: "{{ range(1, test_count|int)|list }}" + loop_control: + loop_var: test_id + vars: + test_count: "{{ 9 if lookup('env', 'SELECTED_STRATEGY') in ['free', 'host_pinned'] else 2 }}" + +- hosts: localhosts + gather_facts: false + strategy: "{{ lookup('env', 'SELECTED_STRATEGY') }}" + tasks: + - block: + - name: "Test 1 (max throttle: 3)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 3" + vars: + test_id: 1 + throttle: 3 + - block: + - name: "Test 2 (max throttle: 5)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 5" + throttle: 5 + vars: + test_id: 2 + - block: + - name: "Test 3 (max throttle: 8)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8" + throttle: 8 + throttle: 6 + vars: + test_id: 3 + - block: + - block: + - name: "Test 4 (max throttle: 8)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8" + throttle: 8 + vars: + test_id: 4 + throttle: 6 + throttle: 12 + throttle: 15 + - 
block: + - name: "Test 5 (max throttle: 3)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 3" + vars: + test_id: 5 + throttle: 3 + - block: + - name: "Test 6 (max throttle: 5)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 5" + throttle: 5 + vars: + test_id: 6 + - block: + - name: "Test 7 (max throttle: 6)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 6" + throttle: 6 + vars: + test_id: 7 + throttle: 3 + - block: + - block: + - name: "Test 8 (max throttle: 8)" + script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8" + throttle: 8 + vars: + test_id: 8 + throttle: 6 + throttle: 4 + throttle: 2 diff --git a/test/integration/targets/unarchive/aliases b/test/integration/targets/unarchive/aliases new file mode 100644 index 00000000..db9bbd8c --- /dev/null +++ b/test/integration/targets/unarchive/aliases @@ -0,0 +1,4 @@ +needs/root +shippable/posix/group2 +destructive +skip/aix diff --git a/test/integration/targets/unarchive/files/foo.txt b/test/integration/targets/unarchive/files/foo.txt new file mode 100644 index 00000000..7c6ded14 --- /dev/null +++ b/test/integration/targets/unarchive/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git "a/test/integration/targets/unarchive/files/test-unarchive-nonascii-\343\201\217\343\202\211\343\201\250\343\201\277.tar.gz" "b/test/integration/targets/unarchive/files/test-unarchive-nonascii-\343\201\217\343\202\211\343\201\250\343\201\277.tar.gz" new file mode 100644 index 00000000..4882b920 Binary files /dev/null and "b/test/integration/targets/unarchive/files/test-unarchive-nonascii-\343\201\217\343\202\211\343\201\250\343\201\277.tar.gz" differ diff --git a/test/integration/targets/unarchive/meta/main.yml b/test/integration/targets/unarchive/meta/main.yml new file mode 100644 index 00000000..56245b3d --- /dev/null +++ b/test/integration/targets/unarchive/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - prepare_tests + - setup_remote_tmp_dir + - setup_gnutar diff --git a/test/integration/targets/unarchive/tasks/main.yml b/test/integration/targets/unarchive/tasks/main.yml new file mode 100644 index 00000000..7051539c --- /dev/null +++ b/test/integration/targets/unarchive/tasks/main.yml @@ -0,0 +1,16 @@ +- import_tasks: prepare_tests.yml +- import_tasks: test_tar.yml +- import_tasks: test_tar_gz.yml +- import_tasks: test_tar_gz_creates.yml +- import_tasks: test_tar_gz_owner_group.yml +- import_tasks: test_tar_gz_keep_newer.yml +- import_tasks: test_zip.yml +- import_tasks: test_exclude.yml +- import_tasks: test_parent_not_writeable.yml +- import_tasks: test_mode.yml +- import_tasks: test_quotable_characters.yml +- import_tasks: test_non_ascii_filename.yml +- import_tasks: test_missing_files.yml +- import_tasks: test_symlink.yml +- import_tasks: test_download.yml +- import_tasks: test_unprivileged_user.yml diff --git a/test/integration/targets/unarchive/tasks/prepare_tests.yml b/test/integration/targets/unarchive/tasks/prepare_tests.yml new file mode 100644 index 00000000..783d77d3 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/prepare_tests.yml @@ -0,0 +1,92 @@ +# Need unzip for unarchive module, and zip for archive creation. 
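(Editorial aside on the throttle tests above: the throttle keyword caps how many hosts may run a given task or block concurrently, within the overall limit set by --forks, and the nested blocks above exercise which level's value takes effect. A minimal sketch of the keyword follows; this is a hypothetical play, not part of the patch, reusing the 12-host localhosts inventory defined earlier.)

- hosts: localhosts
  gather_facts: no
  strategy: free
  tasks:
    - name: at most 3 of the 12 hosts execute this task at the same time
      command: sleep 1
      throttle: 3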
+- name: Ensure zip & unzip are present + package: + name: + - zip + - unzip + when: ansible_pkg_mgr in ('yum', 'dnf', 'apt', 'pkgng') + +- name: prep our file + copy: + src: foo.txt + dest: "{{remote_tmp_dir}}/foo-unarchive.txt" + mode: preserve + +- name: prep a tar file + shell: tar cvf test-unarchive.tar foo-unarchive.txt chdir={{remote_tmp_dir}} + +- name: prep a tar.gz file + shell: tar czvf test-unarchive.tar.gz foo-unarchive.txt chdir={{remote_tmp_dir}} + +- name: prep a chmodded file for zip + copy: + src: foo.txt + dest: '{{remote_tmp_dir}}/foo-unarchive-777.txt' + mode: '0777' + +- name: prep a windows permission file for our zip + copy: + src: foo.txt + dest: '{{remote_tmp_dir}}/FOO-UNAR.TXT' + mode: preserve + +# This gets around an unzip timestamp bug in some distributions +# Recent unzip on Ubuntu and BSD will randomly round some timestamps up. +# But that doesn't seem to happen when the timestamp has an even second. +- name: Bug workaround + command: touch -t "201705111530.00" {{remote_tmp_dir}}/foo-unarchive.txt {{remote_tmp_dir}}/foo-unarchive-777.txt {{remote_tmp_dir}}/FOO-UNAR.TXT +# See Ubuntu bug 1691636: https://bugs.launchpad.net/ubuntu/+source/unzip/+bug/1691636 +# When these are fixed, this code should be removed. + +- name: prep a zip file + shell: zip test-unarchive.zip foo-unarchive.txt foo-unarchive-777.txt chdir={{remote_tmp_dir}} + +- name: Prepare - Create test dirs + file: + path: "{{remote_tmp_dir}}/{{item}}" + state: directory + with_items: + - created/include + - created/exclude + - created/other + +- name: Prepare - Create test files + file: + path: "{{remote_tmp_dir}}/created/{{item}}" + state: touch + with_items: + - include/include-1.txt + - include/include-2.txt + - include/include-3.txt + - exclude/exclude-1.txt + - exclude/exclude-2.txt + - exclude/exclude-3.txt + - other/include-1.ext + - other/include-2.ext + - other/exclude-1.ext + - other/exclude-2.ext + - other/other-1.ext + - other/other-2.ext + +- name: Prepare - zip file + shell: zip -r {{remote_tmp_dir}}/unarchive-00.zip * chdir={{remote_tmp_dir}}/created/ + +- name: Prepare - tar file + shell: tar czvf {{remote_tmp_dir}}/unarchive-00.tar * chdir={{remote_tmp_dir}}/created/ + +- name: add a file with Windows permissions to zip file + shell: zip -k test-unarchive.zip FOO-UNAR.TXT chdir={{remote_tmp_dir}} + +- name: prep a subdirectory + file: + path: '{{remote_tmp_dir}}/unarchive-dir' + state: directory + +- name: prep our file + copy: + src: foo.txt + dest: '{{remote_tmp_dir}}/unarchive-dir/foo-unarchive.txt' + mode: preserve + +- name: prep a tar.gz file with directory + shell: tar czvf test-unarchive-dir.tar.gz unarchive-dir chdir={{remote_tmp_dir}} diff --git a/test/integration/targets/unarchive/tasks/test_download.yml b/test/integration/targets/unarchive/tasks/test_download.yml new file mode 100644 index 00000000..6b17449b --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_download.yml @@ -0,0 +1,34 @@ +# Test downloading a file before unarchiving it +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: Install packages to make TLS connections work on CentOS 6 + pip: + name: + - urllib3==1.10.2 + - ndg_httpsclient==0.4.4 + - pyOpenSSL==16.2.0 + state: present + when: + - ansible_facts.distribution == 'CentOS' + - not ansible_facts.python.has_sslcontext + +- name: unarchive a tar from a URL + unarchive: + src: "https://releases.ansible.com/ansible/ansible-latest.tar.gz" + dest: "{{ 
remote_tmp_dir }}/test-unarchive-tar-gz" + mode: "0700" + remote_src: yes + register: unarchive13 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive13.changed == true" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_exclude.yml b/test/integration/targets/unarchive/tasks/test_exclude.yml new file mode 100644 index 00000000..be24756c --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_exclude.yml @@ -0,0 +1,48 @@ +- name: "Create {{ remote_tmp_dir }}/exclude directory" + file: + state: directory + path: "{{ remote_tmp_dir }}/exclude-{{item}}" + with_items: + - zip + - tar + +- name: Unpack archive file excluding regular and glob files. + unarchive: + src: "{{ remote_tmp_dir }}/unarchive-00.{{item}}" + dest: "{{ remote_tmp_dir }}/exclude-{{item}}" + remote_src: yes + exclude: + - "exclude/exclude-*.txt" + - "other/exclude-1.ext" + with_items: + - zip + - tar + +- name: verify that the file was unarchived + shell: find {{ remote_tmp_dir }}/exclude-{{item}} chdir={{ remote_tmp_dir }} + register: unarchive00 + with_items: + - zip + - tar + +- name: verify that archive extraction excluded the files + assert: + that: + - "'exclude/exclude-1.txt' not in item.stdout" + - "'other/exclude-1.ext' not in item.stdout" + with_items: + - "{{ unarchive00.results }}" + +- name: remove our zip unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-zip' + state: absent + +- name: remove our test files for the archive + file: + path: '{{remote_tmp_dir}}/{{item}}' + state: absent + with_items: + - foo-unarchive.txt + - foo-unarchive-777.txt + - FOO-UNAR.TXT diff --git a/test/integration/targets/unarchive/tasks/test_missing_files.yml b/test/integration/targets/unarchive/tasks/test_missing_files.yml new file mode 100644 index 00000000..4f57e184 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_missing_files.yml @@ -0,0 +1,47 @@ +# Test that unarchiving is performed if files are missing +# https://github.com/ansible/ansible-modules-core/issues/1064 +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: unarchive a tar that has directories + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive-dir.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + mode: "0700" + remote_src: yes + register: unarchive10 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive10.changed == true" + +- name: Change the mode of the toplevel dir + file: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/unarchive-dir" + mode: "0701" + +- name: Remove a file from the extraction point + file: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/unarchive-dir/foo-unarchive.txt" + state: absent + +- name: unarchive a tar that has directories + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive-dir.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + mode: "0700" + remote_src: yes + register: unarchive10_1 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive10_1.changed == true" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_mode.yml b/test/integration/targets/unarchive/tasks/test_mode.yml new file mode 100644 index 00000000..c69e3bd2 --- /dev/null +++ 
b/test/integration/targets/unarchive/tasks/test_mode.yml @@ -0,0 +1,151 @@ +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: unarchive and set mode to 0600, directories 0700 + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + remote_src: yes + mode: "u+rwX,g-rwx,o-rwx" + list_files: True + register: unarchive06 + +- name: Test that the file modes were changed + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive06_stat + +- name: Test that the file modes were changed + assert: + that: + - "unarchive06.changed == true" + - "unarchive06_stat.stat.mode == '0600'" + # Verify that file list is generated + - "'files' in unarchive06" + - "{{unarchive06['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive06['files']" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent + +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: unarchive over existing extraction and set mode to 0644 + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + remote_src: yes + mode: "u+rwX,g-wx,o-wx,g+r,o+r" + register: unarchive06_2 + +- name: Test that the file modes were changed + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive06_2_stat + +- debug: + var: unarchive06_2_stat.stat.mode + +- name: Test that the files were changed + assert: + that: + - "unarchive06_2.changed == true" + - "unarchive06_2_stat.stat.mode == '0644'" + +- name: Repeat the last request to verify no changes + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + remote_src: yes + mode: "u+rwX-x,g-wx,o-wx,g+r,o+r" + list_files: True + register: unarchive07 + +- name: Test that the files were not changed + assert: + that: + - "unarchive07.changed == false" + # Verify that file list is generated + - "'files' in unarchive07" + - "{{unarchive07['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive07['files']" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent + +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-zip' + state: directory + +- name: unarchive and set mode to 0601, directories 0700 + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.zip" + dest: "{{ remote_tmp_dir }}/test-unarchive-zip" + remote_src: yes + mode: "u+rwX-x,g-rwx,o=x" + list_files: True + register: unarchive08 + +- name: Test that the file modes were changed + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-zip/foo-unarchive.txt" + register: unarchive08_stat + +- name: Test that the file modes were changed + assert: + that: + - "unarchive08.changed == true" + - "unarchive08_stat.stat.mode == '0601'" + # Verify that file list is generated + - "'files' in unarchive08" + - "{{unarchive08['files']| length}} == 3" + - "'foo-unarchive.txt' in unarchive08['files']" + - "'foo-unarchive-777.txt' in unarchive08['files']" + - "'FOO-UNAR.TXT' in unarchive08['files']" + +- name: unarchive zipfile a second time and set mode to 0601, directories 0700 + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.zip" + dest: "{{ 
remote_tmp_dir }}/test-unarchive-zip" + remote_src: yes + mode: "u+rwX-x,g-rwx,o=x" + list_files: True + register: unarchive08 + +- name: Test that the file modes were not changed + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-zip/foo-unarchive.txt" + register: unarchive08_stat + +- debug: + var: unarchive08 + +- debug: + var: unarchive08_stat + +- name: Test that the files did not change + assert: + that: + - "unarchive08.changed == false" + - "unarchive08_stat.stat.mode == '0601'" + # Verify that file list is generated + - "'files' in unarchive08" + - "{{unarchive08['files']| length}} == 3" + - "'foo-unarchive.txt' in unarchive08['files']" + - "'foo-unarchive-777.txt' in unarchive08['files']" + - "'FOO-UNAR.TXT' in unarchive08['files']" + +- name: remove our zip unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-zip' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml b/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml new file mode 100644 index 00000000..c884f49a --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml @@ -0,0 +1,66 @@ +- name: create our unarchive destination + file: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + state: directory + +- name: test that unarchive works with an archive that contains non-ascii filenames + unarchive: + # Both the filename of the tarball and the filename inside the tarball have + # nonascii chars + src: "test-unarchive-nonascii-くらとみ.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + mode: "u+rwX,go+rX" + remote_src: no + register: nonascii_result0 + +- name: Check that file is really there + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz/storage/àâæçéèïîôœ(copy)!@#$%^&-().jpg" + register: nonascii_stat0 + +- name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result0.changed == true" + - "nonascii_stat0.stat.exists == true" + +- name: remove nonascii test + file: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + state: absent + +- name: test non-ascii with different LC_ALL + block: + - name: create our unarchive destination + file: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + state: directory + + - name: test that unarchive works with an archive that contains non-ascii filenames + unarchive: + # Both the filename of the tarball and the filename inside the tarball have + # nonascii chars + src: "test-unarchive-nonascii-くらとみ.tar.gz" + dest: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + mode: "u+rwX,go+rX" + remote_src: no + register: nonascii_result0 + + - name: Check that file is really there + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz/storage/àâæçéèïîôœ(copy)!@#$%^&-().jpg" + register: nonascii_stat0 + + - name: Assert that nonascii tests succeeded + assert: + that: + - "nonascii_result0.changed == true" + - "nonascii_stat0.stat.exists == true" + + - name: remove nonascii test + file: + path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-くらとみ-tar-gz" + state: absent + + environment: + LC_ALL: C diff --git a/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml b/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml new file mode 100644 index 00000000..bfb082c6 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml @@ -0,0 +1,32 @@ +- name: check if 
/tmp/foo-unarchive.txt exists + stat: + path: /tmp/foo-unarchive.txt + ignore_errors: True + register: unarchive04 + +- name: fail if the proposed destination file exists for safety + fail: + msg: /tmp/foo-unarchive.txt already exists, aborting + when: unarchive04.stat.exists + +- name: try unarchiving to /tmp + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.tar.gz' + dest: /tmp + remote_src: true + register: unarchive05 + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive05.changed == true" + +- name: verify that the file was unarchived + file: + path: /tmp/foo-unarchive.txt + state: file + +- name: remove our unarchive destination + file: + path: /tmp/foo-unarchive.txt + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_quotable_characters.yml b/test/integration/targets/unarchive/tasks/test_quotable_characters.yml new file mode 100644 index 00000000..0a3c2cc3 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_quotable_characters.yml @@ -0,0 +1,38 @@ +- name: create our unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: create a directory with quotable chars + file: + path: '{{ remote_tmp_dir }}/test-quotes~root' + state: directory + +- name: unarchive into directory with quotable chars + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/test-quotes~root" + remote_src: yes + register: unarchive08 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive08.changed == true" + +- name: unarchive into directory with quotable chars a second time + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/test-quotes~root" + remote_src: yes + register: unarchive09 + +- name: Test that unarchive did nothing + assert: + that: + - "unarchive09.changed == false" + +- name: remove quotable chars test + file: + path: '{{ remote_tmp_dir }}/test-quotes~root' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_symlink.yml b/test/integration/targets/unarchive/tasks/test_symlink.yml new file mode 100644 index 00000000..fcb72828 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_symlink.yml @@ -0,0 +1,64 @@ +- name: Create a destination dir + file: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + state: directory + +- name: Create a symlink to the destination dir + file: + path: "{{ remote_tmp_dir }}/link-to-unarchive-dir" + src: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + state: "link" + +- name: test that unarchive works when dest is a symlink to a dir + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/link-to-unarchive-dir" + mode: "u+rwX,go+rX" + remote_src: yes + register: unarchive_11 + +- name: Check that file is really there + stat: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive11_stat0 + +- name: Assert that unarchive when dest is a symlink to a dir worked + assert: + that: + - "unarchive_11.changed == true" + - "unarchive11_stat0.stat.exists == true" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent + +- name: Create a file + file: + path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + state: touch + +- name: Create a symlink to the file + file: + src: "{{ remote_tmp_dir }}/test-unarchive-tar-gz" + path: "{{ remote_tmp_dir }}/link-to-unarchive-file" + state: 
"link" + +- name: test that unarchive fails when dest is a link to a file + unarchive: + src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz" + dest: "{{ remote_tmp_dir }}/link-to-unarchive-file" + mode: "u+rwX,go+rX" + remote_src: yes + ignore_errors: True + register: unarchive_12 + +- name: Assert that unarchive when dest is a file failed + assert: + that: + - "unarchive_12.failed == true" + +- name: remove our tar.gz unarchive destination + file: + path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_tar.yml b/test/integration/targets/unarchive/tasks/test_tar.yml new file mode 100644 index 00000000..09105c60 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_tar.yml @@ -0,0 +1,26 @@ +- name: create our tar unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar' + state: directory + +- name: unarchive a tar file + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.tar' + dest: '{{remote_tmp_dir}}/test-unarchive-tar' + remote_src: yes + register: unarchive01 + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive01.changed == true" + +- name: verify that the file was unarchived + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar/foo-unarchive.txt' + state: file + +- name: remove our tar unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz.yml b/test/integration/targets/unarchive/tasks/test_tar_gz.yml new file mode 100644 index 00000000..ac9e9a15 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_tar_gz.yml @@ -0,0 +1,28 @@ +- name: create our tar.gz unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: unarchive a tar.gz file + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.tar.gz' + dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + remote_src: yes + register: unarchive02 + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive02.changed == true" + # Verify that no file list is generated + - "'files' not in unarchive02" + +- name: verify that the file was unarchived + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt' + state: file + +- name: remove our tar.gz unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml new file mode 100644 index 00000000..fa3a23f8 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml @@ -0,0 +1,53 @@ +- name: create our tar.gz unarchive destination for creates + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: directory + +- name: unarchive a tar.gz file with creates set + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.tar.gz' + dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + creates: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt' + remote_src: yes + register: unarchive02b + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive02b.changed == true" + +- name: verify that the file was unarchived + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt' + state: file + +- name: unarchive a tar.gz file with creates over an existing file + unarchive: + src: 
'{{remote_tmp_dir}}/test-unarchive.tar.gz' + dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + creates: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt' + remote_src: yes + register: unarchive02c + +- name: verify that the file was not marked as changed + assert: + that: + - "unarchive02c.changed == false" + +- name: unarchive a tar.gz file with creates over an existing file using complex_args + unarchive: + src: "{{remote_tmp_dir}}/test-unarchive.tar.gz" + dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + remote_src: yes + creates: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive02d + +- name: verify that the file was not marked as changed + assert: + that: + - "unarchive02d.changed == false" + +- name: remove our tar.gz unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-tar-gz' + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml new file mode 100644 index 00000000..aec94545 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml @@ -0,0 +1,57 @@ +- name: create our tar.gz unarchive destination for keep-newer + file: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + state: directory + +- name: Create a newer file that we would replace + copy: + dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt" + content: boo + mode: preserve + +- name: unarchive a tar.gz file but avoid overwriting newer files (keep_newer=true) + unarchive: + src: "{{remote_tmp_dir}}/test-unarchive.tar.gz" + dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + remote_src: yes + keep_newer: true + register: unarchive02f + +- name: Make sure the file still contains 'boo' + shell: cat {{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt + register: unarchive02f_cat + +- name: remove our tar.gz unarchive destination + file: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + state: absent + +- name: create our tar.gz unarchive destination for keep-newer (take 2) + file: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + state: directory + +- name: unarchive a tar.gz file and overwrite newer files (keep_newer=false) + unarchive: + src: "{{remote_tmp_dir}}/test-unarchive.tar.gz" + dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + remote_src: yes + keep_newer: false + register: unarchive02g + +- name: Make sure the file still contains 'boo' + shell: cat {{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt + register: unarchive02g_cat + +- name: remove our tar.gz unarchive destination + file: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + state: absent + +- name: verify results + assert: + that: + - unarchive02f is changed + - unarchive02f_cat.stdout == 'boo' + - unarchive02g is changed + - unarchive02g_cat.stdout != 'boo' diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml new file mode 100644 index 00000000..257692e1 --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml @@ -0,0 +1,48 @@ +- block: + - name: Create a group to chown to + group: + name: testgroup + + - name: Create a user to chown to + user: + name: testuser + groups: + - testgroup + + - name: create our tar.gz unarchive destination for chown + file: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + state: directory + + - name: unarchive a tar.gz file with owner and group set 
to the above user + unarchive: + src: "{{remote_tmp_dir}}/test-unarchive.tar.gz" + dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz" + remote_src: yes + owner: testuser + group: testgroup + register: unarchive02e + + - name: Stat a file in the directory we unarchived to + stat: + path: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt" + register: unarchive02e_file_stat + + - name: verify results + assert: + that: + - unarchive02e is changed + - unarchive02e_file_stat.stat.exists + - unarchive02e_file_stat.stat.pw_name == 'testuser' + - unarchive02e_file_stat.stat.gr_name == 'testgroup' + + always: + - name: Remove testuser + user: + name: testuser + state: absent + + - name: Remove testgroup + group: + name: testgroup + state: absent diff --git a/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml b/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml new file mode 100644 index 00000000..6181e3bd --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml @@ -0,0 +1,86 @@ +- name: Create unarchivetest1 user + user: + name: unarchivetest1 + uid: 1002610001 + group: "{{ group_table[ansible_facts['distribution']] | default(omit) }}" + register: user + vars: + group_table: + MacOSX: staff + +- name: Test unarchiving twice as unprivileged user + become: yes + become_user: unarchivetest1 + block: + - name: prep our file + copy: + src: foo.txt + dest: "{{ user.home }}/foo-unarchive.txt" + mode: preserve + + - name: Prep a zip file as unarchivetest1 user + shell: zip unarchivetest1-unarchive.zip foo-unarchive.txt + args: + chdir: "{{ user.home }}" + creates: "{{ user.home }}/unarchivetest1-unarchive.zip" + + - name: create our zip unarchive destination as unarchivetest1 user + file: + path: "{{ user.home }}/unarchivetest1-unarchive-zip" + state: directory + + - name: unarchive a zip file as unarchivetest1 user + unarchive: + src: "{{ user.home }}/unarchivetest1-unarchive.zip" + dest: "{{ user.home }}/unarchivetest1-unarchive-zip" + remote_src: yes + list_files: True + register: unarchive10 + + - name: stat the unarchived file + stat: + path: "{{ user.home }}/unarchivetest1-unarchive-zip/foo-unarchive.txt" + register: archive_path + + - name: verify that the tasks performed as expected + assert: + that: + - unarchive10 is changed + # Verify that file list is generated + - "'files' in unarchive10" + - "{{unarchive10['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive10['files']" + - archive_path.stat.exists + + - name: repeat the last request to verify no changes + unarchive: + src: "{{ user.home }}/unarchivetest1-unarchive.zip" + dest: "{{ user.home }}/unarchivetest1-unarchive-zip" + remote_src: yes + list_files: True + register: unarchive10b + + # Due to a bug in the date calculation used to determine if a change + # was made or not, this check is unreliable. This seems to only happen on + # Ubuntu 1604. 
+ # https://github.com/ansible/ansible/blob/58145dff9ca1a713f8ed295a0076779a91c41cba/lib/ansible/modules/unarchive.py#L472-L474 + - name: Check that unarchiving again reports no change + assert: + that: + - unarchive10b is not changed + ignore_errors: yes + + always: + - name: remove our unarchivetest1 user and files + user: + name: unarchivetest1 + state: absent + remove: yes + become: no + + - name: Remove user home directory on macOS + file: + path: /Users/unarchivetest1 + state: absent + become: no + when: ansible_facts.distribution == 'MacOSX' diff --git a/test/integration/targets/unarchive/tasks/test_zip.yml b/test/integration/targets/unarchive/tasks/test_zip.yml new file mode 100644 index 00000000..aae57d8e --- /dev/null +++ b/test/integration/targets/unarchive/tasks/test_zip.yml @@ -0,0 +1,45 @@ +- name: create our zip unarchive destination + file: + path: '{{remote_tmp_dir}}/test-unarchive-zip' + state: directory + +- name: unarchive a zip file + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.zip' + dest: '{{remote_tmp_dir}}/test-unarchive-zip' + list_files: True + remote_src: yes + register: unarchive03 + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive03.changed == true" + # Verify that file list is generated + - "'files' in unarchive03" + - "{{unarchive03['files']| length}} == 3" + - "'foo-unarchive.txt' in unarchive03['files']" + - "'foo-unarchive-777.txt' in unarchive03['files']" + - "'FOO-UNAR.TXT' in unarchive03['files']" + +- name: verify that the file was unarchived + file: + path: '{{remote_tmp_dir}}/test-unarchive-zip/{{item}}' + state: file + with_items: + - foo-unarchive.txt + - foo-unarchive-777.txt + - FOO-UNAR.TXT + +- name: repeat the last request to verify no changes + unarchive: + src: '{{remote_tmp_dir}}/test-unarchive.zip' + dest: '{{remote_tmp_dir}}/test-unarchive-zip' + list_files: true + remote_src: true + register: unarchive03b + +- name: verify that the task was not marked as changed + assert: + that: + - "unarchive03b.changed == false" diff --git a/test/integration/targets/undefined/aliases b/test/integration/targets/undefined/aliases new file mode 100644 index 00000000..70a7b7a9 --- /dev/null +++ b/test/integration/targets/undefined/aliases @@ -0,0 +1 @@ +shippable/posix/group5 diff --git a/test/integration/targets/undefined/tasks/main.yml b/test/integration/targets/undefined/tasks/main.yml new file mode 100644 index 00000000..de6681a0 --- /dev/null +++ b/test/integration/targets/undefined/tasks/main.yml @@ -0,0 +1,18 @@ +- when: lookup('pipe', ansible_playbook_python ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=') + block: + - set_fact: + names: '{{ things|map(attribute="name") }}' + vars: + things: + - name: one + - name: two + - notname: three + - name: four + + - assert: + that: + - '"%r"|format(undef) == "AnsibleUndefined"' + # The existence of AnsibleUndefined in a templating result + # prevents safe_eval from turning the value into a python object + - names is string + - '", AnsibleUndefined," in names' diff --git a/test/integration/targets/unicode/aliases b/test/integration/targets/unicode/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/unicode/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/unicode/inventory b/test/integration/targets/unicode/inventory new file mode 100644 index 00000000..11b35606 --- /dev/null +++ b/test/integration/targets/unicode/inventory @@ -0,0 +1,5 @@ +[local] 
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" + +[all:vars] +unicode_host_var=CaféEñyei diff --git "a/test/integration/targets/unicode/k\305\231\303\255\305\276ek-ansible-project/ansible.cfg" "b/test/integration/targets/unicode/k\305\231\303\255\305\276ek-ansible-project/ansible.cfg" new file mode 100644 index 00000000..6775889f --- /dev/null +++ "b/test/integration/targets/unicode/k\305\231\303\255\305\276ek-ansible-project/ansible.cfg" @@ -0,0 +1,2 @@ +[defaults] +library=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:. diff --git a/test/integration/targets/unicode/runme.sh b/test/integration/targets/unicode/runme.sh new file mode 100755 index 00000000..aa14783b --- /dev/null +++ b/test/integration/targets/unicode/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook unicode.yml -i inventory -v -e 'extra_var=café' "$@" +# Test the start-at-task flag #9571 +ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook unicode.yml -i inventory -v --start-at-task '*¶' -e 'start_at_task=True' "$@" + +# Test --version works with non-ascii ansible project paths #66617 +# Unset these so values from the project dir are used +unset ANSIBLE_CONFIG +unset ANSIBLE_LIBRARY +pushd křížek-ansible-project && ansible --version; popd diff --git a/test/integration/targets/unicode/unicode-test-script b/test/integration/targets/unicode/unicode-test-script new file mode 100755 index 00000000..340f2a9f --- /dev/null +++ b/test/integration/targets/unicode/unicode-test-script @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Non-ascii arguments:" +echo $@ + +echo "Non-ascii Env var:" +echo $option diff --git a/test/integration/targets/unicode/unicode.yml b/test/integration/targets/unicode/unicode.yml new file mode 100644 index 00000000..672133d5 --- /dev/null +++ b/test/integration/targets/unicode/unicode.yml @@ -0,0 +1,149 @@ +--- +- name: 'A play with unicode: ¢ £ ¤ ¥' + hosts: localhost + vars: + test_var: 'Ī ī Ĭ ĭ Į į İ ı IJ ij Ĵ ĵ Ķ ķ ĸ Ĺ ĺ Ļ ļ Ľ ľ Ŀ ŀ Ł ł Ń ń Ņ ņ Ň ň ʼn Ŋ ŋ Ō ō Ŏ ŏ Ő ő Œ' + hostnames: + - 'host-ϬϭϮϯϰ' + - 'host-fóöbär' + - 'host-ΙΚΛΜΝΞ' + - 'host-στυφχψ' + - 'host-ϬϭϮϯϰϱ' + + tasks: + - name: 'A task name with unicode: è é ê ë' + debug: msg='hi there' + + - name: 'A task with unicode parameters' + debug: var=test_var + + # € ‚ ƒ „ … † ‡ ˆ ‰ Š ‹ Œ Ž ‘ ’ “ ” • – — ˜ ™ š › œ ž Ÿ ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ ­ ®' + + - name: 'A task using with_items containing unicode' + debug: msg='{{item}}' + with_items: + - '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' + - 'Ø Ù Ú Û Ü Ý Þ ß à á â ã ä å æ ç è é ê ë ì í î ï ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ Ā' + - 'ā Ă ă Ą ą Ć ć Ĉ ĉ Ċ ċ Č č Ď ď Đ đ Ē ē Ĕ ĕ Ė ė Ę ę Ě ě Ĝ ĝ Ğ ğ Ġ ġ Ģ ģ Ĥ ĥ Ħ ħ Ĩ ĩ' + + - add_host: + name: '{{item}}' + groups: 'ĪīĬĭ' + ansible_ssh_host: 127.0.0.1 + ansible_connection: local + ansible_python_interpreter: "{{ ansible_playbook_python }}" + with_items: "{{ hostnames }}" + + - name: 'A task with unicode extra vars' + debug: var=extra_var + + - name: 'A task with unicode host vars' + debug: var=unicode_host_var + + - name: 'A task with unicode shell parameters' + shell: echo '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' + register: output + + - name: 'Assert that the unicode was echoed' + assert: + that: + - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines" + + - name: Run raw with non-ascii options + raw: "/bin/echo Zażółć gęślą jaźń" + 
register: results + + - name: Check that raw output the right thing + assert: + that: + - "'Zażółć gęślą jaźń' in results.stdout_lines" + + - name: Run a script with non-ascii options and environment + script: unicode-test-script --option "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that script output includes the nonascii arguments and environment values + assert: + that: + - "'--option Zażółć gęślą jaźń' in results.stdout_lines" + - "'Zażółć' in results.stdout_lines" + + - name: Ping with non-ascii environment variable and option + ping: + data: "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that ping with non-ascii data was correct + assert: + that: + - "'Zażółć gęślą jaźń' == results.ping" + + - name: Command that echos a non-ascii env var + command: "echo $option" + environment: + option: Zażółć + register: results + + - name: Check that a non-ascii env var was passed to the command module + assert: + that: + - "'Zażółć' in results.stdout_lines" + + - name: Clean a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: absent + + - name: Create a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: directory + + - name: Create a file with a non-ascii filename + file: + path: /var/tmp/ansible_test_unicode_get_put/Zażółć + state: touch + delegate_to: localhost + + - name: Put with unicode filename + copy: + src: /var/tmp/ansible_test_unicode_get_put/Zażółć + dest: /var/tmp/ansible_test_unicode_get_put/Zażółć2 + + - name: Fetch with unicode filename + fetch: + src: /var/tmp/ansible_test_unicode_get_put/Zażółć2 + dest: /var/tmp/ansible_test_unicode_get_put/ + + - name: Clean a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: absent + +- name: 'A play for hosts in group: ĪīĬĭ' + hosts: 'ĪīĬĭ' + gather_facts: true + tasks: + - debug: msg='Unicode is a good thing ™' + - debug: msg=АБВГД + +# Run this test by adding to the CLI: -e start_at_task=True --start-at-task '*¶' +- name: 'Show that we can skip to unicode named tasks' + hosts: localhost + gather_facts: false + vars: + flag: 'original' + start_at_task: False + tasks: + - name: 'Override flag var' + set_fact: flag='new' + + - name: 'A unicode task at the end of the playbook: ¶' + assert: + that: + - 'flag == "original"' + when: start_at_task|bool diff --git a/test/integration/targets/unsafe_writes/aliases b/test/integration/targets/unsafe_writes/aliases new file mode 100644 index 00000000..4fb7a116 --- /dev/null +++ b/test/integration/targets/unsafe_writes/aliases @@ -0,0 +1,6 @@ +needs/root +skip/freebsd +skip/osx +skip/macos +skip/aix +shippable/posix/group3 diff --git a/test/integration/targets/unsafe_writes/basic.yml b/test/integration/targets/unsafe_writes/basic.yml new file mode 100644 index 00000000..b173c7f8 --- /dev/null +++ b/test/integration/targets/unsafe_writes/basic.yml @@ -0,0 +1,53 @@ +- hosts: testhost + gather_facts: false + vars: + testudir: '{{output_dir}}/unsafe_writes_test' + testufile: '{{testudir}}/unreplacablefile.txt' + tasks: + - name: test unsafe_writes on immutable dir (file cannot be atomically replaced) + block: + - name: create target dir + file: path={{testudir}} state=directory + - name: setup test file + copy: content=ORIGINAL dest={{testufile}} + - name: make target dir immutable (cannot write to file w/o unsafe_writes) + file: path={{testudir}} state=directory attributes="+i" + become: yes + ignore_errors: true + register: madeimmutable + 
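(For context on what the unsafe_writes tasks below exercise: Ansible normally replaces a file by staging a sibling temp file and atomically renaming it over the destination, and an immutable (+i) parent directory blocks exactly that; unsafe_writes=True falls back to rewriting the file in place. A minimal Python sketch of the two strategies follows; it assumes nothing about Ansible's internals, and the helper names are invented for illustration:)

import os
import tempfile

def write_atomic(path, data):
    # Default strategy: stage a temp file beside the destination, then
    # atomically rename it into place. Creating or renaming the temp
    # file fails with EPERM when the directory carries the immutable
    # attribute.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
        os.rename(tmp, path)
    except BaseException:
        if os.path.exists(tmp):
            os.unlink(tmp)
        raise

def write_unsafe(path, data):
    # unsafe_writes fallback: truncate and rewrite the existing file in
    # place. Not atomic, but the file's own inode stays writable even
    # when its directory forbids creating or unlinking entries.
    with open(path, 'w') as f:
        f.write(data)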
+ - name: only run if immutable dir command worked, some of our test systems don't allow for it + when: madeimmutable is success + block: + - name: test this is actually immutable, working as we expect + file: path={{testufile}} state=absent + register: breakimmutable + ignore_errors: True + + - name: only run if really immutable dir + when: breakimmutable is failed + block: + - name: test overwriting file w/o unsafe + copy: content=NEW dest={{testufile}} unsafe_writes=False + ignore_errors: true + register: copy_without + + - name: ensure we properly failed + assert: + that: + - copy_without is failed + + - name: test overwriting file with unsafe + copy: content=NEW dest={{testufile}} unsafe_writes=True + register: copy_with + + - name: ensure we properly changed + assert: + that: + - copy_with is changed + + always: + - name: remove immutable flag from dir to prevent issues with cleanup + file: path={{testudir}} state=directory attributes="-i" + ignore_errors: true + become: yes diff --git a/test/integration/targets/unsafe_writes/runme.sh b/test/integration/targets/unsafe_writes/runme.sh new file mode 100755 index 00000000..5c37f727 --- /dev/null +++ b/test/integration/targets/unsafe_writes/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook basic.yml -i ../../inventory -e "output_dir=${OUTPUT_DIR}" "$@" diff --git a/test/integration/targets/until/aliases b/test/integration/targets/until/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/until/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/until/tasks/main.yml b/test/integration/targets/until/tasks/main.yml new file mode 100644 index 00000000..4a09ff3b --- /dev/null +++ b/test/integration/targets/until/tasks/main.yml @@ -0,0 +1,71 @@ +- shell: '{{ ansible_python.executable }} -c "import tempfile; print(tempfile.mkstemp()[1])"' + register: tempfilepath + +- set_fact: + until_tempfile_path: "{{ tempfilepath.stdout }}" + +- name: loop with default retries + shell: echo "run" >> {{ until_tempfile_path }} && wc -w < {{ until_tempfile_path }} | tr -d ' ' + register: runcount + until: runcount.stdout | int == 3 + delay: 0.01 + +- assert: + that: runcount.stdout | int == 3 + +- file: path="{{ until_tempfile_path }}" state=absent + +- name: loop with specified max retries + shell: echo "run" >> {{ until_tempfile_path }} + until: 1==0 + retries: 5 + delay: 0.01 + ignore_errors: true + +- name: validate output + shell: wc -l < {{ until_tempfile_path }} + register: runcount + +- assert: + that: runcount.stdout | int == 6 # initial + 5 retries + +- file: + path: "{{ until_tempfile_path }}" + state: absent + +- name: Test failed_when impacting until + shell: 'true' + register: failed_when_until + failed_when: True + until: failed_when_until is successful + retries: 3 + delay: 0.5 + ignore_errors: True + +- assert: + that: + - failed_when_until.attempts == 3 + +- name: Test changed_when impacting until + shell: 'true' + register: changed_when_until + changed_when: False + until: changed_when_until is changed + retries: 3 + delay: 0.5 + ignore_errors: True + +- assert: + that: + - changed_when_until.attempts == 3 + +# This task shouldn't fail, previously .attempts was not available to changed_when/failed_when +# and would cause the conditional to fail due to ``'dict object' has no attribute 'attempts'`` +# https://github.com/ansible/ansible/issues/34139 +- name: Test access to attempts in changed_when/failed_when + shell: 'true' + register:
changed_when_attempts + until: 1 == 0 + retries: 5 + delay: 0.5 + failed_when: changed_when_attempts.attempts > 6 diff --git a/test/integration/targets/unvault/aliases b/test/integration/targets/unvault/aliases new file mode 100644 index 00000000..765b70da --- /dev/null +++ b/test/integration/targets/unvault/aliases @@ -0,0 +1 @@ +shippable/posix/group2 diff --git a/test/integration/targets/unvault/main.yml b/test/integration/targets/unvault/main.yml new file mode 100644 index 00000000..a0f97b4b --- /dev/null +++ b/test/integration/targets/unvault/main.yml @@ -0,0 +1,9 @@ +- hosts: localhost + tasks: + - set_fact: + unvaulted: "{{ lookup('unvault', 'vault') }}" + - debug: + msg: "{{ unvaulted }}" + - assert: + that: + - "unvaulted == 'foo: bar\n'" diff --git a/test/integration/targets/unvault/password b/test/integration/targets/unvault/password new file mode 100644 index 00000000..d97c5ead --- /dev/null +++ b/test/integration/targets/unvault/password @@ -0,0 +1 @@ +secret diff --git a/test/integration/targets/unvault/runme.sh b/test/integration/targets/unvault/runme.sh new file mode 100755 index 00000000..df4585e3 --- /dev/null +++ b/test/integration/targets/unvault/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux + + +ansible-playbook --vault-password-file password main.yml diff --git a/test/integration/targets/unvault/vault b/test/integration/targets/unvault/vault new file mode 100644 index 00000000..828d3696 --- /dev/null +++ b/test/integration/targets/unvault/vault @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +33386337343963393533343039333563323733646137636162346266643134323539396237646333 +3663363965336335663161656236616532346363303832310a393264356663393330346137613239 +34633765333936633466353932663166343531616230326161383365323966386434366431353839 +3838623233373231340a303166666433613439303464393661363365643765666137393137653138 +3631 diff --git a/test/integration/targets/uri/aliases b/test/integration/targets/uri/aliases new file mode 100644 index 00000000..11e91ee7 --- /dev/null +++ b/test/integration/targets/uri/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group4 +needs/httptester +skip/aix diff --git a/test/integration/targets/uri/files/README b/test/integration/targets/uri/files/README new file mode 100644 index 00000000..ef779126 --- /dev/null +++ b/test/integration/targets/uri/files/README @@ -0,0 +1,9 @@ +The files were taken from http://www.json.org/JSON_checker/ +> If the JSON_checker is working correctly, it must accept all of the pass*.json files and reject all of the fail*.json files. 
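(Aside: a minimal sketch of that acceptance criterion using Python's own json module, the parser the uri tasks below effectively exercise; the renames listed next exist precisely because this parser is laxer than the strict checker:)

import glob
import json

# Every pass*.json must parse; every fail*.json must be rejected.
for path in sorted(glob.glob('pass*.json')):
    with open(path) as f:
        json.load(f)  # an exception here means a valid file was rejected

for path in sorted(glob.glob('fail*.json')):
    with open(path) as f:
        try:
            json.load(f)
        except ValueError:
            pass  # expected: invalid payloads must raise
        else:
            raise AssertionError('%s unexpectedly parsed' % path)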
+ +Difference with JSON_checker dataset: + - *${n}.json renamed to *${n-1}.json to be 0-based + - fail0.json renamed to pass3.json as python json module allows JSON payload to be string + - fail17.json renamed to pass4.json as python json module has no problems with deep structures + - fail32.json renamed to fail0.json to fill gap + - fail31.json renamed to fail17.json to fill gap diff --git a/test/integration/targets/uri/files/fail0.json b/test/integration/targets/uri/files/fail0.json new file mode 100644 index 00000000..ca5eb19d --- /dev/null +++ b/test/integration/targets/uri/files/fail0.json @@ -0,0 +1 @@ +["mismatch"} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail1.json b/test/integration/targets/uri/files/fail1.json new file mode 100644 index 00000000..6b7c11e5 --- /dev/null +++ b/test/integration/targets/uri/files/fail1.json @@ -0,0 +1 @@ +["Unclosed array" \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail10.json b/test/integration/targets/uri/files/fail10.json new file mode 100644 index 00000000..76eb95b4 --- /dev/null +++ b/test/integration/targets/uri/files/fail10.json @@ -0,0 +1 @@ +{"Illegal expression": 1 + 2} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail11.json b/test/integration/targets/uri/files/fail11.json new file mode 100644 index 00000000..77580a45 --- /dev/null +++ b/test/integration/targets/uri/files/fail11.json @@ -0,0 +1 @@ +{"Illegal invocation": alert()} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail12.json b/test/integration/targets/uri/files/fail12.json new file mode 100644 index 00000000..379406b5 --- /dev/null +++ b/test/integration/targets/uri/files/fail12.json @@ -0,0 +1 @@ +{"Numbers cannot have leading zeroes": 013} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail13.json b/test/integration/targets/uri/files/fail13.json new file mode 100644 index 00000000..0ed366b3 --- /dev/null +++ b/test/integration/targets/uri/files/fail13.json @@ -0,0 +1 @@ +{"Numbers cannot be hex": 0x14} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail14.json b/test/integration/targets/uri/files/fail14.json new file mode 100644 index 00000000..fc8376b6 --- /dev/null +++ b/test/integration/targets/uri/files/fail14.json @@ -0,0 +1 @@ +["Illegal backslash escape: \x15"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail15.json b/test/integration/targets/uri/files/fail15.json new file mode 100644 index 00000000..3fe21d4b --- /dev/null +++ b/test/integration/targets/uri/files/fail15.json @@ -0,0 +1 @@ +[\naked] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail16.json b/test/integration/targets/uri/files/fail16.json new file mode 100644 index 00000000..62b9214a --- /dev/null +++ b/test/integration/targets/uri/files/fail16.json @@ -0,0 +1 @@ +["Illegal backslash escape: \017"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail17.json b/test/integration/targets/uri/files/fail17.json new file mode 100644 index 00000000..45cba739 --- /dev/null +++ b/test/integration/targets/uri/files/fail17.json @@ -0,0 +1 @@ +{"Comma instead if closing brace": true, \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail18.json b/test/integration/targets/uri/files/fail18.json new file mode 100644 index 00000000..3b9c46fa --- /dev/null +++ b/test/integration/targets/uri/files/fail18.json @@ -0,0 +1 @@ 
+{"Missing colon" null} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail19.json b/test/integration/targets/uri/files/fail19.json new file mode 100644 index 00000000..27c1af3e --- /dev/null +++ b/test/integration/targets/uri/files/fail19.json @@ -0,0 +1 @@ +{"Double colon":: null} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail2.json b/test/integration/targets/uri/files/fail2.json new file mode 100644 index 00000000..168c81eb --- /dev/null +++ b/test/integration/targets/uri/files/fail2.json @@ -0,0 +1 @@ +{unquoted_key: "keys must be quoted"} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail20.json b/test/integration/targets/uri/files/fail20.json new file mode 100644 index 00000000..62474573 --- /dev/null +++ b/test/integration/targets/uri/files/fail20.json @@ -0,0 +1 @@ +{"Comma instead of colon", null} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail21.json b/test/integration/targets/uri/files/fail21.json new file mode 100644 index 00000000..a7752581 --- /dev/null +++ b/test/integration/targets/uri/files/fail21.json @@ -0,0 +1 @@ +["Colon instead of comma": false] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail22.json b/test/integration/targets/uri/files/fail22.json new file mode 100644 index 00000000..494add1c --- /dev/null +++ b/test/integration/targets/uri/files/fail22.json @@ -0,0 +1 @@ +["Bad value", truth] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail23.json b/test/integration/targets/uri/files/fail23.json new file mode 100644 index 00000000..caff239b --- /dev/null +++ b/test/integration/targets/uri/files/fail23.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail24.json b/test/integration/targets/uri/files/fail24.json new file mode 100644 index 00000000..8b7ad23e --- /dev/null +++ b/test/integration/targets/uri/files/fail24.json @@ -0,0 +1 @@ +[" tab character in string "] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail25.json b/test/integration/targets/uri/files/fail25.json new file mode 100644 index 00000000..845d26a6 --- /dev/null +++ b/test/integration/targets/uri/files/fail25.json @@ -0,0 +1 @@ +["tab\ character\ in\ string\ "] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail26.json b/test/integration/targets/uri/files/fail26.json new file mode 100644 index 00000000..6b01a2ca --- /dev/null +++ b/test/integration/targets/uri/files/fail26.json @@ -0,0 +1,2 @@ +["line +break"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail27.json b/test/integration/targets/uri/files/fail27.json new file mode 100644 index 00000000..621a0101 --- /dev/null +++ b/test/integration/targets/uri/files/fail27.json @@ -0,0 +1,2 @@ +["line\ +break"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail28.json b/test/integration/targets/uri/files/fail28.json new file mode 100644 index 00000000..47ec421b --- /dev/null +++ b/test/integration/targets/uri/files/fail28.json @@ -0,0 +1 @@ +[0e] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail29.json b/test/integration/targets/uri/files/fail29.json new file mode 100644 index 00000000..8ab0bc4b --- /dev/null +++ b/test/integration/targets/uri/files/fail29.json @@ -0,0 +1 @@ +[0e+] \ No newline at end of file diff --git 
a/test/integration/targets/uri/files/fail3.json b/test/integration/targets/uri/files/fail3.json new file mode 100644 index 00000000..9de168bf --- /dev/null +++ b/test/integration/targets/uri/files/fail3.json @@ -0,0 +1 @@ +["extra comma",] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail30.json b/test/integration/targets/uri/files/fail30.json new file mode 100644 index 00000000..1cce602b --- /dev/null +++ b/test/integration/targets/uri/files/fail30.json @@ -0,0 +1 @@ +[0e+-1] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail4.json b/test/integration/targets/uri/files/fail4.json new file mode 100644 index 00000000..ddf3ce3d --- /dev/null +++ b/test/integration/targets/uri/files/fail4.json @@ -0,0 +1 @@ +["double extra comma",,] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail5.json b/test/integration/targets/uri/files/fail5.json new file mode 100644 index 00000000..ed91580e --- /dev/null +++ b/test/integration/targets/uri/files/fail5.json @@ -0,0 +1 @@ +[ , "<-- missing value"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail6.json b/test/integration/targets/uri/files/fail6.json new file mode 100644 index 00000000..8a96af3e --- /dev/null +++ b/test/integration/targets/uri/files/fail6.json @@ -0,0 +1 @@ +["Comma after the close"], \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail7.json b/test/integration/targets/uri/files/fail7.json new file mode 100644 index 00000000..b28479c6 --- /dev/null +++ b/test/integration/targets/uri/files/fail7.json @@ -0,0 +1 @@ +["Extra close"]] \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail8.json b/test/integration/targets/uri/files/fail8.json new file mode 100644 index 00000000..5815574f --- /dev/null +++ b/test/integration/targets/uri/files/fail8.json @@ -0,0 +1 @@ +{"Extra comma": true,} \ No newline at end of file diff --git a/test/integration/targets/uri/files/fail9.json b/test/integration/targets/uri/files/fail9.json new file mode 100644 index 00000000..5d8c0047 --- /dev/null +++ b/test/integration/targets/uri/files/fail9.json @@ -0,0 +1 @@ +{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file diff --git a/test/integration/targets/uri/files/formdata.txt b/test/integration/targets/uri/files/formdata.txt new file mode 100644 index 00000000..974c0f97 --- /dev/null +++ b/test/integration/targets/uri/files/formdata.txt @@ -0,0 +1 @@ +_multipart/form-data_ diff --git a/test/integration/targets/uri/files/pass0.json b/test/integration/targets/uri/files/pass0.json new file mode 100644 index 00000000..70e26854 --- /dev/null +++ b/test/integration/targets/uri/files/pass0.json @@ -0,0 +1,58 @@ +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E66, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "0123456789": "digit", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. 
James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" +: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066, +1e1, +0.1e1, +1e-1, +1e00,2e+00,2e-00 +,"rosebud"] \ No newline at end of file diff --git a/test/integration/targets/uri/files/pass1.json b/test/integration/targets/uri/files/pass1.json new file mode 100644 index 00000000..d3c63c7a --- /dev/null +++ b/test/integration/targets/uri/files/pass1.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/test/integration/targets/uri/files/pass2.json b/test/integration/targets/uri/files/pass2.json new file mode 100644 index 00000000..4528d51f --- /dev/null +++ b/test/integration/targets/uri/files/pass2.json @@ -0,0 +1,6 @@ +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." + } +} diff --git a/test/integration/targets/uri/files/pass3.json b/test/integration/targets/uri/files/pass3.json new file mode 100644 index 00000000..6216b865 --- /dev/null +++ b/test/integration/targets/uri/files/pass3.json @@ -0,0 +1 @@ +"A JSON payload should be an object or array, not a string." \ No newline at end of file diff --git a/test/integration/targets/uri/files/pass4.json b/test/integration/targets/uri/files/pass4.json new file mode 100644 index 00000000..edac9271 --- /dev/null +++ b/test/integration/targets/uri/files/pass4.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/test/integration/targets/uri/files/testserver.py b/test/integration/targets/uri/files/testserver.py new file mode 100644 index 00000000..81043b66 --- /dev/null +++ b/test/integration/targets/uri/files/testserver.py @@ -0,0 +1,20 @@ +import sys + +if __name__ == '__main__': + if sys.version_info[0] >= 3: + import http.server + import socketserver + PORT = int(sys.argv[1]) + + class Handler(http.server.SimpleHTTPRequestHandler): + pass + + Handler.extensions_map['.json'] = 'application/json' + httpd = socketserver.TCPServer(("", PORT), Handler) + httpd.serve_forever() + else: + import mimetypes + mimetypes.init() + mimetypes.add_type('application/json', '.json') + import SimpleHTTPServer + SimpleHTTPServer.test() diff --git a/test/integration/targets/uri/meta/main.yml b/test/integration/targets/uri/meta/main.yml new file mode 100644 index 00000000..39b94950 --- /dev/null +++ b/test/integration/targets/uri/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: + - prepare_tests + - prepare_http_tests + - setup_remote_tmp_dir + - setup_remote_constraints diff --git a/test/integration/targets/uri/tasks/main.yml b/test/integration/targets/uri/tasks/main.yml new file mode 100644 index 00000000..409607af --- /dev/null +++ b/test/integration/targets/uri/tasks/main.yml @@ -0,0 +1,600 @@ +# test code for the uri module +# (c) 2014, Leonid Evdokimov + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: set role facts + set_fact: + http_port: 15260 + files_dir: '{{ output_dir|expanduser }}/files' + checkout_dir: '{{ output_dir }}/git' + +- name: create a directory to serve files from + file: + dest: "{{ files_dir }}" + state: directory + +- copy: + src: "{{ item }}" + dest: "{{files_dir}}/{{ item }}" + with_sequence: start=0 end=4 format=pass%d.json + +- copy: + src: "{{ item }}" + dest: "{{files_dir}}/{{ item }}" + with_sequence: start=0 end=30 format=fail%d.json + +- copy: + src: "testserver.py" + dest: "{{ output_dir }}/testserver.py" + +- name: start SimpleHTTPServer + shell: cd {{ files_dir }} && {{ ansible_python.executable }} {{ output_dir}}/testserver.py {{ http_port }} + async: 120 # this test set can take ~1m to run on FreeBSD (via Shippable) + poll: 0 + +- wait_for: port={{ http_port }} + + +- name: checksum pass_json + stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes + register: pass_checksum + with_sequence: start=0 end=4 format=pass%d + +- name: fetch pass_json + uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json + register: fetch_pass_json + with_sequence: start=0 end=4 format=pass%d + +- name: check pass_json + assert: + that: + - '"json" in item.1' + - item.0.stat.checksum == item.1.content | checksum + with_together: + - "{{pass_checksum.results}}" + - "{{fetch_pass_json.results}}" + + +- name: checksum fail_json + stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes + register: fail_checksum + with_sequence: start=0 end=30 format=fail%d + +- name: fetch fail_json + uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json + register: fail + with_sequence: start=0 end=30 format=fail%d + +- name: check fail_json + assert: + that: + - item.0.stat.checksum == item.1.content | checksum + - '"json" not in item.1' + with_together: + - "{{fail_checksum.results}}" + - "{{fail.results}}" + +- name: test https fetch to a site with mismatched hostname and certificate + uri: + url: "https://{{ badssl_host }}/" + dest: "{{ output_dir }}/shouldnotexist.html" + ignore_errors: True + register: result + +- stat: + path: "{{ output_dir }}/shouldnotexist.html" + register: stat_result + +- name: Assert that the file was not downloaded + assert: + that: + - result.failed == true + - "'Failed to validate the SSL certificate' in result.msg or 'Hostname mismatch' in result.msg or (result.msg is match('hostname .* doesn.t match .*'))" + - stat_result.stat.exists == false + - result.status is defined + - result.status == -1 + - result.url == 'https://' ~ badssl_host ~ '/' + +- name: Clean up any cruft from the results directory + file: + name: "{{ output_dir }}/kreitz.html" + state: absent + +- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no + uri: + url: "https://{{ badssl_host }}/" + dest: "{{ output_dir }}/kreitz.html" + validate_certs: no + register: result + +- stat: + path: "{{ output_dir }}/kreitz.html" + register: stat_result + +- name: Assert that the file was downloaded + assert: + that: + - "stat_result.stat.exists == true" + - "result.changed == true"
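(Aside: the redirect tasks that follow, here and in the redirect-none/redirect-safe/redirect-urllib2/redirect-all files further down, all encode the same two rules. Below is a compact illustrative restatement in Python; this is not the uri module's actual code, the helper names are invented, and the fourth policy, 'urllib2', simply defers to urllib2's built-in handling:)

def should_follow(policy, method):
    # follow_redirects semantics exercised by the tests: 'none' never
    # follows (the 3xx is surfaced as a failed status), 'safe' follows
    # only idempotent requests, 'all' follows every method.
    if policy == 'none':
        return False
    if policy == 'safe':
        return method in ('GET', 'HEAD')
    return policy == 'all'

def method_after_redirect(status, method):
    # 301/302/303 are followed by switching POST to GET and dropping
    # the body; 307/308 preserve the original method and body.
    if status in (301, 302, 303) and method == 'POST':
        return 'GET'
    return method

assert should_follow('safe', 'POST') is False
assert method_after_redirect(303, 'POST') == 'GET'
assert method_after_redirect(308, 'POST') == 'POST'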
+ +- name: test redirect without follow_redirects + uri: + url: 'https://{{ httpbin_host }}/redirect/2' + follow_redirects: 'none' + status_code: 302 + register: result + +- name: Assert location header + assert: + that: + - 'result.location|default("") == "https://{{ httpbin_host }}/relative-redirect/1"' + +- name: Check SSL with redirect + uri: + url: 'https://{{ httpbin_host }}/redirect/2' + register: result + +- name: Assert SSL with redirect + assert: + that: + - 'result.url|default("") == "https://{{ httpbin_host }}/get"' + +- name: redirect to bad SSL site + uri: + url: 'http://{{ badssl_host }}' + register: result + ignore_errors: true + +- name: Ensure bad SSL site redirect fails + assert: + that: + - result is failed + - 'badssl_host in result.msg' + +- name: test basic auth + uri: + url: 'https://{{ httpbin_host }}/basic-auth/user/passwd' + user: user + password: passwd + +- name: test basic forced auth + uri: + url: 'https://{{ httpbin_host }}/hidden-basic-auth/user/passwd' + force_basic_auth: true + user: user + password: passwd + +- name: test digest auth + uri: + url: 'https://{{ httpbin_host }}/digest-auth/auth/user/passwd' + user: user + password: passwd + headers: + Cookie: "fake=fake_value" + +- name: test PUT + uri: + url: 'https://{{ httpbin_host }}/put' + method: PUT + body: 'foo=bar' + +- name: test OPTIONS + uri: + url: 'https://{{ httpbin_host }}/' + method: OPTIONS + register: result + +- name: Assert we got an allow header + assert: + that: + - 'result.allow.split(", ")|sort == ["GET", "HEAD", "OPTIONS"]' + +# Ubuntu 12.04 doesn't have python-urllib3, this makes handling required dependencies a pain across all variations +# We'll use this to just skip 12.04 on those tests. We should be sufficiently covered with other OSes and versions +- name: Set fact if running on Ubuntu 12.04 + set_fact: + is_ubuntu_precise: "{{ ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'precise' }}" + +- name: Test that SNI succeeds on python versions that have SNI + uri: + url: 'https://{{ sni_host }}/' + return_content: true + when: ansible_python.has_sslcontext + register: result + +- name: Assert SNI verification succeeds on new python + assert: + that: + - result is successful + - 'sni_host in result.content' + when: ansible_python.has_sslcontext + +- name: Verify SNI verification fails on old python without urllib3 contrib + uri: + url: 'https://{{ sni_host }}' + ignore_errors: true + when: not ansible_python.has_sslcontext + register: result + +- name: Assert SNI verification fails on old python + assert: + that: + - result is failed + when: result is not skipped + +- name: check if urllib3 is installed as an OS package + package: + name: "{{ uri_os_packages[ansible_os_family].urllib3 }}" + check_mode: yes + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool and uri_os_packages[ansible_os_family].urllib3|default + register: urllib3 + +- name: uninstall conflicting urllib3 pip package + pip: + name: urllib3 + state: absent + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool and uri_os_packages[ansible_os_family].urllib3|default and urllib3.changed + +- name: install OS packages that are needed for SNI on old python + package: + name: "{{ item }}" + with_items: "{{ uri_os_packages[ansible_os_family].step1 | default([]) }}" + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: install python modules for Older Python SNI verification + pip: + name: "{{ item }}" + with_items: + - ndg-httpsclient + when: not ansible_python.has_sslcontext and not
is_ubuntu_precise|bool + +- name: Verify SNI verification succeeds on old python with urllib3 contrib + uri: + url: 'https://{{ sni_host }}' + return_content: true + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + register: result + +- name: Assert SNI verification succeeds on old python + assert: + that: + - result is successful + - 'sni_host in result.content' + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: Uninstall ndg-httpsclient + pip: + name: "{{ item }}" + state: absent + with_items: + - ndg-httpsclient + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: uninstall OS packages that are needed for SNI on old python + package: + name: "{{ item }}" + state: absent + with_items: "{{ uri_os_packages[ansible_os_family].step1 | default([]) }}" + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: install OS packages that are needed for building cryptography + package: + name: "{{ item }}" + with_items: "{{ uri_os_packages[ansible_os_family].step2 | default([]) }}" + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: install urllib3 and pyopenssl via pip + pip: + name: "{{ item }}" + state: latest + extra_args: "-c {{ remote_constraints }}" + with_items: + - urllib3 + - PyOpenSSL + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: Verify SNI verification succeeds on old python with pip urllib3 contrib + uri: + url: 'https://{{ sni_host }}' + return_content: true + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + register: result + +- name: Assert SNI verification succeeds on old python with pip urllib3 contrib + assert: + that: + - result is successful + - 'sni_host in result.content' + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: Uninstall urllib3 and PyOpenSSL + pip: + name: "{{ item }}" + state: absent + with_items: + - urllib3 + - PyOpenSSL + when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool + +- name: validate the status_codes are correct + uri: + url: "https://{{ httpbin_host }}/status/202" + status_code: 202 + method: POST + body: foo + +- name: Validate body_format json does not override content-type in 2.3 or newer + uri: + url: "https://{{ httpbin_host }}/post" + method: POST + body: + foo: bar + body_format: json + headers: + 'Content-Type': 'text/json' + return_content: true + register: result + failed_when: result.json.headers['Content-Type'] != 'text/json' + +- name: Validate body_format form-urlencoded using dicts works + uri: + url: https://{{ httpbin_host }}/post + method: POST + body: + user: foo + password: bar!#@ |&82$M + submit: Sign in + body_format: form-urlencoded + return_content: yes + register: result + +- name: Assert form-urlencoded dict input + assert: + that: + - result is successful + - result.json.headers['Content-Type'] == 'application/x-www-form-urlencoded' + - result.json.form.password == 'bar!#@ |&82$M' + +- name: Validate body_format form-urlencoded using lists works + uri: + url: https://{{ httpbin_host }}/post + method: POST + body: + - [ user, foo ] + - [ password, bar!#@ |&82$M ] + - [ submit, Sign in ] + body_format: form-urlencoded + return_content: yes + register: result + +- name: Assert form-urlencoded list input + assert: + that: + - result is successful + - result.json.headers['Content-Type'] == 'application/x-www-form-urlencoded' + - result.json.form.password == 'bar!#@ |&82$M' 
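(Aside: the two form-urlencoded tasks above hand the module a dict and a list of pairs; both shapes reduce to the same application/x-www-form-urlencoded wire format, and only the pair form can express a repeated field name. A stdlib sketch of that equivalence, assuming Python 3.7+ insertion-ordered dicts; this illustrates the encoding, not the module's internal code path:)

from urllib.parse import urlencode

as_dict = {'user': 'foo', 'password': 'bar!#@ |&82$M', 'submit': 'Sign in'}
as_pairs = [('user', 'foo'), ('password', 'bar!#@ |&82$M'), ('submit', 'Sign in')]

# Identical payload either way, with special characters percent-encoded.
assert urlencode(as_dict) == urlencode(as_pairs)

# Repeated keys are only expressible with the pair form.
print(urlencode([('tag', 'a'), ('tag', 'b')]))  # tag=a&tag=b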
+ +- name: Validate body_format form-urlencoded of invalid input fails + uri: + url: https://{{ httpbin_host }}/post + method: POST + body: + - foo + - bar: baz + body_format: form-urlencoded + return_content: yes + register: result + ignore_errors: yes + +- name: Assert invalid input fails + assert: + that: + - result is failure + - "'failed to parse body as form_urlencoded: too many values to unpack' in result.msg" + +- name: multipart/form-data + uri: + url: https://{{ httpbin_host }}/post + method: POST + body_format: form-multipart + body: + file1: + filename: formdata.txt + file2: + content: text based file content + filename: fake.txt + mime_type: text/plain + text_form_field1: value1 + text_form_field2: + content: value2 + mime_type: text/plain + register: multipart + +- name: Assert multipart/form-data + assert: + that: + - multipart.json.files.file1 == '_multipart/form-data_\n' + - multipart.json.files.file2 == 'text based file content' + - multipart.json.form.text_form_field1 == 'value1' + - multipart.json.form.text_form_field2 == 'value2' + + +- name: Validate invalid method + uri: + url: https://{{ httpbin_host }}/anything + method: UNKNOWN + register: result + ignore_errors: yes + +- name: Assert invalid method fails + assert: + that: + - result is failure + - result.status == 405 + - "'METHOD NOT ALLOWED' in result.msg" + +- name: Test client cert auth, no certs + uri: + url: "https://ansible.http.tests/ssl_client_verify" + status_code: 200 + return_content: true + register: result + failed_when: result.content != "ansible.http.tests:NONE" + when: has_httptester + +- name: Test client cert auth, with certs + uri: + url: "https://ansible.http.tests/ssl_client_verify" + client_cert: "{{ remote_tmp_dir }}/client.pem" + client_key: "{{ remote_tmp_dir }}/client.key" + return_content: true + register: result + failed_when: result.content != "ansible.http.tests:SUCCESS" + when: has_httptester + +- name: Test client cert auth, with no validation + uri: + url: "https://fail.ansible.http.tests/ssl_client_verify" + client_cert: "{{ remote_tmp_dir }}/client.pem" + client_key: "{{ remote_tmp_dir }}/client.key" + return_content: true + validate_certs: no + register: result + failed_when: result.content != "ansible.http.tests:SUCCESS" + when: has_httptester + +- name: Test client cert auth, with validation and ssl mismatch + uri: + url: "https://fail.ansible.http.tests/ssl_client_verify" + client_cert: "{{ remote_tmp_dir }}/client.pem" + client_key: "{{ remote_tmp_dir }}/client.key" + return_content: true + validate_certs: yes + register: result + failed_when: result is not failed + when: has_httptester + +- uri: + url: https://{{ httpbin_host }}/response-headers?Set-Cookie=Foo%3Dbar&Set-Cookie=Baz%3Dqux + register: result + +- assert: + that: + - result['set_cookie'] == 'Foo=bar, Baz=qux' + # Python sorts cookies in order of most specific (ie. 
longest) path first + # items with the same path are reversed from response order + - result['cookies_string'] == 'Baz=qux; Foo=bar' + +- name: Write out netrc template + template: + src: netrc.j2 + dest: "{{ remote_tmp_dir }}/netrc" + +- name: Test netrc with port + uri: + url: "https://{{ httpbin_host }}:443/basic-auth/user/passwd" + environment: + NETRC: "{{ remote_tmp_dir }}/netrc" + +- name: Test JSON POST with src + uri: + url: "https://{{ httpbin_host}}/post" + src: pass0.json + method: POST + return_content: true + body_format: json + register: result + +- name: Validate POST with src works + assert: + that: + - result.json.json[0] == 'JSON Test Pattern pass1' + +- name: Copy file pass0.json to remote + copy: + src: "{{ role_path }}/files/pass0.json" + dest: "{{ remote_tmp_dir }}/pass0.json" + +- name: Test JSON POST with src and remote_src=True + uri: + url: "https://{{ httpbin_host}}/post" + src: "{{ remote_tmp_dir }}/pass0.json" + remote_src: true + method: POST + return_content: true + body_format: json + register: result + +- name: Validate POST with src and remote_src=True works + assert: + that: + - result.json.json[0] == 'JSON Test Pattern pass1' + +- name: Make request that includes password in JSON keys + uri: + url: "https://{{ httpbin_host}}/get?key-password=value-password" + user: admin + password: password + register: sanitize_keys + +- name: assert that keys were sanitized + assert: + that: + - sanitize_keys.json.args['key-********'] == 'value-********' + +- name: Create a testing file + copy: + content: "content" + dest: "{{ output_dir }}/output" + +- name: Download a file from non existing location + uri: + url: http://does/not/exist + dest: "{{ output_dir }}/output" + ignore_errors: yes + +- name: Save testing file's output + command: "cat {{ output_dir }}/output" + register: file_out + +- name: Test the testing file was not overwritten + assert: + that: + - "'content' in file_out.stdout" + +- name: Clean up + file: + dest: "{{ output_dir }}/output" + state: absent + +- name: Test follow_redirects=none + import_tasks: redirect-none.yml + +- name: Test follow_redirects=safe + import_tasks: redirect-safe.yml + +- name: Test follow_redirects=urllib2 + import_tasks: redirect-urllib2.yml + +- name: Test follow_redirects=all + import_tasks: redirect-all.yml + +- name: Check unexpected failures + import_tasks: unexpected-failures.yml + +- name: Check return-content + import_tasks: return-content.yml diff --git a/test/integration/targets/uri/tasks/redirect-all.yml b/test/integration/targets/uri/tasks/redirect-all.yml new file mode 100644 index 00000000..d5b47a1c --- /dev/null +++ b/test/integration/targets/uri/tasks/redirect-all.yml @@ -0,0 +1,272 @@ +- name: Test HTTP 301 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: HEAD + register: http_301_head + +- assert: + that: + - http_301_head is successful + - http_301_head.json is not defined + - http_301_head.redirected == true + - http_301_head.status == 200 + - http_301_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: GET + register: http_301_get + +- assert: + that: + - http_301_get is successful + - http_301_get.json.data == '' + - http_301_get.json.method == 'GET' + - http_301_get.json.url == 
'https://{{ httpbin_host }}/anything' + - http_301_get.redirected == true + - http_301_get.status == 200 + - http_301_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 301 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_301_post + +- assert: + that: + - http_301_post is successful + - http_301_post.json.data == '' + - http_301_post.json.method == 'GET' + - http_301_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_301_post.redirected == true + - http_301_post.status == 200 + - http_301_post.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: HEAD + register: http_302_head + +- assert: + that: + - http_302_head is successful + - http_302_head.json is not defined + - http_302_head.redirected == true + - http_302_head.status == 200 + - http_302_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: GET + register: http_302_get + +- assert: + that: + - http_302_get is successful + - http_302_get.json.data == '' + - http_302_get.json.method == 'GET' + - http_302_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_get.redirected == true + - http_302_get.status == 200 + - http_302_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 302 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_302_post + +- assert: + that: + - http_302_post is successful + - http_302_post.json.data == '' + - http_302_post.json.method == 'GET' + - http_302_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_post.redirected == true + - http_302_post.status == 200 + - http_302_post.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: HEAD + register: http_303_head + +- assert: + that: + - http_303_head is successful + - http_303_head.json is not defined + - http_303_head.redirected == true + - http_303_head.status == 200 + - http_303_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: GET + register: http_303_get + +- assert: + that: + - http_303_get is successful + - http_303_get.json.data == '' + - http_303_get.json.method == 'GET' + - http_303_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_get.redirected == true + - http_303_get.status == 200 + - http_303_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 303 using 
POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_303_post + +- assert: + that: + - http_303_post is successful + - http_303_post.json.data == '' + - http_303_post.json.method == 'GET' + - http_303_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_post.redirected == true + - http_303_post.status == 200 + - http_303_post.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: HEAD + register: http_307_head + +- assert: + that: + - http_307_head is successful + - http_307_head.json is not defined + - http_307_head.redirected == true + - http_307_head.status == 200 + - http_307_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: GET + register: http_307_get + +- assert: + that: + - http_307_get is successful + - http_307_get.json.data == '' + - http_307_get.json.method == 'GET' + - http_307_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_307_get.redirected == true + - http_307_get.status == 200 + - http_307_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_307_post + +- assert: + that: + - http_307_post is successful + - http_307_post.json.json.foo == 'bar' + - http_307_post.json.method == 'POST' + - http_307_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_307_post.redirected == true + - http_307_post.status == 200 + - http_307_post.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: HEAD + register: http_308_head + +- assert: + that: + - http_308_head is successful + - http_308_head.json is undefined + - http_308_head.redirected == true + - http_308_head.status == 200 + - http_308_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: GET + register: http_308_get + +- assert: + that: + - http_308_get is successful + - http_308_get.json.data == '' + - http_308_get.json.method == 'GET' + - http_308_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_308_get.redirected == true + - http_308_get.status == 200 + - http_308_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: all + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_308_post + +- assert: + that: + - http_308_post is successful + - http_308_post.json.json.foo 
== 'bar' + - http_308_post.json.method == 'POST' + - http_308_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_308_post.redirected == true + - http_308_post.status == 200 + - http_308_post.url == 'https://{{ httpbin_host }}/anything' diff --git a/test/integration/targets/uri/tasks/redirect-none.yml b/test/integration/targets/uri/tasks/redirect-none.yml new file mode 100644 index 00000000..0f5ec68b --- /dev/null +++ b/test/integration/targets/uri/tasks/redirect-none.yml @@ -0,0 +1,296 @@ +- name: Test HTTP 301 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: HEAD + ignore_errors: yes + register: http_301_head + +- assert: + that: + - http_301_head is failure + - http_301_head.json is not defined + - http_301_head.location == 'https://{{ httpbin_host }}/anything' + - "http_301_head.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'" + - http_301_head.redirected == false + - http_301_head.status == 301 + - http_301_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: yes + register: http_301_get + +- assert: + that: + - http_301_get is failure + - http_301_get.json is not defined + - http_301_get.location == 'https://{{ httpbin_host }}/anything' + - "http_301_get.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'" + - http_301_get.redirected == false + - http_301_get.status == 301 + - http_301_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_301_post + +- assert: + that: + - http_301_post is failure + - http_301_post.json is not defined + - http_301_post.location == 'https://{{ httpbin_host }}/anything' + - "http_301_post.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'" + - http_301_post.redirected == false + - http_301_post.status == 301 + - http_301_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: HEAD + ignore_errors: yes + register: http_302_head + +- assert: + that: + - http_302_head is failure + - http_302_head.json is not defined + - http_302_head.location == 'https://{{ httpbin_host }}/anything' + - "http_302_head.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'" + - http_302_head.redirected == false + - http_302_head.status == 302 + - http_302_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: 
yes + register: http_302_get + +- assert: + that: + - http_302_get is failure + - http_302_get.json is not defined + - http_302_get.location == 'https://{{ httpbin_host }}/anything' + - "http_302_get.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'" + - http_302_get.redirected == false + - http_302_get.status == 302 + - http_302_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_302_post + +- assert: + that: + - http_302_post is failure + - http_302_post.json is not defined + - http_302_post.location == 'https://{{ httpbin_host }}/anything' + - "http_302_post.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'" + - http_302_post.redirected == false + - http_302_post.status == 302 + - http_302_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: HEAD + ignore_errors: yes + register: http_303_head + +- assert: + that: + - http_303_head is failure + - http_303_head.json is not defined + - http_303_head.location == 'https://{{ httpbin_host }}/anything' + - "http_303_head.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'" + - http_303_head.redirected == false + - http_303_head.status == 303 + - http_303_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: yes + register: http_303_get + +- assert: + that: + - http_303_get is failure + - http_303_get.json is not defined + - http_303_get.location == 'https://{{ httpbin_host }}/anything' + - "http_303_get.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'" + - http_303_get.redirected == false + - http_303_get.status == 303 + - http_303_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_303_post + +- assert: + that: + - http_303_post is failure + - http_303_post.json is not defined + - http_303_post.location == 'https://{{ httpbin_host }}/anything' + - "http_303_post.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'" + - http_303_post.redirected == false + - http_303_post.status == 303 + - http_303_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: HEAD + ignore_errors: yes + register: 
http_307_head + +- assert: + that: + - http_307_head is failure + - http_307_head.json is not defined + - http_307_head.location == 'https://{{ httpbin_host }}/anything' + - "http_307_head.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'" + - http_307_head.redirected == false + - http_307_head.status == 307 + - http_307_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: yes + register: http_307_get + +- assert: + that: + - http_307_get is failure + - http_307_get.json is not defined + - http_307_get.location == 'https://{{ httpbin_host }}/anything' + - "http_307_get.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'" + - http_307_get.redirected == false + - http_307_get.status == 307 + - http_307_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_307_post + +- assert: + that: + - http_307_post is failure + - http_307_post.json is not defined + - http_307_post.location == 'https://{{ httpbin_host }}/anything' + - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'" + - http_307_post.redirected == false + - http_307_post.status == 307 + - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything' + +# NOTE: This is a bug, fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 308 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: yes + register: http_308_head + +- assert: + that: + - http_308_head is failure + - http_308_head.json is not defined + - http_308_head.location == 'https://{{ httpbin_host }}/anything' + - "http_308_head.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_head.redirected == false + - http_308_head.status == 308 + - http_308_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' + +# NOTE: This is a bug, fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 308 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: GET + ignore_errors: yes + register: http_308_get + +- assert: + that: + - http_308_get is failure + - http_308_get.json is not defined + - http_308_get.location == 'https://{{ httpbin_host }}/anything' + - "http_308_get.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_get.redirected == false + - http_308_get.status == 308 + - http_308_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ 
httpbin_host }}/anything + follow_redirects: none + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_308_post + +- assert: + that: + - http_308_post is failure + - http_308_post.json is not defined + - http_308_post.location == 'https://{{ httpbin_host }}/anything' + - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_post.redirected == false + - http_308_post.status == 308 + - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' diff --git a/test/integration/targets/uri/tasks/redirect-safe.yml b/test/integration/targets/uri/tasks/redirect-safe.yml new file mode 100644 index 00000000..c95dd5aa --- /dev/null +++ b/test/integration/targets/uri/tasks/redirect-safe.yml @@ -0,0 +1,274 @@ +- name: Test HTTP 301 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: HEAD + register: http_301_head + +- assert: + that: + - http_301_head is successful + - http_301_head.json is not defined + - http_301_head.redirected == true + - http_301_head.status == 200 + - http_301_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: GET + register: http_301_get + +- assert: + that: + - http_301_get is successful + - http_301_get.json.data == '' + - http_301_get.json.method == 'GET' + - http_301_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_301_get.redirected == true + - http_301_get.status == 200 + - http_301_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_301_post + +- assert: + that: + - http_301_post is failure + - http_301_post.json is not defined + - http_301_post.location == 'https://{{ httpbin_host }}/anything' + - "http_301_post.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'" + - http_301_post.redirected == false + - http_301_post.status == 301 + - http_301_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: HEAD + register: http_302_head + +- assert: + that: + - http_302_head is successful + - http_302_head.json is not defined + - http_302_head.redirected == true + - http_302_head.status == 200 + - http_302_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: GET + register: http_302_get + +- assert: + that: + - http_302_get is successful + - http_302_get.json.data == '' + - http_302_get.json.method == 'GET' + - http_302_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_get.redirected == true + - 
http_302_get.status == 200 + - http_302_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_302_post + +- assert: + that: + - http_302_post is failure + - http_302_post.json is not defined + - http_302_post.location == 'https://{{ httpbin_host }}/anything' + - "http_302_post.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'" + - http_302_post.redirected == false + - http_302_post.status == 302 + - http_302_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: HEAD + register: http_303_head + +- assert: + that: + - http_303_head is successful + - http_303_head.json is not defined + - http_303_head.redirected == true + - http_303_head.status == 200 + - http_303_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: GET + register: http_303_get + +- assert: + that: + - http_303_get is successful + - http_303_get.json.data == '' + - http_303_get.json.method == 'GET' + - http_303_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_get.redirected == true + - http_303_get.status == 200 + - http_303_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_303_post + +- assert: + that: + - http_303_post is failure + - http_303_post.json is not defined + - http_303_post.location == 'https://{{ httpbin_host }}/anything' + - "http_303_post.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'" + - http_303_post.redirected == false + - http_303_post.status == 303 + - http_303_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: HEAD + register: http_307_head + +- assert: + that: + - http_307_head is successful + - http_307_head.json is not defined + - http_307_head.redirected == true + - http_307_head.status == 200 + - http_307_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: GET + register: http_307_get + +- assert: + that: + - http_307_get is successful + - http_307_get.json.data == '' + - http_307_get.json.method == 'GET' + - http_307_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_307_get.redirected == true + - http_307_get.status == 200 + - http_307_get.url == 'https://{{ 
httpbin_host }}/anything' + +- name: Test HTTP 307 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_307_post + +- assert: + that: + - http_307_post is failure + - http_307_post.json is not defined + - http_307_post.location == 'https://{{ httpbin_host }}/anything' + - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'" + - http_307_post.redirected == false + - http_307_post.status == 307 + - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: HEAD + register: http_308_head + +- assert: + that: + - http_308_head is successful + - http_308_head.json is not defined + - http_308_head.redirected == true + - http_308_head.status == 200 + - http_308_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: GET + register: http_308_get + +- assert: + that: + - http_308_get is successful + - http_308_get.json.data == '' + - http_308_get.json.method == 'GET' + - http_308_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_308_get.redirected == true + - http_308_get.status == 200 + - http_308_get.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 308 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: safe + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_308_post + +- assert: + that: + - http_308_post is failure + - http_308_post.json is not defined + - http_308_post.location == 'https://{{ httpbin_host }}/anything' + - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_post.redirected == false + - http_308_post.status == 308 + - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' diff --git a/test/integration/targets/uri/tasks/redirect-urllib2.yml b/test/integration/targets/uri/tasks/redirect-urllib2.yml new file mode 100644 index 00000000..10b115ee --- /dev/null +++ b/test/integration/targets/uri/tasks/redirect-urllib2.yml @@ -0,0 +1,294 @@ +# NOTE: The HTTP HEAD turns into an HTTP GET +- name: Test HTTP 301 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: HEAD + register: http_301_head + +- assert: + that: + - http_301_head is successful + - http_301_head.json.data == '' + - http_301_head.json.method == 'GET' + - http_301_head.json.url == 'https://{{ httpbin_host }}/anything' + - http_301_head.redirected == true + - http_301_head.status == 200 + - http_301_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 301 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: 
urllib2 + return_content: yes + method: GET + register: http_301_get + +- assert: + that: + - http_301_get is successful + - http_301_get.json.data == '' + - http_301_get.json.method == 'GET' + - http_301_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_301_get.redirected == true + - http_301_get.status == 200 + - http_301_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 301 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_301_post + +- assert: + that: + - http_301_post is successful + - http_301_post.json.data == '' + - http_301_post.json.method == 'GET' + - http_301_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_301_post.redirected == true + - http_301_post.status == 200 + - http_301_post.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP HEAD turns into an HTTP GET +- name: Test HTTP 302 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: HEAD + register: http_302_head + +- assert: + that: + - http_302_head is successful + - http_302_head.json.data == '' + - http_302_head.json.method == 'GET' + - http_302_head.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_head.redirected == true + - http_302_head.status == 200 + - http_302_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 302 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: GET + register: http_302_get + +- assert: + that: + - http_302_get is successful + - http_302_get.json.data == '' + - http_302_get.json.method == 'GET' + - http_302_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_get.redirected == true + - http_302_get.status == 200 + - http_302_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 302 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_302_post + +- assert: + that: + - http_302_post is successful + - http_302_post.json.data == '' + - http_302_post.json.method == 'GET' + - http_302_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_302_post.redirected == true + - http_302_post.status == 200 + - http_302_post.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP HEAD turns into an HTTP GET +- name: Test HTTP 303 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: HEAD + register: http_303_head + +- assert: + that: + - http_303_head is successful + - http_303_head.json.data == '' + - http_303_head.json.method == 'GET' + - http_303_head.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_head.redirected == true + - http_303_head.status == 200 + - http_303_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 303 using GET + uri: + url: https://{{ httpbin_host 
}}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: GET + register: http_303_get + +- assert: + that: + - http_303_get is successful + - http_303_get.json.data == '' + - http_303_get.json.method == 'GET' + - http_303_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_get.redirected == true + - http_303_get.status == 200 + - http_303_get.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP POST turns into an HTTP GET +- name: Test HTTP 303 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + register: http_303_post + +- assert: + that: + - http_303_post is successful + - http_303_post.json.data == '' + - http_303_post.json.method == 'GET' + - http_303_post.json.url == 'https://{{ httpbin_host }}/anything' + - http_303_post.redirected == true + - http_303_post.status == 200 + - http_303_post.url == 'https://{{ httpbin_host }}/anything' + +# NOTE: The HTTP HEAD turns into an HTTP GET +- name: Test HTTP 307 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: HEAD + register: http_307_head + +- assert: + that: + - http_307_head is successful + - http_307_head.json.data == '' + - http_307_head.json.method == 'GET' + - http_307_head.json.url == 'https://{{ httpbin_host }}/anything' + - http_307_head.redirected == true + - http_307_head.status == 200 + - http_307_head.url == 'https://{{ httpbin_host }}/anything' + +- name: Test HTTP 307 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: GET + register: http_307_get + +- assert: + that: + - http_307_get is successful + - http_307_get.json.data == '' + - http_307_get.json.method == 'GET' + - http_307_get.json.url == 'https://{{ httpbin_host }}/anything' + - http_307_get.redirected == true + - http_307_get.status == 200 + - http_307_get.url == 'https://{{ httpbin_host }}/anything' + +# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 307 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_307_post + +- assert: + that: + - http_307_post is failure + - http_307_post.json is not defined + - http_307_post.location == 'https://{{ httpbin_host }}/anything' + - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'" + - http_307_post.redirected == false + - http_307_post.status == 307 + - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything' + +# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 308 using HEAD + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: GET + ignore_errors: yes + register: http_308_head + +- assert: + that: + - http_308_head is failure + - http_308_head.json is not defined + - 
http_308_head.location == 'https://{{ httpbin_host }}/anything' + - "http_308_head.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_head.redirected == false + - http_308_head.status == 308 + - http_308_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' + +# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 308 using GET + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: GET + ignore_errors: yes + register: http_308_get + +- assert: + that: + - http_308_get is failure + - http_308_get.json is not defined + - http_308_get.location == 'https://{{ httpbin_host }}/anything' + - "http_308_get.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_get.redirected == false + - http_308_get.status == 308 + - http_308_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' + +# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809 +- name: Test HTTP 308 using POST + uri: + url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything + follow_redirects: urllib2 + return_content: yes + method: POST + body: '{ "foo": "bar" }' + body_format: json + ignore_errors: yes + register: http_308_post + +- assert: + that: + - http_308_post is failure + - http_308_post.json is not defined + - http_308_post.location == 'https://{{ httpbin_host }}/anything' + - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'" + - http_308_post.redirected == false + - http_308_post.status == 308 + - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything' diff --git a/test/integration/targets/uri/tasks/return-content.yml b/test/integration/targets/uri/tasks/return-content.yml new file mode 100644 index 00000000..5a9b97e6 --- /dev/null +++ b/test/integration/targets/uri/tasks/return-content.yml @@ -0,0 +1,49 @@ +- name: Test when return_content is yes + uri: + url: https://{{ httpbin_host }}/get + return_content: yes + register: result + +- name: Assert content exists when return_content is yes and request succeeds + assert: + that: + - result is successful + - "'content' in result" + +- name: Test when return_content is yes + uri: + url: http://does/not/exist + return_content: yes + register: result + ignore_errors: true + +- name: Assert content exists when return_content is yes and request fails + assert: + that: + - result is failed + - "'content' in result" + +- name: Test when return_content is no + uri: + url: https://{{ httpbin_host }}/get + return_content: no + register: result + +- name: Assert content does not exist when return_content is no and request succeeds + assert: + that: + - result is successful + - "'content' not in result" + +- name: Test when return_content is no + uri: + url: http://does/not/exist + return_content: no + register: result + ignore_errors: true + +- name: Assert content does not exist when return_content is no and request fails + assert: + that: + - result is failed + - "'content' not in result" \ No newline at end of file diff --git a/test/integration/targets/uri/tasks/unexpected-failures.yml b/test/integration/targets/uri/tasks/unexpected-failures.yml new file mode 100644 index 00000000..ac38871c --- /dev/null +++ 
b/test/integration/targets/uri/tasks/unexpected-failures.yml @@ -0,0 +1,27 @@ +--- +# same as expanduser & expandvars called on managed host +- command: 'echo {{ output_dir }}' + register: echo + +- set_fact: + remote_dir_expanded: '{{ echo.stdout }}' + +- name: ensure test directory doesn't exist + file: + path: '{{ output_dir }}/non/existent/path' + state: absent + +- name: destination doesn't exist + uri: + url: 'https://{{ httpbin_host }}/get' + dest: '{{ output_dir }}/non/existent/path' + ignore_errors: true + register: ret + +- name: check that unexpected failure didn't happen + assert: + that: + - ret is failed + - "not ret.msg.startswith('MODULE FAILURE')" + - '"Destination dir ''" ~ remote_dir_expanded ~ "/non/existent'' not writable" in ret.msg' + - ret.status == 200 diff --git a/test/integration/targets/uri/templates/netrc.j2 b/test/integration/targets/uri/templates/netrc.j2 new file mode 100644 index 00000000..3a100d51 --- /dev/null +++ b/test/integration/targets/uri/templates/netrc.j2 @@ -0,0 +1,3 @@ +machine {{ httpbin_host }} +login user +password passwd diff --git a/test/integration/targets/uri/vars/main.yml b/test/integration/targets/uri/vars/main.yml new file mode 100644 index 00000000..83a740bc --- /dev/null +++ b/test/integration/targets/uri/vars/main.yml @@ -0,0 +1,20 @@ +uri_os_packages: + RedHat: + urllib3: python-urllib3 + step1: + - python-pyasn1 + - pyOpenSSL + - python-urllib3 + step2: + - libffi-devel + - openssl-devel + - python-devel + Debian: + step1: + - python-pyasn1 + - python-openssl + - python-urllib3 + step2: + - libffi-dev + - libssl-dev + - python-dev diff --git a/test/integration/targets/user/aliases b/test/integration/targets/user/aliases new file mode 100644 index 00000000..3a07aab3 --- /dev/null +++ b/test/integration/targets/user/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/aix diff --git a/test/integration/targets/user/files/userlist.sh b/test/integration/targets/user/files/userlist.sh new file mode 100644 index 00000000..96a83b20 --- /dev/null +++ b/test/integration/targets/user/files/userlist.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +#- name: make a list of groups +# shell: | +# cat /etc/group | cut -d: -f1 +# register: group_names +# when: 'ansible_distribution != "MacOSX"' + +#- name: make a list of groups [mac] +# shell: dscl localhost -list /Local/Default/Groups +# register: group_names +# when: 'ansible_distribution == "MacOSX"' + +DISTRO="$*" + +if [[ "$DISTRO" == "MacOSX" ]]; then + dscl localhost -list /Local/Default/Users +else + grep -E -v ^\# /etc/passwd | cut -d: -f1 +fi diff --git a/test/integration/targets/user/meta/main.yml b/test/integration/targets/user/meta/main.yml new file mode 100644 index 00000000..07faa217 --- /dev/null +++ b/test/integration/targets/user/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/targets/user/tasks/main.yml b/test/integration/targets/user/tasks/main.yml new file mode 100644 index 00000000..3b8ff377 --- /dev/null +++ b/test/integration/targets/user/tasks/main.yml @@ -0,0 +1,34 @@ +# Test code for the user module. +# (c) 2017, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +- import_tasks: test_create_user.yml +- import_tasks: test_create_system_user.yml +- import_tasks: test_create_user_uid.yml +- import_tasks: test_create_user_password.yml +- import_tasks: test_create_user_home.yml +- import_tasks: test_remove_user.yml +- import_tasks: test_no_home_fallback.yml +- import_tasks: test_expires.yml +- import_tasks: test_expires_new_account.yml +- import_tasks: test_expires_new_account_epoch_negative.yml +- import_tasks: test_shadow_backup.yml +- import_tasks: test_ssh_key_passphrase.yml +- import_tasks: test_password_lock.yml +- import_tasks: test_password_lock_new_user.yml +- import_tasks: test_local.yml diff --git a/test/integration/targets/user/tasks/test_create_system_user.yml b/test/integration/targets/user/tasks/test_create_system_user.yml new file mode 100644 index 00000000..da746c50 --- /dev/null +++ b/test/integration/targets/user/tasks/test_create_system_user.yml @@ -0,0 +1,12 @@ +# create system user + +- name: remove user + user: + name: ansibulluser + state: absent + +- name: create system user + user: + name: ansibulluser + state: present + system: yes diff --git a/test/integration/targets/user/tasks/test_create_user.yml b/test/integration/targets/user/tasks/test_create_user.yml new file mode 100644 index 00000000..bced7905 --- /dev/null +++ b/test/integration/targets/user/tasks/test_create_user.yml @@ -0,0 +1,67 @@ +- name: remove the test user + user: + name: ansibulluser + state: absent + +- name: try to create a user + user: + name: ansibulluser + state: present + register: user_test0_0 + +- name: create the user again + user: + name: ansibulluser + state: present + register: user_test0_1 + +- debug: + var: user_test0 + verbosity: 2 + +- name: make a list of users + script: userlist.sh {{ ansible_facts.distribution }} + register: user_names + +- debug: + var: user_names + verbosity: 2 + +- name: validate results for testcase 0 + assert: + that: + - user_test0_0 is changed + - user_test0_1 is not changed + - '"ansibulluser" in user_names.stdout_lines' + +- name: run existing user check tests + user: + name: "{{ user_names.stdout_lines | random }}" + state: present + create_home: no + loop: "{{ range(1, 5+1) | list }}" + register: user_test1 + +- debug: + var: user_test1 + verbosity: 2 + +- name: validate results for testcase 1 + assert: + that: + - user_test1.results is defined + - user_test1.results | length == 5 + +- name: validate changed results for testcase 1 + assert: + that: + - "user_test1.results[0] is not changed" + - "user_test1.results[1] is not changed" + - "user_test1.results[2] is not changed" + - "user_test1.results[3] is not changed" + - "user_test1.results[4] is not changed" + - "user_test1.results[0]['state'] == 'present'" + - "user_test1.results[1]['state'] == 'present'" + - "user_test1.results[2]['state'] == 'present'" + - "user_test1.results[3]['state'] == 'present'" + - "user_test1.results[4]['state'] == 'present'" diff --git a/test/integration/targets/user/tasks/test_create_user_home.yml b/test/integration/targets/user/tasks/test_create_user_home.yml new file mode 100644 index 00000000..1b529f76 --- /dev/null +++ 
b/test/integration/targets/user/tasks/test_create_user_home.yml @@ -0,0 +1,136 @@ +# https://github.com/ansible/ansible/issues/42484 +# Skipping macOS for now since there is a bug when changing home directory +- name: Test home directory creation + when: ansible_facts.system != 'Darwin' + block: + - name: create user specifying home + user: + name: ansibulluser + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser" + register: user_test3_0 + + - name: create user again specifying home + user: + name: ansibulluser + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser" + register: user_test3_1 + + - name: change user home + user: + name: ansibulluser + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser-mod" + register: user_test3_2 + + - name: change user home back + user: + name: ansibulluser + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser" + register: user_test3_3 + + - name: validate results for testcase 3 + assert: + that: + - user_test3_0 is not changed + - user_test3_1 is not changed + - user_test3_2 is changed + - user_test3_3 is changed + +# https://github.com/ansible/ansible/issues/41393 +# Create a new user account with a path that has parent directories that do not exist +- name: Create user with home path that has parents that do not exist + user: + name: ansibulluser2 + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2" + register: create_home_with_no_parent_1 + +- name: Create user with home path that has parents that do not exist again + user: + name: ansibulluser2 + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2" + register: create_home_with_no_parent_2 + +- name: Check the created home directory + stat: + path: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2" + register: home_with_no_parent_3 + +- name: Ensure user with non-existing parent paths was created successfully + assert: + that: + - create_home_with_no_parent_1 is changed + - create_home_with_no_parent_1.home == user_home_prefix[ansible_facts.system] ~ '/in2deep/ansibulluser2' + - create_home_with_no_parent_2 is not changed + - home_with_no_parent_3.stat.uid == create_home_with_no_parent_1.uid + - home_with_no_parent_3.stat.gr_name == default_user_group[ansible_facts.distribution] | default('ansibulluser2') + +- name: Cleanup test account + user: + name: ansibulluser2 + home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2" + state: absent + remove: yes + +- name: Remove testing dir + file: + path: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/" + state: absent + + +# https://github.com/ansible/ansible/issues/60307 +# Make sure we can create a user when the home directory is missing +- name: Create user with home path that does not exist + user: + name: ansibulluser3 + state: present + home: "{{ user_home_prefix[ansible_facts.system] }}/nosuchdir" + createhome: no + +- name: Cleanup test account + user: + name: ansibulluser3 + state: absent + remove: yes + +# https://github.com/ansible/ansible/issues/70589 +# Create user with create_home: no and parent directory does not exist. 
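+# With create_home: false, the expectation (verified by the stat tasks and asserts below) is that neither the home directory nor its missing parent directory gets created.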
+- name: "Check if parent dir for home dir for user exists (before)" + stat: + path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir" + register: create_user_no_create_home_with_no_parent_parent_dir_before + +- name: "Create user with create_home == no and home path parent dir does not exist" + user: + name: randomuser + state: present + create_home: false + home: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir/randomuser" + register: create_user_no_create_home_with_no_parent + +- name: "Check if parent dir for home dir for user exists (after)" + stat: + path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir" + register: create_user_no_create_home_with_no_parent_parent_dir_after + +- name: "Check if home for user is created" + stat: + path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir/randomuser" + register: create_user_no_create_home_with_no_parent_home_dir + +- name: "Ensure user with non-existing parent paths with create_home: no was created successfully" + assert: + that: + - not create_user_no_create_home_with_no_parent_parent_dir_before.stat.exists + - not create_user_no_create_home_with_no_parent_parent_dir_after.stat.isdir is defined + - not create_user_no_create_home_with_no_parent_home_dir.stat.exists + +- name: Cleanup test account + user: + name: randomuser + state: absent + remove: yes diff --git a/test/integration/targets/user/tasks/test_create_user_password.yml b/test/integration/targets/user/tasks/test_create_user_password.yml new file mode 100644 index 00000000..02aae003 --- /dev/null +++ b/test/integration/targets/user/tasks/test_create_user_password.yml @@ -0,0 +1,90 @@ +# test user add with password +- name: add an encrypted password for user + user: + name: ansibulluser + password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS." 
+ state: present + update_password: always + register: test_user_encrypt0 + +- name: there should not be warnings + assert: + that: "'warnings' not in test_user_encrypt0" + +# https://github.com/ansible/ansible/issues/65711 +- name: Test updating password only on creation + user: + name: ansibulluser + password: '*' + update_password: on_create + register: test_user_update_password + +- name: Ensure password was not changed + assert: + that: + - test_user_update_password is not changed + +- name: Verify password hash for Linux + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + block: + - name: LINUX | Get shadow entry for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure password hash was not removed + assert: + that: + - getent_shadow['ansibulluser'][1] != '*' + +- name: Test plaintext warning + when: ansible_facts.system != 'Darwin' + block: + - name: add a plaintext password for user + user: + name: ansibulluser + password: "plaintextpassword" + state: present + update_password: always + register: test_user_encrypt1 + + - name: there should be a warning complaining that the password is plaintext + assert: + that: "'warnings' in test_user_encrypt1" + + - name: add an invalid hashed password + user: + name: ansibulluser + password: "$6$rounds=656000$tgK3gYTyRLUmhyv2$lAFrYUQwn7E6VsjPOwQwoSx30lmpiU9r/E0Al7tzKrR9mkodcMEZGe9OXD0H/clOn6qdsUnaL4zefy5fG+++++" + state: present + update_password: always + register: test_user_encrypt2 + + - name: there should be a warning complaining about the character set of the password + assert: + that: "'warnings' in test_user_encrypt2" + + - name: change password to '!' + user: + name: ansibulluser + password: '!' + register: test_user_encrypt3 + + - name: change password to '*' + user: + name: ansibulluser + password: '*' + register: test_user_encrypt4 + + - name: change password to '*************' + user: + name: ansibulluser + password: '*************' + register: test_user_encrypt5 + + - name: there should be no warnings when setting the password to '!', '*' or '*************' + assert: + that: + - "'warnings' not in test_user_encrypt3" + - "'warnings' not in test_user_encrypt4" + - "'warnings' not in test_user_encrypt5" diff --git a/test/integration/targets/user/tasks/test_create_user_uid.yml b/test/integration/targets/user/tasks/test_create_user_uid.yml new file mode 100644 index 00000000..9ac8a96f --- /dev/null +++ b/test/integration/targets/user/tasks/test_create_user_uid.yml @@ -0,0 +1,26 @@ +# test adding user with uid +# https://github.com/ansible/ansible/issues/62969 +- name: remove the test user + user: + name: ansibulluser + state: absent + +- name: try to create a user with uid + user: + name: ansibulluser + state: present + uid: 572 + register: user_test01_0 + +- name: create the user again + user: + name: ansibulluser + state: present + uid: 572 + register: user_test01_1 + +- name: validate results for testcase 0 + assert: + that: + - user_test01_0 is changed + - user_test01_1 is not changed diff --git a/test/integration/targets/user/tasks/test_expires.yml b/test/integration/targets/user/tasks/test_expires.yml new file mode 100644 index 00000000..8c238934 --- /dev/null +++ b/test/integration/targets/user/tasks/test_expires.yml @@ -0,0 +1,147 @@ +# Date is March 3, 2050 +- name: Set user expiration + user: + name: ansibulluser + state: present + expires: 2529881062 + register: user_test_expires1 + tags: + - timezone + +- name: Set user expiration again to ensure no change is made + user: + name: 
ansibulluser + state: present + expires: 2529881062 + register: user_test_expires2 + tags: + - timezone + +- name: Ensure that account with expiration was created and did not change on subsequent run + assert: + that: + - user_test_expires1 is changed + - user_test_expires2 is not changed + +- name: Verify expiration date for Linux + block: + - name: LINUX | Get expiration date for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure proper expiration date was set + assert: + that: + - getent_shadow['ansibulluser'][6] == '29281' + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + + +- name: Verify expiration date for BSD + block: + - name: BSD | Get expiration date for ansibulluser + shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7' + changed_when: no + register: bsd_account_expiration + + - name: BSD | Ensure proper expiration date was set + assert: + that: + - bsd_account_expiration.stdout == '2529881062' + when: ansible_facts.os_family == 'FreeBSD' + +- name: Change timezone + timezone: + name: America/Denver + register: original_timezone + tags: + - timezone + +- name: Change system timezone to make sure expiration comparison works properly + block: + - name: Create user with expiration again to ensure no change is made in a new timezone + user: + name: ansibulluser + state: present + expires: 2529881062 + register: user_test_different_tz + tags: + - timezone + + - name: Ensure that no change was reported + assert: + that: + - user_test_different_tz is not changed + tags: + - timezone + + always: + - name: Restore original timezone - {{ original_timezone.diff.before.name }} + timezone: + name: "{{ original_timezone.diff.before.name }}" + when: original_timezone.diff.before.name != "n/a" + tags: + - timezone + + - name: Restore original timezone when n/a + file: + path: /etc/sysconfig/clock + state: absent + when: + - original_timezone.diff.before.name == "n/a" + - "'/etc/sysconfig/clock' in original_timezone.msg" + tags: + - timezone + + +- name: Unexpire user + user: + name: ansibulluser + state: present + expires: -1 + register: user_test_expires3 + +- name: Verify un expiration date for Linux + block: + - name: LINUX | Get expiration date for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}" + that: + - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0 + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + +- name: Verify un expiration date for Linux/BSD + block: + - name: Unexpire user again to check for change + user: + name: ansibulluser + state: present + expires: -1 + register: user_test_expires4 + + - name: Ensure first expiration reported a change and second did not + assert: + msg: The second run of the expiration removal task reported a change when it should not + that: + - user_test_expires3 is changed + - user_test_expires4 is not changed + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse', 'FreeBSD'] + +- name: Verify un expiration date for BSD + block: + - name: BSD | Get expiration date for ansibulluser + shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7' + changed_when: no + register: bsd_account_expiration + + - name: BSD | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}" + that: + - 
bsd_account_expiration.stdout == '0' + when: ansible_facts.os_family == 'FreeBSD' diff --git a/test/integration/targets/user/tasks/test_expires_new_account.yml b/test/integration/targets/user/tasks/test_expires_new_account.yml new file mode 100644 index 00000000..b77d137f --- /dev/null +++ b/test/integration/targets/user/tasks/test_expires_new_account.yml @@ -0,0 +1,55 @@ +# Test setting no expiration when creating a new account +# https://github.com/ansible/ansible/issues/44155 +- name: Remove ansibulluser + user: + name: ansibulluser + state: absent + +- name: Create user account without expiration + user: + name: ansibulluser + state: present + expires: -1 + register: user_test_create_no_expires_1 + +- name: Create user account without expiration again + user: + name: ansibulluser + state: present + expires: -1 + register: user_test_create_no_expires_2 + +- name: Ensure changes were made appropriately + assert: + msg: Setting 'expires=-1' resulted in incorrect changes + that: + - user_test_create_no_expires_1 is changed + - user_test_create_no_expires_2 is not changed + +- name: Verify un expiration date for Linux + block: + - name: LINUX | Get expiration date for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}" + that: + - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0 + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + +- name: Verify un expiration date for BSD + block: + - name: BSD | Get expiration date for ansibulluser + shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7' + changed_when: no + register: bsd_account_expiration + + - name: BSD | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}" + that: + - bsd_account_expiration.stdout == '0' + when: ansible_facts.os_family == 'FreeBSD' diff --git a/test/integration/targets/user/tasks/test_expires_new_account_epoch_negative.yml b/test/integration/targets/user/tasks/test_expires_new_account_epoch_negative.yml new file mode 100644 index 00000000..77a07c4a --- /dev/null +++ b/test/integration/targets/user/tasks/test_expires_new_account_epoch_negative.yml @@ -0,0 +1,112 @@ +# Test setting epoch 0 expiration when creating a new account, then removing the expiry +# https://github.com/ansible/ansible/issues/47114 +- name: Remove ansibulluser + user: + name: ansibulluser + state: absent + +- name: Create user account with epoch 0 expiration + user: + name: ansibulluser + state: present + expires: 0 + register: user_test_expires_create0_1 + +- name: Create user account with epoch 0 expiration again + user: + name: ansibulluser + state: present + expires: 0 + register: user_test_expires_create0_2 + +- name: Change the user account to remove the expiry time + user: + name: ansibulluser + expires: -1 + register: user_test_remove_expires_1 + +- name: Change the user account to remove the expiry time again + user: + name: ansibulluser + expires: -1 + register: user_test_remove_expires_2 + + +- name: Verify un expiration date for Linux + block: + - name: LINUX | Ensure changes were made appropriately + assert: + msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes + that: + - user_test_expires_create0_1 is changed + - user_test_expires_create0_2 is not changed + - 
user_test_remove_expires_1 is changed + - user_test_remove_expires_2 is not changed + + - name: LINUX | Get expiration date for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}" + that: + - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0 + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + + +- name: Verify proper expiration behavior for BSD + block: + - name: BSD | Ensure changes were made appropriately + assert: + msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes + that: + - user_test_expires_create0_1 is changed + - user_test_expires_create0_2 is not changed + - user_test_remove_expires_1 is not changed + - user_test_remove_expires_2 is not changed + when: ansible_facts.os_family == 'FreeBSD' + + +# Test expiration with a very large negative number. This should have the same +# result as setting -1. +- name: Set expiration date using very long negative number + user: + name: ansibulluser + state: present + expires: -2529881062 + register: user_test_expires5 + +- name: Ensure no change was made + assert: + that: + - user_test_expires5 is not changed + +- name: Verify un expiration date for Linux + block: + - name: LINUX | Get expiration date for ansibulluser + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}" + that: + - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0 + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + +- name: Verify un expiration date for BSD + block: + - name: BSD | Get expiration date for ansibulluser + shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7' + changed_when: no + register: bsd_account_expiration + + - name: BSD | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}" + that: + - bsd_account_expiration.stdout == '0' + when: ansible_facts.os_family == 'FreeBSD' diff --git a/test/integration/targets/user/tasks/test_local.yml b/test/integration/targets/user/tasks/test_local.yml new file mode 100644 index 00000000..16c79c57 --- /dev/null +++ b/test/integration/targets/user/tasks/test_local.yml @@ -0,0 +1,169 @@ +## Check local mode +# Even if we don't have a system that is bound to a directory, it's useful +# to run with local: true to exercise the code path that reads through the local +# user database file.
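+# The local: yes tasks below go through the libuser tooling (luseradd and friends) rather than useradd/usermod, which is why a libuser package install task appears further down for Linux hosts.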
+# https://github.com/ansible/ansible/issues/50947 + +- name: Create /etc/gshadow + file: + path: /etc/gshadow + state: touch + when: ansible_facts.os_family == 'Suse' + tags: + - user_test_local_mode + +- name: Create /etc/libuser.conf + file: + path: /etc/libuser.conf + state: touch + when: + - ansible_facts.distribution == 'Ubuntu' + - ansible_facts.distribution_major_version is version_compare('16', '==') + tags: + - user_test_local_mode + +- name: Ensure luseradd is present + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: libuser + state: present + when: ansible_facts.system in ['Linux'] + tags: + - user_test_local_mode + +- name: Create local account that already exists to check for warning + user: + name: root + local: yes + register: local_existing + tags: + - user_test_local_mode + +- name: Create local_ansibulluser + user: + name: local_ansibulluser + state: present + local: yes + register: local_user_test_1 + tags: + - user_test_local_mode + +- name: Create local_ansibulluser again + user: + name: local_ansibulluser + state: present + local: yes + register: local_user_test_2 + tags: + - user_test_local_mode + +- name: Remove local_ansibulluser + user: + name: local_ansibulluser + state: absent + remove: yes + local: yes + register: local_user_test_remove_1 + tags: + - user_test_local_mode + +- name: Remove local_ansibulluser again + user: + name: local_ansibulluser + state: absent + remove: yes + local: yes + register: local_user_test_remove_2 + tags: + - user_test_local_mode + +- name: Create test groups + group: + name: "{{ item }}" + loop: + - testgroup1 + - testgroup2 + - testgroup3 + - testgroup4 + tags: + - user_test_local_mode + +- name: Create local_ansibulluser with groups + user: + name: local_ansibulluser + state: present + local: yes + groups: ['testgroup1', 'testgroup2'] + register: local_user_test_3 + ignore_errors: yes + tags: + - user_test_local_mode + +- name: Append groups for local_ansibulluser + user: + name: local_ansibulluser + state: present + local: yes + groups: ['testgroup3', 'testgroup4'] + append: yes + register: local_user_test_4 + ignore_errors: yes + tags: + - user_test_local_mode + +- name: Test append without groups for local_ansibulluser + user: + name: local_ansibulluser + state: present + append: yes + register: local_user_test_5 + ignore_errors: yes + tags: + - user_test_local_mode + +- name: Remove local_ansibulluser again + user: + name: local_ansibulluser + state: absent + remove: yes + local: yes + tags: + - user_test_local_mode + +- name: Remove test groups + group: + name: "{{ item }}" + state: absent + loop: + - testgroup1 + - testgroup2 + - testgroup3 + - testgroup4 + tags: + - user_test_local_mode + +- name: Ensure local user accounts were created and removed properly + assert: + that: + - local_user_test_1 is changed + - local_user_test_2 is not changed + - local_user_test_3 is changed + - local_user_test_4 is changed + - local_user_test_remove_1 is changed + - local_user_test_remove_2 is not changed + tags: + - user_test_local_mode + +- name: Ensure warnings were displayed properly + assert: + that: + - local_user_test_1['warnings'] | length > 0 + - local_user_test_1['warnings'] | first is search('The local user account may already exist') + - local_user_test_5['warnings'] is search("'append' is set, but no 'groups' are specified. 
Use 'groups'") + - local_existing['warnings'] is not defined + when: ansible_facts.system in ['Linux'] + tags: + - user_test_local_mode + +- name: Test expires for local users + import_tasks: test_local_expires.yml diff --git a/test/integration/targets/user/tasks/test_local_expires.yml b/test/integration/targets/user/tasks/test_local_expires.yml new file mode 100644 index 00000000..e6620353 --- /dev/null +++ b/test/integration/targets/user/tasks/test_local_expires.yml @@ -0,0 +1,333 @@ +--- +## local user expires +# Date is March 3, 2050 + +- name: Remove local_ansibulluser + user: + name: local_ansibulluser + state: absent + remove: yes + local: yes + tags: + - user_test_local_mode + +- name: Set user expiration + user: + name: local_ansibulluser + state: present + local: yes + expires: 2529881062 + register: user_test_local_expires1 + tags: + - timezone + - user_test_local_mode + +- name: Set user expiration again to ensure no change is made + user: + name: local_ansibulluser + state: present + local: yes + expires: 2529881062 + register: user_test_local_expires2 + tags: + - timezone + - user_test_local_mode + +- name: Ensure that account with expiration was created and did not change on subsequent run + assert: + that: + - user_test_local_expires1 is changed + - user_test_local_expires2 is not changed + tags: + - user_test_local_mode + +- name: Verify expiration date for Linux + block: + - name: LINUX | Get expiration date for local_ansibulluser + getent: + database: shadow + key: local_ansibulluser + tags: + - user_test_local_mode + + - name: LINUX | Ensure proper expiration date was set + assert: + that: + - getent_shadow['local_ansibulluser'][6] == '29281' + tags: + - user_test_local_mode + when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse'] + +- name: Change timezone + timezone: + name: America/Denver + register: original_timezone + tags: + - timezone + - user_test_local_mode + +- name: Change system timezone to make sure expiration comparison works properly + block: + - name: Create user with expiration again to ensure no change is made in a new timezone + user: + name: local_ansibulluser + state: present + local: yes + expires: 2529881062 + register: user_test_local_different_tz + tags: + - timezone + - user_test_local_mode + + - name: Ensure that no change was reported + assert: + that: + - user_test_local_different_tz is not changed + tags: + - timezone + - user_test_local_mode + + always: + - name: Restore original timezone - {{ original_timezone.diff.before.name }} + timezone: + name: "{{ original_timezone.diff.before.name }}" + when: original_timezone.diff.before.name != "n/a" + tags: + - timezone + - user_test_local_mode + + - name: Restore original timezone when n/a + file: + path: /etc/sysconfig/clock + state: absent + when: + - original_timezone.diff.before.name == "n/a" + - "'/etc/sysconfig/clock' in original_timezone.msg" + tags: + - timezone + - user_test_local_mode + + +- name: Unexpire user + user: + name: local_ansibulluser + state: present + local: yes + expires: -1 + register: user_test_local_expires3 + tags: + - user_test_local_mode + +- name: Verify un expiration date for Linux + block: + - name: LINUX | Get expiration date for local_ansibulluser + getent: + database: shadow + key: local_ansibulluser + tags: + - user_test_local_mode + + - name: LINUX | Ensure proper expiration date was set + assert: + msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}" + that: + - not getent_shadow['local_ansibulluser'][6] or 
getent_shadow['local_ansibulluser'][6] | int < 0
+      tags:
+        - user_test_local_mode
+  when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Verify un expiration date for Linux/BSD
+  block:
+    - name: Unexpire user again to check for change
+      user:
+        name: local_ansibulluser
+        state: present
+        local: yes
+        expires: -1
+      register: user_test_local_expires4
+      tags:
+        - user_test_local_mode
+
+    - name: Ensure first expiration reported a change and second did not
+      assert:
+        msg: The second run of the expiration removal task reported a change when it should not
+        that:
+          - user_test_local_expires3 is changed
+          - user_test_local_expires4 is not changed
+      tags:
+        - user_test_local_mode
+  when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse', 'FreeBSD']
+
+# Test setting no expiration when creating a new account
+# https://github.com/ansible/ansible/issues/44155
+- name: Remove local_ansibulluser
+  user:
+    name: local_ansibulluser
+    state: absent
+    remove: yes
+    local: yes
+  tags:
+    - user_test_local_mode
+
+- name: Create user account without expiration
+  user:
+    name: local_ansibulluser
+    state: present
+    local: yes
+    expires: -1
+  register: user_test_local_create_no_expires_1
+  tags:
+    - user_test_local_mode
+
+- name: Create user account without expiration again
+  user:
+    name: local_ansibulluser
+    state: present
+    local: yes
+    expires: -1
+  register: user_test_local_create_no_expires_2
+  tags:
+    - user_test_local_mode
+
+- name: Ensure changes were made appropriately
+  assert:
+    msg: Setting 'expires=-1' resulted in incorrect changes
+    that:
+      - user_test_local_create_no_expires_1 is changed
+      - user_test_local_create_no_expires_2 is not changed
+  tags:
+    - user_test_local_mode
+
+- name: Verify un expiration date for Linux
+  block:
+    - name: LINUX | Get expiration date for local_ansibulluser
+      getent:
+        database: shadow
+        key: local_ansibulluser
+      tags:
+        - user_test_local_mode
+
+    - name: LINUX | Ensure proper expiration date was set
+      assert:
+        msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+        that:
+          - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+      tags:
+        - user_test_local_mode
+  when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+# Test setting epoch 0 expiration when creating a new account, then removing the expiry
+# https://github.com/ansible/ansible/issues/47114
+- name: Remove local_ansibulluser
+  user:
+    name: local_ansibulluser
+    state: absent
+    remove: yes
+    local: yes
+  tags:
+    - user_test_local_mode
+
+- name: Create user account with epoch 0 expiration
+  user:
+    name: local_ansibulluser
+    state: present
+    local: yes
+    expires: 0
+  register: user_test_local_expires_create0_1
+  tags:
+    - user_test_local_mode
+
+- name: Create user account with epoch 0 expiration again
+  user:
+    name: local_ansibulluser
+    state: present
+    local: yes
+    expires: 0
+  register: user_test_local_expires_create0_2
+  tags:
+    - user_test_local_mode
+
+- name: Change the user account to remove the expiry time
+  user:
+    name: local_ansibulluser
+    expires: -1
+    local: yes
+  register: user_test_local_remove_expires_1
+  tags:
+    - user_test_local_mode
+
+- name: Change the user account to remove the expiry time again
+  user:
+    name: local_ansibulluser
+    expires: -1
+    local: yes
+  register: user_test_local_remove_expires_2
+  tags:
+    - user_test_local_mode
+
+
+- name: Verify un expiration date for Linux
+  block:
+    - name: LINUX | Ensure changes were made appropriately
+      assert:
+        msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes
+        that:
+          - user_test_local_expires_create0_1 is changed
+          - user_test_local_expires_create0_2 is not changed
+          - user_test_local_remove_expires_1 is changed
+          - user_test_local_remove_expires_2 is not changed
+      tags:
+        - user_test_local_mode
+
+    - name: LINUX | Get expiration date for local_ansibulluser
+      getent:
+        database: shadow
+        key: local_ansibulluser
+      tags:
+        - user_test_local_mode
+
+    - name: LINUX | Ensure proper expiration date was set
+      assert:
+        msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+        that:
+          - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+      tags:
+        - user_test_local_mode
+  when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+# Test expiration with a very large negative number. This should have the same
+# result as setting -1.
+- name: Set expiration date using very large negative number
+  user:
+    name: local_ansibulluser
+    state: present
+    local: yes
+    expires: -2529881062
+  register: user_test_local_expires5
+  tags:
+    - user_test_local_mode
+
+- name: Ensure no change was made
+  assert:
+    that:
+      - user_test_local_expires5 is not changed
+  tags:
+    - user_test_local_mode
+
+- name: Verify un expiration date for Linux
+  block:
+    - name: LINUX | Get expiration date for local_ansibulluser
+      getent:
+        database: shadow
+        key: local_ansibulluser
+      tags:
+        - user_test_local_mode
+
+    - name: LINUX | Ensure proper expiration date was set
+      assert:
+        msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+        that:
+          - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+      tags:
+        - user_test_local_mode
+  when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
diff --git a/test/integration/targets/user/tasks/test_no_home_fallback.yml b/test/integration/targets/user/tasks/test_no_home_fallback.yml
new file mode 100644
index 00000000..f7627fae
--- /dev/null
+++ b/test/integration/targets/user/tasks/test_no_home_fallback.yml
@@ -0,0 +1,106 @@
+## create user without home and test fallback home dir create
+
+- name: Test home directory creation
+  when: ansible_facts.system != 'Darwin'
+  block:
+    - name: create the user
+      user:
+        name: ansibulluser
+
+    - name: delete the user and home dir
+      user:
+        name: ansibulluser
+        state: absent
+        force: true
+        remove: true
+
+    - name: create the user without home
+      user:
+        name: ansibulluser
+        create_home: no
+
+    - name: create the user home dir
+      user:
+        name: ansibulluser
+      register: user_create_home_fallback
+
+    - name: stat home dir
+      stat:
+        path: '{{ user_create_home_fallback.home }}'
+      register: user_create_home_fallback_dir
+
+    - name: read UMASK from /etc/login.defs and return mode
+      shell: |
+        import re
+        import os
+        try:
+            for line in open('/etc/login.defs').readlines():
+                m = re.match(r'^UMASK\s+(\d+)$', line)
+                if m:
+                    umask = int(m.group(1), 8)
+        except Exception:
+            umask = os.umask(0)
+        mode = oct(0o777 & ~umask)
+        print(str(mode).replace('o', ''))
+      args:
+        executable: "{{ ansible_python_interpreter }}"
+      register: user_login_defs_umask
+
+    - name: validate that user home dir is created
+      assert:
+        that:
+          - user_create_home_fallback is changed
+          - user_create_home_fallback_dir.stat.exists
+          - user_create_home_fallback_dir.stat.isdir
+          - user_create_home_fallback_dir.stat.pw_name == 'ansibulluser'
+          - user_create_home_fallback_dir.stat.mode == user_login_defs_umask.stdout
+
+- name: Create non-system user
+  when: ansible_facts.distribution == "MacOSX"
+  block:
+    - name: create non-system user on macOS to test the shell is set to /bin/bash
+      user:
+        name: macosuser
+      register: macosuser_output
+
+    - name: validate the shell is set to /bin/bash
+      assert:
+        that:
+          - 'macosuser_output.shell == "/bin/bash"'
+
+    - name: cleanup
+      user:
+        name: macosuser
+        state: absent
+
+    - name: create system user on macOS to test the shell is set to /usr/bin/false
+      user:
+        name: macosuser
+        system: yes
+      register: macosuser_output
+
+    - name: validate the shell is set to /usr/bin/false
+      assert:
+        that:
+          - 'macosuser_output.shell == "/usr/bin/false"'
+
+    - name: cleanup
+      user:
+        name: macosuser
+        state: absent
+
+    - name: create non-system user on macOS and set the shell to /bin/sh
+      user:
+        name: macosuser
+        shell: /bin/sh
+      register: macosuser_output
+
+    - name: validate the shell is set to /bin/sh
+      assert:
+        that:
+          - 'macosuser_output.shell == "/bin/sh"'
+
+    - name: cleanup
+      user:
+        name: macosuser
+        state: absent
diff --git a/test/integration/targets/user/tasks/test_password_lock.yml b/test/integration/targets/user/tasks/test_password_lock.yml
new file mode 100644
index 00000000..dde374ee
--- /dev/null
+++ b/test/integration/targets/user/tasks/test_password_lock.yml
@@ -0,0 +1,140 @@
+- name: Test password lock
+  when: ansible_facts.system in ['FreeBSD', 'OpenBSD', 'Linux']
+  block:
+    - name: Remove ansibulluser
+      user:
+        name: ansibulluser
+        state: absent
+        remove: yes
+
+    - name: Create ansibulluser with password
+      user:
+        name: ansibulluser
+        password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+
+    - name: Lock account without password parameter
+      user:
+        name: ansibulluser
+        password_lock: yes
+      register: password_lock_1
+
+    - name: Lock account without password parameter again
+      user:
+        name: ansibulluser
+        password_lock: yes
+      register: password_lock_2
+
+    - name: Unlock account without password parameter
+      user:
+        name: ansibulluser
+        password_lock: no
+      register: password_lock_3
+
+    - name: Unlock account without password parameter again
+      user:
+        name: ansibulluser
+        password_lock: no
+      register: password_lock_4
+
+    - name: Lock account with password parameter
+      user:
+        name: ansibulluser
+        password_lock: yes
+        password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+      register: password_lock_5
+
+    - name: Lock account with password parameter again
+      user:
+        name: ansibulluser
+        password_lock: yes
+        password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+      register: password_lock_6
+
+    - name: Unlock account with password parameter
+      user:
+        name: ansibulluser
+        password_lock: no
+        password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+      register: password_lock_7
+
+    - name: Unlock account with password parameter again
+      user:
+        name: ansibulluser
+        password_lock: no
+        password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
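+        # the password value above is a pre-computed SHA-512 crypt hash; one way
+        # to generate such a hash (illustrative only, any secret works) is:
+        #   ansible localhost -m debug -a "msg={{ 'secret' | password_hash('sha512') }}"
+        # password_lock should toggle only the lock flag and leave the hash intact,
+        # which is why each second run above is expected to register no change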
+ register: password_lock_8 + + - name: Ensure task reported changes appropriately + assert: + msg: The password_lock tasks did not make changes appropriately + that: + - password_lock_1 is changed + - password_lock_2 is not changed + - password_lock_3 is changed + - password_lock_4 is not changed + - password_lock_5 is changed + - password_lock_6 is not changed + - password_lock_7 is changed + - password_lock_8 is not changed + + - name: Lock account + user: + name: ansibulluser + password_lock: yes + + - name: Verify account lock for BSD + when: ansible_facts.system in ['FreeBSD', 'OpenBSD'] + block: + - name: BSD | Get account status + shell: "{{ status_command[ansible_facts['system']] }}" + register: account_status_locked + + - name: Unlock account + user: + name: ansibulluser + password_lock: no + + - name: BSD | Get account status + shell: "{{ status_command[ansible_facts['system']] }}" + register: account_status_unlocked + + - name: FreeBSD | Ensure account is locked + assert: + that: + - "'LOCKED' in account_status_locked.stdout" + - "'LOCKED' not in account_status_unlocked.stdout" + when: ansible_facts['system'] == 'FreeBSD' + + - name: Verify account lock for Linux + when: ansible_facts.system == 'Linux' + block: + - name: LINUX | Get account status + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure account is locked + assert: + that: + - getent_shadow['ansibulluser'][0].startswith('!') + + - name: Unlock account + user: + name: ansibulluser + password_lock: no + + - name: LINUX | Get account status + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure account is unlocked + assert: + that: + - not getent_shadow['ansibulluser'][0].startswith('!') + + always: + - name: Unlock account + user: + name: ansibulluser + password_lock: no diff --git a/test/integration/targets/user/tasks/test_password_lock_new_user.yml b/test/integration/targets/user/tasks/test_password_lock_new_user.yml new file mode 100644 index 00000000..dd4f23da --- /dev/null +++ b/test/integration/targets/user/tasks/test_password_lock_new_user.yml @@ -0,0 +1,63 @@ +- name: Test password lock + when: ansible_facts.system in ['FreeBSD', 'OpenBSD', 'Linux'] + block: + - name: Remove ansibulluser + user: + name: ansibulluser + state: absent + remove: yes + + - name: Create ansibulluser with password and locked + user: + name: ansibulluser + password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS." + password_lock: yes + register: create_with_lock_1 + + - name: Create ansibulluser with password and locked again + user: + name: ansibulluser + password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS." 
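+        # creating the account already locked should write the hash '!'-prefixed
+        # to /etc/shadow on Linux (inspect with e.g. `getent shadow ansibulluser`),
+        # so this repeat of the creation task is expected to report no change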
+ password_lock: yes + register: create_with_lock_2 + + - name: Ensure task reported changes appropriately + assert: + msg: The password_lock tasks did not make changes appropriately + that: + - create_with_lock_1 is changed + - create_with_lock_2 is not changed + + - name: Verify account lock for BSD + when: ansible_facts.system in ['FreeBSD', 'OpenBSD'] + block: + - name: BSD | Get account status + shell: "{{ status_command[ansible_facts['system']] }}" + register: account_status_locked + + - name: FreeBSD | Ensure account is locked + assert: + that: + - "'LOCKED' in account_status_locked.stdout" + when: ansible_facts.system == 'FreeBSD' + + + - name: Verify account lock for Linux + when: ansible_facts.system == 'Linux' + block: + - name: LINUX | Get account status + getent: + database: shadow + key: ansibulluser + + - name: LINUX | Ensure account is locked + assert: + that: + - getent_shadow['ansibulluser'][0].startswith('!') + + + always: + - name: Unlock account + user: + name: ansibulluser + password_lock: no diff --git a/test/integration/targets/user/tasks/test_remove_user.yml b/test/integration/targets/user/tasks/test_remove_user.yml new file mode 100644 index 00000000..dea71cbf --- /dev/null +++ b/test/integration/targets/user/tasks/test_remove_user.yml @@ -0,0 +1,19 @@ +- name: try to delete the user + user: + name: ansibulluser + state: absent + force: true + register: user_test2 + +- name: make a new list of users + script: userlist.sh {{ ansible_facts.distribution }} + register: user_names2 + +- debug: + var: user_names2 + verbosity: 2 + +- name: validate results for testcase 2 + assert: + that: + - '"ansibulluser" not in user_names2.stdout_lines' diff --git a/test/integration/targets/user/tasks/test_shadow_backup.yml b/test/integration/targets/user/tasks/test_shadow_backup.yml new file mode 100644 index 00000000..2655fbf2 --- /dev/null +++ b/test/integration/targets/user/tasks/test_shadow_backup.yml @@ -0,0 +1,21 @@ +- name: Test shadow backup on Solaris + when: ansible_facts.os_family == 'Solaris' + block: + - name: Create a user to test shadow file backup + user: + name: ansibulluser + state: present + register: result + + - name: Find shadow backup files + find: + path: /etc + patterns: 'shadow\..*~$' + use_regex: yes + register: shadow_backups + + - name: Assert that a backup file was created + assert: + that: + - result.bakup + - shadow_backups.files | map(attribute='path') | list | length > 0 diff --git a/test/integration/targets/user/tasks/test_ssh_key_passphrase.yml b/test/integration/targets/user/tasks/test_ssh_key_passphrase.yml new file mode 100644 index 00000000..bb0486da --- /dev/null +++ b/test/integration/targets/user/tasks/test_ssh_key_passphrase.yml @@ -0,0 +1,29 @@ +# Test creating ssh key with passphrase +- name: Remove ansibulluser + user: + name: ansibulluser + state: absent + +- name: Create user with ssh key + user: + name: ansibulluser + state: present + generate_ssh_key: yes + force: yes + ssh_key_file: "{{ output_dir }}/test_id_rsa" + ssh_key_passphrase: secret_passphrase + +- name: Unlock ssh key + command: "ssh-keygen -y -f {{ output_dir }}/test_id_rsa -P secret_passphrase" + register: result + +- name: Check that ssh key was unlocked successfully + assert: + that: + - result.rc == 0 + +- name: Clean ssh key + file: + path: "{{ output_dir }}/test_id_rsa" + state: absent + when: ansible_os_family == 'FreeBSD' diff --git a/test/integration/targets/user/vars/main.yml b/test/integration/targets/user/vars/main.yml new file mode 100644 index 
00000000..4b328f71 --- /dev/null +++ b/test/integration/targets/user/vars/main.yml @@ -0,0 +1,13 @@ +user_home_prefix: + Linux: '/home' + FreeBSD: '/home' + SunOS: '/home' + Darwin: '/Users' + +status_command: + OpenBSD: "grep ansibulluser /etc/master.passwd | cut -d ':' -f 2" + FreeBSD: 'pw user show ansibulluser' + +default_user_group: + openSUSE Leap: users + MacOSX: admin diff --git a/test/integration/targets/var_blending/aliases b/test/integration/targets/var_blending/aliases new file mode 100644 index 00000000..b5983214 --- /dev/null +++ b/test/integration/targets/var_blending/aliases @@ -0,0 +1 @@ +shippable/posix/group3 diff --git a/test/integration/targets/var_blending/group_vars/all b/test/integration/targets/var_blending/group_vars/all new file mode 100644 index 00000000..30aa3d6d --- /dev/null +++ b/test/integration/targets/var_blending/group_vars/all @@ -0,0 +1,9 @@ +a: 999 +b: 998 +c: 997 +d: 996 +uno: 1 +dos: 2 +tres: 3 +etest: 'from group_vars' +inventory_beats_default: 'narf' diff --git a/test/integration/targets/var_blending/group_vars/local b/test/integration/targets/var_blending/group_vars/local new file mode 100644 index 00000000..8feb93fc --- /dev/null +++ b/test/integration/targets/var_blending/group_vars/local @@ -0,0 +1 @@ +tres: 'three' diff --git a/test/integration/targets/var_blending/host_vars/testhost b/test/integration/targets/var_blending/host_vars/testhost new file mode 100644 index 00000000..49271aef --- /dev/null +++ b/test/integration/targets/var_blending/host_vars/testhost @@ -0,0 +1,4 @@ +a: 1 +b: 2 +c: 3 +d: 4 diff --git a/test/integration/targets/var_blending/inventory b/test/integration/targets/var_blending/inventory new file mode 100644 index 00000000..f0afb18d --- /dev/null +++ b/test/integration/targets/var_blending/inventory @@ -0,0 +1,26 @@ +[local] +testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" +testhost2 ansible_connection=local # connections are never made to this host, only host vars are accessed + +# the following inline declarations are accompanied +# by (preferred) group_vars/ and host_vars/ variables +# and are used in testing of variable precedence + +[arbitrary_parent:children] +local + +[local:vars] +parent_var=6000 +groups_tree_var=5000 + +[arbitrary_parent:vars] +groups_tree_var=4000 +overridden_in_parent=1000 + +[arbitrary_grandparent:children] +arbitrary_parent + +[arbitrary_grandparent:vars] +groups_tree_var=3000 +grandparent_var=2000 +overridden_in_parent=2000 diff --git a/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml new file mode 100644 index 00000000..671a1271 --- /dev/null +++ b/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml @@ -0,0 +1,4 @@ +etest: "from role defaults" +role_var_beats_default: "shouldn't see this" +parameterized_beats_default: "shouldn't see this" +inventory_beats_default: "shouldn't see this" diff --git a/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt b/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt new file mode 100644 index 00000000..d51be39b --- /dev/null +++ b/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt @@ -0,0 +1,77 @@ +The value of groups_tree_var = 5000. +This comes from host, not the parents or grandparents. 
+
+The value of the grandparent variable grandparent_var is
+not overridden and is = 2000
+
+The value of the parent variable is not overridden and
+is = 6000
+
+The variable 'overridden_in_parent' is set in the parent
+and grandparent, so the parent wins. Its value is = 1000.
+
+The values of 'uno', 'dos', and 'tres' are set in group_vars/all but 'tres' is
+set to the value of 'three' in group_vars/local, which should override it.
+
+uno = 1
+dos = 2
+tres = three
+
+The values of 'a', 'b', 'c', and 'd' are set in host_vars/local and should not
+be clobbered by values that are also set in group_vars.
+
+a = 1
+b = 2
+c = 3
+d = 4
+
+The value of 'badwolf' is set via the include_vars plugin.
+
+badwolf = badwolf
+
+The value of 'winter' is set via the main.yml in the role.
+
+winter = coming
+
+Here's an arbitrary variable set as vars_files in the playbook.
+
+vars_file_var = 321
+
+And vars.
+
+vars = 123
+
+Variables about other hosts can be looked up via hostvars. This includes
+facts but here we'll just access a variable defined in the groups.
+
+999
+
+Ansible has pretty basic precedence rules for variable overriding. We already have
+some tests above about group order. Here are a few more.
+
+ * -e variables always win
+ * then comes "most everything else"
+ * then comes variables defined in inventory
+ * then "role defaults", which are the most "defaulty" and lose in priority to everything.
+
+Given the above rules, here's a test that a -e variable overrides inventory,
+and also defaults, and role vars.
+
+etest = from -e
+
+Now a test to make sure role variables can override inventory variables.
+
+role_var_beats_inventory = chevron 5 encoded
+
+Role variables should also beat defaults.
+
+role_var_beats_default = chevron 6 encoded
+
+But defaults are lower priority than inventory, so inventory should win.
+
+inventory_beats_default = narf
+
+That's the end of the precedence tests for now, but more are welcome.
+
+
+
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml
new file mode 100644
index 00000000..f2b2e54a
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml
@@ -0,0 +1,57 @@
+# test code
+# (c) 2014, Michael DeHaan
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include_vars: more_vars.yml
+
+- set_fact:
+    output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- name: deploy a template that will use variables at various levels
+  template: src=foo.j2 dest={{output_dir}}/foo.templated
+  register: template_result
+
+- name: copy known good into place
+  copy: src=foo.txt dest={{output_dir}}/foo.txt
+
+- name: compare templated file to known good
+  shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt
+  register: diff_result
+
+- name: verify templated file matches known good
+  assert:
+    that:
+      - 'diff_result.stdout == ""'
+
+- name: check debug variable with same name as var content
+  debug: var=same_value_as_var_name_var
+  register: same_value_as_var_name
+
+- name: check debug variable output when variable is undefined
+  debug: var=undefined_variable
+  register: var_undefined
+
+- assert:
+    that:
+      - "'VARIABLE IS NOT DEFINED!' in var_undefined.undefined_variable"
+      - same_value_as_var_name.same_value_as_var_name_var == 'same_value_as_var_name_var'
+
+- name: cleanup temporary template output
+  file: path={{output_dir}}/foo.templated state=absent
+
+- name: cleanup temporary copy
+  file: path={{output_dir}}/foo.txt state=absent
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2 b/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2
new file mode 100644
index 00000000..10709b1a
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2
@@ -0,0 +1,77 @@
+The value of groups_tree_var = {{ groups_tree_var }}.
+This comes from host, not the parents or grandparents.
+
+The value of the grandparent variable grandparent_var is
+not overridden and is = {{ grandparent_var }}
+
+The value of the parent variable is not overridden and
+is = {{ parent_var }}
+
+The variable 'overridden_in_parent' is set in the parent
+and grandparent, so the parent wins. Its value is = {{ overridden_in_parent }}.
+
+The values of 'uno', 'dos', and 'tres' are set in group_vars/all but 'tres' is
+set to the value of 'three' in group_vars/local, which should override it.
+
+uno = {{ uno }}
+dos = {{ dos }}
+tres = {{ tres }}
+
+The values of 'a', 'b', 'c', and 'd' are set in host_vars/local and should not
+be clobbered by values that are also set in group_vars.
+
+a = {{ a }}
+b = {{ b }}
+c = {{ c }}
+d = {{ d }}
+
+The value of 'badwolf' is set via the include_vars plugin.
+
+badwolf = {{ badwolf }}
+
+The value of 'winter' is set via the main.yml in the role.
+
+winter = {{ winter }}
+
+Here's an arbitrary variable set as vars_files in the playbook.
+
+vars_file_var = {{ vars_file_var }}
+
+And vars.
+
+vars = {{ vars_var }}
+
+Variables about other hosts can be looked up via hostvars. This includes
+facts but here we'll just access a variable defined in the groups.
+
+{{ hostvars['testhost2']['a'] }}
+
+Ansible has pretty basic precedence rules for variable overriding. We already have
+some tests above about group order. Here are a few more.
+
+ * -e variables always win
+ * then comes "most everything else"
+ * then comes variables defined in inventory
+ * then "role defaults", which are the most "defaulty" and lose in priority to everything.
+
+Given the above rules, here's a test that a -e variable overrides inventory,
+and also defaults, and role vars.
+
+etest = {{ etest }}
+
+Now a test to make sure role variables can override inventory variables.
+
+role_var_beats_inventory = {{ role_var_beats_inventory }}
+
+Role variables should also beat defaults.
+ +role_var_beats_default = {{ role_var_beats_default }} + +But defaults are lower priority than inventory, so inventory should win. + +inventory_beats_default = {{ inventory_beats_default }} + +That's the end of the precedence tests for now, but more are welcome. + + + diff --git a/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml new file mode 100644 index 00000000..1bb08bf8 --- /dev/null +++ b/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml @@ -0,0 +1,4 @@ +winter: coming +etest: 'from role vars' +role_var_beats_inventory: 'chevron 5 encoded' +role_var_beats_default: 'chevron 6 encoded' diff --git a/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml b/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml new file mode 100644 index 00000000..bac93d3e --- /dev/null +++ b/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml @@ -0,0 +1,3 @@ +badwolf: badwolf + +same_value_as_var_name_var: "same_value_as_var_name_var" diff --git a/test/integration/targets/var_blending/runme.sh b/test/integration/targets/var_blending/runme.sh new file mode 100755 index 00000000..d0cf7f09 --- /dev/null +++ b/test/integration/targets/var_blending/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook test_var_blending.yml -i inventory -e @test_vars.yml -v "$@" diff --git a/test/integration/targets/var_blending/test_var_blending.yml b/test/integration/targets/var_blending/test_var_blending.yml new file mode 100644 index 00000000..88a35b2c --- /dev/null +++ b/test/integration/targets/var_blending/test_var_blending.yml @@ -0,0 +1,8 @@ +- hosts: testhost + vars_files: + - vars_file.yml + vars: + vars_var: 123 + gather_facts: True + roles: + - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending } diff --git a/test/integration/targets/var_blending/test_vars.yml b/test/integration/targets/var_blending/test_vars.yml new file mode 100644 index 00000000..abb71a55 --- /dev/null +++ b/test/integration/targets/var_blending/test_vars.yml @@ -0,0 +1 @@ +etest: 'from -e' diff --git a/test/integration/targets/var_blending/vars_file.yml b/test/integration/targets/var_blending/vars_file.yml new file mode 100644 index 00000000..971e16a7 --- /dev/null +++ b/test/integration/targets/var_blending/vars_file.yml @@ -0,0 +1,12 @@ +# this file is here to support testing vars_files in the blending tests only. 
+# in general define test data in the individual role: +# roles/role_name/vars/main.yml + +foo: "Hello" +things1: + - 1 + - 2 +things2: + - "{{ foo }}" + - "{{ foob | default('') }}" +vars_file_var: 321 diff --git a/test/integration/targets/var_precedence/aliases b/test/integration/targets/var_precedence/aliases new file mode 100644 index 00000000..3005e4b2 --- /dev/null +++ b/test/integration/targets/var_precedence/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/test/integration/targets/var_precedence/ansible-var-precedence-check.py b/test/integration/targets/var_precedence/ansible-var-precedence-check.py new file mode 100755 index 00000000..f19cd1c5 --- /dev/null +++ b/test/integration/targets/var_precedence/ansible-var-precedence-check.py @@ -0,0 +1,541 @@ +#!/usr/bin/env python + +# A tool to check the order of precedence for ansible variables +# https://github.com/ansible/ansible/blob/devel/test/integration/test_var_precedence.yml + +import json +import os +import sys +import shutil +import stat +import subprocess +import tempfile +import yaml +from pprint import pprint +from optparse import OptionParser +from jinja2 import Environment + +ENV = Environment() +TESTDIR = tempfile.mkdtemp() + + +def run_command(args, cwd=None): + p = subprocess.Popen( + args, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + shell=True, + cwd=cwd, + ) + (so, se) = p.communicate() + return (p.returncode, so, se) + + +def clean_test_dir(): + if os.path.isdir(TESTDIR): + shutil.rmtree(TESTDIR) + os.makedirs(TESTDIR) + + +class Role(object): + def __init__(self, name): + self.name = name + self.load = True + self.dependencies = [] + self.defaults = False + self.vars = False + self.tasks = [] + self.params = dict() + + def write_role(self): + + fpath = os.path.join(TESTDIR, 'roles', self.name) + if not os.path.isdir(fpath): + os.makedirs(fpath) + + if self.defaults: + # roles/x/defaults/main.yml + fpath = os.path.join(TESTDIR, 'roles', self.name, 'defaults') + if not os.path.isdir(fpath): + os.makedirs(fpath) + fname = os.path.join(fpath, 'main.yml') + with open(fname, 'w') as f: + f.write('findme: %s\n' % self.name) + + if self.vars: + # roles/x/vars/main.yml + fpath = os.path.join(TESTDIR, 'roles', self.name, 'vars') + if not os.path.isdir(fpath): + os.makedirs(fpath) + fname = os.path.join(fpath, 'main.yml') + with open(fname, 'w') as f: + f.write('findme: %s\n' % self.name) + + if self.dependencies: + fpath = os.path.join(TESTDIR, 'roles', self.name, 'meta') + if not os.path.isdir(fpath): + os.makedirs(fpath) + fname = os.path.join(fpath, 'main.yml') + with open(fname, 'w') as f: + f.write('dependencies:\n') + for dep in self.dependencies: + f.write('- { role: %s }\n' % dep) + + +class DynamicInventory(object): + BASESCRIPT = '''#!/usr/bin/python +import json +data = """{{ data }}""" +data = json.loads(data) +print(json.dumps(data, indent=2, sort_keys=True)) +''' + + BASEINV = { + '_meta': { + 'hostvars': { + 'testhost': {} + } + } + } + + def __init__(self, features): + self.ENV = Environment() + self.features = features + self.fpath = None + self.inventory = self.BASEINV.copy() + self.build() + + def build(self): + xhost = 'testhost' + if 'script_host' in self.features: + self.inventory['_meta']['hostvars'][xhost]['findme'] = 'script_host' + else: + self.inventory['_meta']['hostvars'][xhost] = {} + + if 'script_child' in self.features: + self.inventory['child'] = { + 'hosts': [xhost], + 'vars': {'findme': 'script_child'} + } + + if 'script_parent' in self.features: + + 
self.inventory['parent'] = { + 'vars': {'findme': 'script_parent'} + } + + if 'script_child' in self.features: + self.inventory['parent']['children'] = ['child'] + else: + self.inventory['parent']['hosts'] = [xhost] + + if 'script_all' in self.features: + self.inventory['all'] = { + 'hosts': [xhost], + 'vars': { + 'findme': 'script_all' + }, + } + else: + self.inventory['all'] = { + 'hosts': [xhost], + } + + def write_script(self): + fdir = os.path.join(TESTDIR, 'inventory') + if not os.path.isdir(fdir): + os.makedirs(fdir) + fpath = os.path.join(fdir, 'hosts') + # fpath = os.path.join(TESTDIR, 'inventory') + self.fpath = fpath + + data = json.dumps(self.inventory) + t = self.ENV.from_string(self.BASESCRIPT) + fdata = t.render(data=data) + with open(fpath, 'w') as f: + f.write(fdata + '\n') + st = os.stat(fpath) + os.chmod(fpath, st.st_mode | stat.S_IEXEC) + + +class VarTestMaker(object): + def __init__(self, features, dynamic_inventory=False): + clean_test_dir() + self.dynamic_inventory = dynamic_inventory + self.di = None + self.features = features[:] + self.inventory = '' + self.playvars = dict() + self.varsfiles = [] + self.playbook = dict(hosts='testhost', gather_facts=False) + self.tasks = [] + self.roles = [] + self.ansible_command = None + self.stdout = None + + def write_playbook(self): + fname = os.path.join(TESTDIR, 'site.yml') + pb_copy = self.playbook.copy() + + if self.playvars: + pb_copy['vars'] = self.playvars + if self.varsfiles: + pb_copy['vars_files'] = self.varsfiles + if self.roles: + pb_copy['roles'] = [] + for role in self.roles: + role.write_role() + role_def = dict(role=role.name) + role_def.update(role.params) + pb_copy['roles'].append(role_def) + if self.tasks: + pb_copy['tasks'] = self.tasks + + with open(fname, 'w') as f: + pb_yaml = yaml.dump([pb_copy], f, default_flow_style=False, indent=2) + + def build(self): + + if self.dynamic_inventory: + # python based inventory file + self.di = DynamicInventory(self.features) + self.di.write_script() + else: + # ini based inventory file + if 'ini_host' in self.features: + self.inventory += 'testhost findme=ini_host\n' + else: + self.inventory += 'testhost\n' + self.inventory += '\n' + + if 'ini_child' in self.features: + self.inventory += '[child]\n' + self.inventory += 'testhost\n' + self.inventory += '\n' + self.inventory += '[child:vars]\n' + self.inventory += 'findme=ini_child\n' + self.inventory += '\n' + + if 'ini_parent' in self.features: + if 'ini_child' in self.features: + self.inventory += '[parent:children]\n' + self.inventory += 'child\n' + else: + self.inventory += '[parent]\n' + self.inventory += 'testhost\n' + self.inventory += '\n' + self.inventory += '[parent:vars]\n' + self.inventory += 'findme=ini_parent\n' + self.inventory += '\n' + + if 'ini_all' in self.features: + self.inventory += '[all:vars]\n' + self.inventory += 'findme=ini_all\n' + self.inventory += '\n' + + # default to a single file called inventory + invfile = os.path.join(TESTDIR, 'inventory', 'hosts') + ipath = os.path.join(TESTDIR, 'inventory') + if not os.path.isdir(ipath): + os.makedirs(ipath) + + with open(invfile, 'w') as f: + f.write(self.inventory) + + hpath = os.path.join(TESTDIR, 'inventory', 'host_vars') + if not os.path.isdir(hpath): + os.makedirs(hpath) + gpath = os.path.join(TESTDIR, 'inventory', 'group_vars') + if not os.path.isdir(gpath): + os.makedirs(gpath) + + if 'ini_host_vars_file' in self.features: + hfile = os.path.join(hpath, 'testhost') + with open(hfile, 'w') as f: + f.write('findme: ini_host_vars_file\n') + + 
if 'ini_group_vars_file_all' in self.features: + hfile = os.path.join(gpath, 'all') + with open(hfile, 'w') as f: + f.write('findme: ini_group_vars_file_all\n') + + if 'ini_group_vars_file_child' in self.features: + hfile = os.path.join(gpath, 'child') + with open(hfile, 'w') as f: + f.write('findme: ini_group_vars_file_child\n') + + if 'ini_group_vars_file_parent' in self.features: + hfile = os.path.join(gpath, 'parent') + with open(hfile, 'w') as f: + f.write('findme: ini_group_vars_file_parent\n') + + if 'pb_host_vars_file' in self.features: + os.makedirs(os.path.join(TESTDIR, 'host_vars')) + fname = os.path.join(TESTDIR, 'host_vars', 'testhost') + with open(fname, 'w') as f: + f.write('findme: pb_host_vars_file\n') + + if 'pb_group_vars_file_parent' in self.features: + if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')): + os.makedirs(os.path.join(TESTDIR, 'group_vars')) + fname = os.path.join(TESTDIR, 'group_vars', 'parent') + with open(fname, 'w') as f: + f.write('findme: pb_group_vars_file_parent\n') + + if 'pb_group_vars_file_child' in self.features: + if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')): + os.makedirs(os.path.join(TESTDIR, 'group_vars')) + fname = os.path.join(TESTDIR, 'group_vars', 'child') + with open(fname, 'w') as f: + f.write('findme: pb_group_vars_file_child\n') + + if 'pb_group_vars_file_all' in self.features: + if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')): + os.makedirs(os.path.join(TESTDIR, 'group_vars')) + fname = os.path.join(TESTDIR, 'group_vars', 'all') + with open(fname, 'w') as f: + f.write('findme: pb_group_vars_file_all\n') + + if 'play_var' in self.features: + self.playvars['findme'] = 'play_var' + + if 'set_fact' in self.features: + self.tasks.append(dict(set_fact='findme="set_fact"')) + + if 'vars_file' in self.features: + self.varsfiles.append('varsfile.yml') + fname = os.path.join(TESTDIR, 'varsfile.yml') + with open(fname, 'w') as f: + f.write('findme: vars_file\n') + + if 'include_vars' in self.features: + self.tasks.append(dict(include_vars='included_vars.yml')) + fname = os.path.join(TESTDIR, 'included_vars.yml') + with open(fname, 'w') as f: + f.write('findme: include_vars\n') + + if 'role_var' in self.features: + role = Role('role_var') + role.vars = True + role.load = True + self.roles.append(role) + + if 'role_parent_default' in self.features: + role = Role('role_default') + role.load = False + role.defaults = True + self.roles.append(role) + + role = Role('role_parent_default') + role.dependencies.append('role_default') + role.defaults = True + role.load = True + if 'role_params' in self.features: + role.params = dict(findme='role_params') + self.roles.append(role) + + elif 'role_default' in self.features: + role = Role('role_default') + role.defaults = True + role.load = True + if 'role_params' in self.features: + role.params = dict(findme='role_params') + self.roles.append(role) + + debug_task = dict(debug='var=findme') + test_task = {'assert': dict(that=['findme == "%s"' % self.features[0]])} + if 'task_vars' in self.features: + test_task['vars'] = dict(findme="task_vars") + if 'registered_vars' in self.features: + test_task['register'] = 'findme' + + if 'block_vars' in self.features: + block_wrapper = [ + debug_task, + { + 'block': [test_task], + 'vars': dict(findme="block_vars"), + } + ] + else: + block_wrapper = [debug_task, test_task] + + if 'include_params' in self.features: + self.tasks.append(dict(name='including tasks', include='included_tasks.yml', vars=dict(findme='include_params'))) + else: + 
self.tasks.append(dict(include='included_tasks.yml')) + + fname = os.path.join(TESTDIR, 'included_tasks.yml') + with open(fname, 'w') as f: + f.write(yaml.dump(block_wrapper)) + + self.write_playbook() + + def run(self): + ''' + if self.dynamic_inventory: + cmd = 'ansible-playbook -c local -i inventory/hosts site.yml' + else: + cmd = 'ansible-playbook -c local -i inventory site.yml' + ''' + cmd = 'ansible-playbook -c local -i inventory site.yml' + if 'extra_vars' in self.features: + cmd += ' --extra-vars="findme=extra_vars"' + cmd = cmd + ' -vvvvv' + self.ansible_command = cmd + (rc, so, se) = run_command(cmd, cwd=TESTDIR) + self.stdout = so + + if rc != 0: + raise Exception("playbook failed (rc=%s), stdout: '%s' stderr: '%s'" % (rc, so, se)) + + def show_tree(self): + print('## TREE') + cmd = 'tree %s' % TESTDIR + (rc, so, se) = run_command(cmd) + lines = so.split('\n') + lines = lines[:-3] + print('\n'.join(lines)) + + def show_content(self): + print('## CONTENT') + cmd = 'find %s -type f | xargs tail -n +1' % TESTDIR + (rc, so, se) = run_command(cmd) + print(so) + + def show_stdout(self): + print('## COMMAND') + print(self.ansible_command) + print('## STDOUT') + print(self.stdout) + + +def main(): + features = [ + 'extra_vars', + 'include_params', + # 'role_params', # FIXME: we don't yet validate tasks within a role + 'set_fact', + # 'registered_vars', # FIXME: hard to simulate + 'include_vars', + # 'role_dep_params', + 'task_vars', + 'block_vars', + 'role_var', + 'vars_file', + 'play_var', + # 'host_facts', # FIXME: hard to simulate + 'pb_host_vars_file', + 'ini_host_vars_file', + 'ini_host', + 'pb_group_vars_file_child', + # 'ini_group_vars_file_child', #FIXME: this contradicts documented precedence pb group vars files should override inventory ones + 'pb_group_vars_file_parent', + 'ini_group_vars_file_parent', + 'pb_group_vars_file_all', + 'ini_group_vars_file_all', + 'ini_child', + 'ini_parent', + 'ini_all', + 'role_parent_default', + 'role_default', + ] + + parser = OptionParser() + parser.add_option('-f', '--feature', action='append') + parser.add_option('--use_dynamic_inventory', action='store_true') + parser.add_option('--show_tree', action='store_true') + parser.add_option('--show_content', action='store_true') + parser.add_option('--show_stdout', action='store_true') + parser.add_option('--copy_testcases_to_local_dir', action='store_true') + (options, args) = parser.parse_args() + + if options.feature: + for f in options.feature: + if f not in features: + print('%s is not a valid feature' % f) + sys.exit(1) + features = [x for x in options.feature] + + fdesc = { + 'ini_host': 'host var inside the ini', + 'script_host': 'host var inside the script _meta', + 'ini_child': 'child group var inside the ini', + 'script_child': 'child group var inside the script', + 'ini_parent': 'parent group var inside the ini', + 'script_parent': 'parent group var inside the script', + 'ini_all': 'all group var inside the ini', + 'script_all': 'all group var inside the script', + 'ini_host_vars_file': 'var in inventory/host_vars/host', + 'ini_group_vars_file_parent': 'var in inventory/group_vars/parent', + 'ini_group_vars_file_child': 'var in inventory/group_vars/child', + 'ini_group_vars_file_all': 'var in inventory/group_vars/all', + 'pb_group_vars_file_parent': 'var in playbook/group_vars/parent', + 'pb_group_vars_file_child': 'var in playbook/group_vars/child', + 'pb_group_vars_file_all': 'var in playbook/group_vars/all', + 'pb_host_vars_file': 'var in playbook/host_vars/host', + 'play_var': 
'var set in playbook header', + 'role_parent_default': 'var in roles/role_parent/defaults/main.yml', + 'role_default': 'var in roles/role/defaults/main.yml', + 'role_var': 'var in ???', + 'include_vars': 'var in included file', + 'set_fact': 'var made by set_fact', + 'vars_file': 'var in file added by vars_file', + 'block_vars': 'vars defined on the block', + 'task_vars': 'vars defined on the task', + 'extra_vars': 'var passed via the cli' + } + + dinv = options.use_dynamic_inventory + if dinv: + # some features are specific to ini, so swap those + for (idx, x) in enumerate(features): + if x.startswith('ini_') and 'vars_file' not in x: + features[idx] = x.replace('ini_', 'script_') + + dinv = options.use_dynamic_inventory + + index = 1 + while features: + VTM = VarTestMaker(features, dynamic_inventory=dinv) + VTM.build() + + if options.show_tree or options.show_content or options.show_stdout: + print('') + if options.show_tree: + VTM.show_tree() + if options.show_content: + VTM.show_content() + + try: + print("CHECKING: %s (%s)" % (features[0], fdesc.get(features[0], ''))) + res = VTM.run() + if options.show_stdout: + VTM.show_stdout() + + features.pop(0) + + if options.copy_testcases_to_local_dir: + topdir = 'testcases' + if index == 1 and os.path.isdir(topdir): + shutil.rmtree(topdir) + if not os.path.isdir(topdir): + os.makedirs(topdir) + thisindex = str(index) + if len(thisindex) == 1: + thisindex = '0' + thisindex + thisdir = os.path.join(topdir, '%s.%s' % (thisindex, res)) + shutil.copytree(TESTDIR, thisdir) + + except Exception as e: + print("ERROR !!!") + print(e) + print('feature: %s failed' % features[0]) + sys.exit(1) + finally: + shutil.rmtree(TESTDIR) + index += 1 + + +if __name__ == "__main__": + main() diff --git a/test/integration/targets/var_precedence/host_vars/testhost b/test/integration/targets/var_precedence/host_vars/testhost new file mode 100644 index 00000000..7d533554 --- /dev/null +++ b/test/integration/targets/var_precedence/host_vars/testhost @@ -0,0 +1,2 @@ +# Var precedence testing +defaults_file_var_role3: "overridden from inventory" diff --git a/test/integration/targets/var_precedence/inventory b/test/integration/targets/var_precedence/inventory new file mode 100644 index 00000000..3b52d041 --- /dev/null +++ b/test/integration/targets/var_precedence/inventory @@ -0,0 +1,13 @@ +[local] +testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" + +[all:vars] +extra_var_override=FROM_INVENTORY +inven_var=inventory_var + +[inven_overridehosts] +invenoverride ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" + +[inven_overridehosts:vars] +foo=foo +var_dir=vars diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml new file mode 100644 index 00000000..423b94e3 --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - { role: test_var_precedence_role1, param_var: "param_var_role1" } + - { role: test_var_precedence_role2, param_var: "param_var_role2" } + - { role: test_var_precedence_role3, param_var: "param_var_role3" } diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml new file mode 100644 index 00000000..7850e6b6 --- /dev/null +++ 
b/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml @@ -0,0 +1,10 @@ +- debug: var=extra_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role +- assert: + that: + - 'extra_var == "extra_var"' + - 'vars_var == "vars_var"' + - 'vars_files_var == "vars_files_var"' + - 'vars_files_var_role == "vars_files_var_role3"' diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml new file mode 100644 index 00000000..dda4224c --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# should be overridden by vars_files in the main play +vars_files_var: "BAD!" +# should be seen in role1 (no override) +defaults_file_var_role1: "defaults_file_var_role1" diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml new file mode 100644 index 00000000..2f8e1709 --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml @@ -0,0 +1,14 @@ +- debug: var=extra_var +- debug: var=param_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role +- debug: var=defaults_file_var_role1 +- assert: + that: + - 'extra_var == "extra_var"' + - 'param_var == "param_var_role1"' + - 'vars_var == "vars_var"' + - 'vars_files_var == "vars_files_var"' + - 'vars_files_var_role == "vars_files_var_dep"' + - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml new file mode 100644 index 00000000..a69efad5 --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml @@ -0,0 +1,4 @@ +--- +# should override the global vars_files_var since it's local to the role +# but will be set to the value in the last role included which defines it +vars_files_var_role: "vars_files_var_dep" diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml new file mode 100644 index 00000000..942ae4ec --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- debug: var=foo +- assert: + that: + - 'foo == "bar"' diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml new file mode 100644 index 00000000..dda4224c --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# should be overridden by vars_files in the main play +vars_files_var: "BAD!" 
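+# (a play's vars_files rank well above role defaults, so running the playbook as
+#  runme.sh does should resolve this to "vars_files_var" rather than the default)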
+# should be seen in role1 (no override) +defaults_file_var_role1: "defaults_file_var_role1" diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml new file mode 100644 index 00000000..c8b410b5 --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - test_var_precedence_dep diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml new file mode 100644 index 00000000..95b2a0bb --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml @@ -0,0 +1,14 @@ +- debug: var=extra_var +- debug: var=param_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role +- debug: var=defaults_file_var_role1 +- assert: + that: + - 'extra_var == "extra_var"' + - 'param_var == "param_var_role1"' + - 'vars_var == "vars_var"' + - 'vars_files_var == "vars_files_var"' + - 'vars_files_var_role == "vars_files_var_role1"' + - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml new file mode 100644 index 00000000..2f7613d3 --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml @@ -0,0 +1,4 @@ +--- +# should override the global vars_files_var since it's local to the role +# but will be set to the value in the last role included which defines it +vars_files_var_role: "vars_files_var_role1" diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml new file mode 100644 index 00000000..8ed63ced --- /dev/null +++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# should be overridden by vars_files in the main play +vars_files_var: "BAD!" +# should be overridden by the vars file in role2 +defaults_file_var_role2: "BAD!" 
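+# (role vars rank above role defaults, so this role's vars/main.yml is expected
+#  to win: the tasks assert defaults_file_var_role2 == "overridden by role vars")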
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml
new file mode 100644
index 00000000..a862389c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role2
+- assert:
+    that:
+      - 'extra_var == "extra_var"'
+      - 'param_var == "param_var_role2"'
+      - 'vars_var == "vars_var"'
+      - 'vars_files_var == "vars_files_var"'
+      - 'vars_files_var_role == "vars_files_var_role2"'
+      - 'defaults_file_var_role2 == "overridden by role vars"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml
new file mode 100644
index 00000000..483c5ea2
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml
@@ -0,0 +1,5 @@
+---
+# should override the global vars_files_var since it's local to the role
+vars_files_var_role: "vars_files_var_role2"
+# should override the value in defaults/main.yml for role 2
+defaults_file_var_role2: "overridden by role vars"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml
new file mode 100644
index 00000000..763b0d50
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# should be overridden by vars_files in the main play
+vars_files_var: "BAD!"
+# should override the defaults var for role 1 and 2
+defaults_file_var: "last one wins"
+# should be overridden from the inventory value
+defaults_file_var_role3: "BAD!"
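+# (host_vars/testhost supplies defaults_file_var_role3, and inventory host vars
+#  rank above role defaults, so the tasks expect "overridden from inventory")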
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml
new file mode 100644
index 00000000..12346ecd
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role3
+- assert:
+    that:
+      - 'extra_var == "extra_var"'
+      - 'param_var == "param_var_role3"'
+      - 'vars_var == "vars_var"'
+      - 'vars_files_var == "vars_files_var"'
+      - 'vars_files_var_role == "vars_files_var_role3"'
+      - 'defaults_file_var_role3 == "overridden from inventory"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml
new file mode 100644
index 00000000..3cfb1b1c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# should override the global vars_files_var since it's local to the role
+vars_files_var_role: "vars_files_var_role3"
diff --git a/test/integration/targets/var_precedence/runme.sh b/test/integration/targets/var_precedence/runme.sh
new file mode 100755
index 00000000..0f0811c3
--- /dev/null
+++ b/test/integration/targets/var_precedence/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_var_precedence.yml -i inventory -v "$@" \
+    -e 'extra_var=extra_var' \
+    -e 'extra_var_override=extra_var_override'
+
+./ansible-var-precedence-check.py
diff --git a/test/integration/targets/var_precedence/test_var_precedence.yml b/test/integration/targets/var_precedence/test_var_precedence.yml
new file mode 100644
index 00000000..58584bfb
--- /dev/null
+++ b/test/integration/targets/var_precedence/test_var_precedence.yml
@@ -0,0 +1,44 @@
+---
+- hosts: testhost
+  vars:
+    - ansible_hostname: "BAD!"
+    - vars_var: "vars_var"
+    - param_var: "BAD!"
+    - vars_files_var: "BAD!"
+    - extra_var_override_once_removed: "{{ extra_var_override }}"
+    - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}"
+  vars_files:
+    - vars/test_var_precedence.yml
+  roles:
+    - { role: test_var_precedence, param_var: "param_var" }
+  tasks:
+    - name: register a result
+      command: echo 'BAD!'
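+      # the result registered below is deliberately clobbered by the following
+      # set_fact task: facts set via set_fact rank above registered task results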
+      register: registered_var
+    - name: use set_fact to override the registered_var
+      set_fact: registered_var="this is from set_fact"
+    - debug: var=extra_var
+    - debug: var=extra_var_override_once_removed
+    - debug: var=vars_var
+    - debug: var=vars_files_var
+    - debug: var=vars_files_var_role
+    - debug: var=registered_var
+    - debug: var=from_inventory_once_removed
+    - assert:
+        that: item
+      with_items:
+        - 'extra_var == "extra_var"'
+        - 'extra_var_override == "extra_var_override"'
+        - 'extra_var_override_once_removed == "extra_var_override"'
+        - 'vars_var == "vars_var"'
+        - 'vars_files_var == "vars_files_var"'
+        - 'vars_files_var_role == "vars_files_var_role3"'
+        - 'registered_var == "this is from set_fact"'
+        - 'from_inventory_once_removed == "inventory_var"'
+
+- hosts: inven_overridehosts
+  vars_files:
+    - "test_var_precedence.yml"
+  roles:
+    - role: test_var_precedence_inven_override
+      foo: bar
diff --git a/test/integration/targets/var_precedence/vars/test_var_precedence.yml b/test/integration/targets/var_precedence/vars/test_var_precedence.yml
new file mode 100644
index 00000000..19d65cba
--- /dev/null
+++ b/test/integration/targets/var_precedence/vars/test_var_precedence.yml
@@ -0,0 +1,5 @@
+---
+extra_var: "BAD!"
+role_var: "BAD!"
+vars_files_var: "vars_files_var"
+vars_files_var_role: "should be overridden by roles"
diff --git a/test/integration/targets/var_templating/aliases b/test/integration/targets/var_templating/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/var_templating/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/var_templating/group_vars/all.yml b/test/integration/targets/var_templating/group_vars/all.yml
new file mode 100644
index 00000000..4eae7c1b
--- /dev/null
+++ b/test/integration/targets/var_templating/group_vars/all.yml
@@ -0,0 +1,7 @@
+---
+x: 100
+y: "{{ x }}"
+nested_x:
+  value:
+    x: 100
+nested_y: "{{ nested_x }}"
diff --git a/test/integration/targets/var_templating/runme.sh b/test/integration/targets/var_templating/runme.sh
new file mode 100755
index 00000000..0d3ac6bb
--- /dev/null
+++ b/test/integration/targets/var_templating/runme.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# this should succeed since we override the undefined variable
+ansible-playbook undefined.yml -i inventory -v "$@" -e '{"mytest": False}'
+
+# this should still work, it just shows that the var is undefined in debug
+ansible-playbook undefined.yml -i inventory -v "$@"
+
+# this should work since we don't use the variable
+ansible-playbook undall.yml -i inventory -v "$@"
+
+# test hostvars templating
+ansible-playbook task_vars_templating.yml -v "$@"
+
+ansible-playbook test_connection_vars.yml -v "$@" 2>&1 | grep 'sudo'
diff --git a/test/integration/targets/var_templating/task_vars_templating.yml b/test/integration/targets/var_templating/task_vars_templating.yml
new file mode 100644
index 00000000..88e1e604
--- /dev/null
+++ b/test/integration/targets/var_templating/task_vars_templating.yml
@@ -0,0 +1,58 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+    - add_host:
+        name: host1
+        ansible_connection: local
+        ansible_host: 127.0.0.1
+
+- hosts: all
+  gather_facts: no
+  tasks:
+    - debug:
+        msg: "{{ hostvars['host1']['x'] }}"
+      register: x_1
+    - debug:
+        msg: "{{ hostvars['host1']['y'] }}"
+      register: y_1
+    - debug:
+        msg: "{{ hostvars_['x'] }}"
+      vars:
+        hostvars_: "{{ hostvars['host1'] }}"
+      register: x_2
+    - debug:
+        msg: "{{ hostvars_['y'] }}"
+      vars:
+        hostvars_: "{{ hostvars['host1'] }}"
+      register: y_2
+
+    - assert:
+        that:
+          - x_1 == x_2
+          - y_1 == y_2
+          - x_1 == y_1
+
+    - debug:
+        msg: "{{ hostvars['host1']['nested_x']['value'] }}"
+      register: x_1
+    - debug:
+        msg: "{{ hostvars['host1']['nested_y']['value'] }}"
+      register: y_1
+    - debug:
+        msg: "{{ hostvars_['nested_x']['value'] }}"
+      vars:
+        hostvars_: "{{ hostvars['host1'] }}"
+      register: x_2
+    - debug:
+        msg: "{{ hostvars_['nested_y']['value'] }}"
+      vars:
+        hostvars_: "{{ hostvars['host1'] }}"
+      register: y_2
+
+    - assert:
+        that:
+          - x_1 == x_2
+          - y_1 == y_2
+          - x_1 == y_1
diff --git a/test/integration/targets/var_templating/test_connection_vars.yml b/test/integration/targets/var_templating/test_connection_vars.yml
new file mode 100644
index 00000000..2b22eea6
--- /dev/null
+++ b/test/integration/targets/var_templating/test_connection_vars.yml
@@ -0,0 +1,26 @@
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    my_var:
+      become_method: sudo
+      connection: local
+      become: 1
+  tasks:
+
+    - include_vars: "./vars/connection.yml"
+
+    - command: whoami
+      ignore_errors: yes
+      register: result
+      failed_when: result is not success and result.module_stderr is defined
+
+    - assert:
+        that:
+          - "'sudo' in result.module_stderr"
+      when: result is not success and result.module_stderr is defined
+
+    - assert:
+        that:
+          - "'Invalid become method specified' not in result.msg"
+      when: result is not success and result.msg is defined
diff --git a/test/integration/targets/var_templating/undall.yml b/test/integration/targets/var_templating/undall.yml
new file mode 100644
index 00000000..9ea9f1d1
--- /dev/null
+++ b/test/integration/targets/var_templating/undall.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - debug:
+      vars:
+        mytest: '{{ und }}'
diff --git a/test/integration/targets/var_templating/undefined.yml b/test/integration/targets/var_templating/undefined.yml
new file mode 100644
index 00000000..cf083d5f
--- /dev/null
+++ b/test/integration/targets/var_templating/undefined.yml
@@ -0,0 +1,13 @@
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: show defined/undefined var
+      debug: var=mytest
+      vars:
+        mytest: '{{ und }}'
+      register: var_undefined
+
+    - name: ensure either mytest is defined or debug finds it to be undefined
+      assert:
+        that:
+          - mytest is defined or 'VARIABLE IS NOT DEFINED!'
in var_undefined['mytest'] diff --git a/test/integration/targets/var_templating/vars/connection.yml b/test/integration/targets/var_templating/vars/connection.yml new file mode 100644 index 00000000..263929a8 --- /dev/null +++ b/test/integration/targets/var_templating/vars/connection.yml @@ -0,0 +1,3 @@ +ansible_become: "{{ my_var.become }}" +ansible_become_method: "{{ my_var.become_method }}" +ansible_connection: "{{ my_var.connection }}" diff --git a/test/integration/targets/vault/aliases b/test/integration/targets/vault/aliases new file mode 100644 index 00000000..757c9966 --- /dev/null +++ b/test/integration/targets/vault/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/aix diff --git a/test/integration/targets/vault/empty-password b/test/integration/targets/vault/empty-password new file mode 100644 index 00000000..e69de29b diff --git a/test/integration/targets/vault/encrypted-vault-password b/test/integration/targets/vault/encrypted-vault-password new file mode 100644 index 00000000..7aa4e4be --- /dev/null +++ b/test/integration/targets/vault/encrypted-vault-password @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +34353166613539646338666531633061646161663836373965663032313466613135313130383133 +3634383331386336333436323832356264343033323166370a323737396234376132353731643863 +62386335616635363062613562666561643931626332623464306666636131356134386531363533 +3831323230353333620a616633376363373830346332663733316634663937336663633631326361 +62343638656532393932643530633133326233316134383036316333373962626164 diff --git a/test/integration/targets/vault/encrypted_file_encrypted_var_password b/test/integration/targets/vault/encrypted_file_encrypted_var_password new file mode 100644 index 00000000..57bc06e3 --- /dev/null +++ b/test/integration/targets/vault/encrypted_file_encrypted_var_password @@ -0,0 +1 @@ +test-encrypted-file-password diff --git a/test/integration/targets/vault/example1_password b/test/integration/targets/vault/example1_password new file mode 100644 index 00000000..e723c8f9 --- /dev/null +++ b/test/integration/targets/vault/example1_password @@ -0,0 +1 @@ +example1 diff --git a/test/integration/targets/vault/example2_password b/test/integration/targets/vault/example2_password new file mode 100644 index 00000000..7b010f87 --- /dev/null +++ b/test/integration/targets/vault/example2_password @@ -0,0 +1 @@ +example2 diff --git a/test/integration/targets/vault/example3_password b/test/integration/targets/vault/example3_password new file mode 100644 index 00000000..f5bc5a8c --- /dev/null +++ b/test/integration/targets/vault/example3_password @@ -0,0 +1 @@ +example3 diff --git a/test/integration/targets/vault/faux-editor.py b/test/integration/targets/vault/faux-editor.py new file mode 100755 index 00000000..68f62590 --- /dev/null +++ b/test/integration/targets/vault/faux-editor.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-vault is a script that encrypts/decrypts YAML files. 
See +# https://docs.ansible.com/playbooks_vault.html for more details. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import time +import os + + +def main(args): + path = os.path.abspath(args[1]) + + fo = open(path, 'r+') + + content = fo.readlines() + + content.append('faux editor added at %s\n' % time.time()) + + fo.seek(0) + fo.write(''.join(content)) + fo.close() + + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv[:])) diff --git a/test/integration/targets/vault/files/test_assemble/nonsecret.txt b/test/integration/targets/vault/files/test_assemble/nonsecret.txt new file mode 100644 index 00000000..320b6b4c --- /dev/null +++ b/test/integration/targets/vault/files/test_assemble/nonsecret.txt @@ -0,0 +1 @@ +THIS IS OK diff --git a/test/integration/targets/vault/files/test_assemble/secret.vault b/test/integration/targets/vault/files/test_assemble/secret.vault new file mode 100644 index 00000000..fd278564 --- /dev/null +++ b/test/integration/targets/vault/files/test_assemble/secret.vault @@ -0,0 +1,7 @@ +$ANSIBLE_VAULT;1.1;AES256 +37626439373465656332623633333336353334326531333666363766303339336134313136616165 +6561333963343739386334653636393363396366396338660a663537666561643862343233393265 +33336436633864323935356337623861663631316530336532633932623635346364363338363437 +3365313831366365350a613934313862313538626130653539303834656634353132343065633162 +34316135313837623735653932663139353164643834303534346238386435373832366564646236 +3461333465343434666639373432366139363566303564643066 diff --git a/test/integration/targets/vault/format_1_1_AES256.yml b/test/integration/targets/vault/format_1_1_AES256.yml new file mode 100644 index 00000000..5616605e --- /dev/null +++ b/test/integration/targets/vault/format_1_1_AES256.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +33613463343938323434396164663236376438313435633837336438366530666431643031333734 +6463646538393331333239393363333830613039376562360a396635393636636539346332336364 +35303039353164386461326439346165656463383137663932323930666632326263636266656461 +3232663537653637640a643166666232633936636664376435316664656631633166323237356163 +6138 diff --git a/test/integration/targets/vault/format_1_2_AES256.yml b/test/integration/targets/vault/format_1_2_AES256.yml new file mode 100644 index 00000000..1e3795fb --- /dev/null +++ b/test/integration/targets/vault/format_1_2_AES256.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.2;AES256;test_vault_id +30383835613535356232333534303264656530633664616233386138396563623939626136366537 +3635323530646538626138383136636437616637616430610a386661346563346136326637656461 +64393364343964633364336666333630383164643662343930663432316333633537353938376437 +6134656262373731390a363166356461376663313532343733326438386632623930313366643038 +6133 diff --git a/test/integration/targets/vault/host_vars/myhost.yml b/test/integration/targets/vault/host_vars/myhost.yml new file mode 100644 index 00000000..1434ec15 --- /dev/null +++ b/test/integration/targets/vault/host_vars/myhost.yml @@ -0,0 +1,7 @@ +myvar: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 31356335363836383937363933366135623233343830326234633633623734336636343630396464 + 3234343638313166663237343536646336323862613739380a346266316336356230643838663031 + 34623034383639323062373235356564393337346666393665313237313231306131356637346537 + 3966393238666430310a363462326639323033653237373036643936613234623063643761663033 + 3832 diff --git a/test/integration/targets/vault/host_vars/testhost.yml 
b/test/integration/targets/vault/host_vars/testhost.yml new file mode 100644 index 00000000..b3e569ad --- /dev/null +++ b/test/integration/targets/vault/host_vars/testhost.yml @@ -0,0 +1,7 @@ +vaulted_utf8_value: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 39313961356631343234656136636231663539363963386364653436346133366366633031366364 + 3332376636333837333036633662316135383365343335380a393331663434663238666537343163 + 62363561336431623666633735313766613663333736653064373632666131356434336537383336 + 3333343436613232330a643461363831633166333237653530353131316361643465353132616362 + 3461 diff --git a/test/integration/targets/vault/invalid_format/README.md b/test/integration/targets/vault/invalid_format/README.md new file mode 100644 index 00000000..cbbc07a9 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/README.md @@ -0,0 +1 @@ +Based on https://github.com/yves-vogl/ansible-inline-vault-issue diff --git a/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml b/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml new file mode 100644 index 00000000..71dbacc0 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml @@ -0,0 +1,23 @@ +--- +- hosts: broken-group-vars + gather_facts: false + tasks: + - name: EXPECTED FAILURE + debug: + msg: "some_var_that_fails: {{ some_var_that_fails }}" + + - name: EXPECTED FAILURE Display hostvars + debug: + msg: "{{inventory_hostname}} hostvars: {{ hostvars[inventory_hostname] }}" + + +# ansible-vault --vault-password-file=vault-secret encrypt_string test +# !vault | +# $ANSIBLE_VAULT;1.1;AES256 +# 64323332393930623633306662363165386332376638653035356132646165663632616263653366 +# 6233383362313531623238613461323861376137656265380a366464663835633065616361636231 +# 39653230653538366165623664326661653135306132313730393232343432333635326536373935 +# 3366323866663763660a323766383531396433663861656532373663373134376263383263316261 +# 3137 + +# $ ansible-playbook -i inventory --vault-password-file=vault-secret tasks.yml diff --git a/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml b/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml new file mode 100644 index 00000000..9afbd58e --- /dev/null +++ b/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml @@ -0,0 +1,7 @@ +--- +- hosts: broken-host-vars + gather_facts: false + tasks: + - name: EXPECTED FAILURE Display hostvars + debug: + msg: "{{inventory_hostname}} hostvars: {{ hostvars[inventory_hostname] }}" diff --git a/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml b/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml new file mode 100644 index 00000000..5f477431 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml @@ -0,0 +1,8 @@ +$ANSIBLE_VAULT;1.1;AES256 +64306566356165343030353932383461376334336665626135343932356431383134306338353664 +6435326361306561633165633536333234306665346437330a366265346466626464396264393262 +34616366626565336637653032336465363165363334356535353833393332313239353736623237 +6434373738633039650a353435303366323139356234616433613663626334643939303361303764 +3636363333333333333333333 +36313937643431303637353931366363643661396238303530323262326334343432383637633439 +6365373237336535353661356430313965656538363436333836 diff --git a/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars 
b/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars new file mode 100644 index 00000000..2d309eb5 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars @@ -0,0 +1,11 @@ +--- +example_vars: + some_key: + another_key: some_value + bad_vault_dict_key: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 64323332393930623633306662363165386332376638653035356132646165663632616263653366 + 623338xyz2313531623238613461323861376137656265380a366464663835633065616361636231 + 3366323866663763660a323766383531396433663861656532373663373134376263383263316261 + 3137 + diff --git a/test/integration/targets/vault/invalid_format/inventory b/test/integration/targets/vault/invalid_format/inventory new file mode 100644 index 00000000..e6e259a4 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/inventory @@ -0,0 +1,5 @@ +[broken-group-vars] +broken.example.com + +[broken-host-vars] +broken-host-vars.example.com diff --git a/test/integration/targets/vault/invalid_format/original-broken-host-vars b/test/integration/targets/vault/invalid_format/original-broken-host-vars new file mode 100644 index 00000000..6be696b5 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/original-broken-host-vars @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +64323332393930623633306662363165386332376638653035356132646165663632616263653366 +6233383362313531623238613461323861376137656265380a366464663835633065616361636231 +3366323866663763660a323766383531396433663861656532373663373134376263383263316261 +3137 + diff --git a/test/integration/targets/vault/invalid_format/original-group-vars.yml b/test/integration/targets/vault/invalid_format/original-group-vars.yml new file mode 100644 index 00000000..817557be --- /dev/null +++ b/test/integration/targets/vault/invalid_format/original-group-vars.yml @@ -0,0 +1,2 @@ +--- +some_var_that_fails: blippy diff --git a/test/integration/targets/vault/invalid_format/some-vars b/test/integration/targets/vault/invalid_format/some-vars new file mode 100644 index 00000000..e841a262 --- /dev/null +++ b/test/integration/targets/vault/invalid_format/some-vars @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +37303462633933386339386465613039363964643466663866356261313966663465646262636333 +3965643566363764356563363334363431656661636634380a333837343065326239336639373238 +64316236383836383434366662626339643561616630326137383262396331396538363136323063 +6236616130383264620a613863373631316234656236323332633166623738356664353531633239 +3533 diff --git a/test/integration/targets/vault/invalid_format/vault-secret b/test/integration/targets/vault/invalid_format/vault-secret new file mode 100644 index 00000000..4406e35c --- /dev/null +++ b/test/integration/targets/vault/invalid_format/vault-secret @@ -0,0 +1 @@ +enemenemu \ No newline at end of file diff --git a/test/integration/targets/vault/inventory.toml b/test/integration/targets/vault/inventory.toml new file mode 100644 index 00000000..d97ed398 --- /dev/null +++ b/test/integration/targets/vault/inventory.toml @@ -0,0 +1,5 @@ +[vauled_group.hosts] +vaulted_host_toml={ ansible_host="localhost", ansible_connection="local" } + +[vauled_group.vars] +hello="world" diff --git a/test/integration/targets/vault/password-script.py b/test/integration/targets/vault/password-script.py new file mode 100755 index 00000000..c47fdfb9 --- /dev/null +++ b/test/integration/targets/vault/password-script.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# +# Ansible is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-vault is a script that encrypts/decrypts YAML files. See +# https://docs.ansible.com/playbooks_vault.html for more details. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +PASSWORD = 'test-vault-password' + + +def main(args): + print(PASSWORD) + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv[:])) diff --git a/test/integration/targets/vault/roles/test_vault/tasks/main.yml b/test/integration/targets/vault/roles/test_vault/tasks/main.yml new file mode 100644 index 00000000..4e5551d9 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault/tasks/main.yml @@ -0,0 +1,9 @@ +- assert: + that: + - 'secret_var == "secret"' + + +- copy: src=vault-secret.txt dest={{output_dir}}/secret.txt + +- name: cleanup decrypted file + file: path={{ output_dir }}/secret.txt state=absent diff --git a/test/integration/targets/vault/roles/test_vault/vars/main.yml b/test/integration/targets/vault/roles/test_vault/vars/main.yml new file mode 100644 index 00000000..cfac107a --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault/vars/main.yml @@ -0,0 +1,9 @@ +$ANSIBLE_VAULT;1.1;AES256 +31626536666232643662346539623662393436386162643439643434656231343435653936343235 +6139346364396166336636383734333430373763336434310a303137623539653939336132626234 +64613232396532313731313935333433353330666466646663303233323331636234326464643166 +6538653264636166370a613161313064653566323037393962643032353230396536313865326362 +34396262303130326632623162623230346238633932393938393766313036643835613936356233 +33323730373331386337353339613165373064323134343930333031623036326164353534646631 +31313963666234623731316238656233396638643331306231373539643039383434373035306233 +30386230363730643561 diff --git a/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml new file mode 100644 index 00000000..eba93896 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Assert that a embedded vault of a string with no newline works + assert: + that: + - '"{{ vault_encrypted_one_line_var }}" == "Setec Astronomy"' + +- name: Assert that a multi line embedded vault works, including new line + assert: + that: + - vault_encrypted_var == "Setec Astronomy\n" + +# TODO: add a expected fail here +# - debug: var=vault_encrypted_one_line_var_with_embedded_template diff --git a/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml b/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml new file mode 100644 index 00000000..54e6004f --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml @@ -0,0 +1,17 @@ +# If you use normal 'ansible-vault create' or edit, files always have at least one new line +# so c&p from a vault encrypted that wasn't specifically created sans new line ends 
up with one. +# (specifically created, as in 'echo -n "just one line" > my_secret.yml' +vault_encrypted_var: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 66386439653236336462626566653063336164663966303231363934653561363964363833313662 + 6431626536303530376336343832656537303632313433360a626438346336353331386135323734 + 62656361653630373231613662633962316233633936396165386439616533353965373339616234 + 3430613539666330390a313736323265656432366236633330313963326365653937323833366536 + 34623731376664623134383463316265643436343438623266623965636363326136 +vault_encrypted_one_line_var: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 33363965326261303234626463623963633531343539616138316433353830356566396130353436 + 3562643163366231316662386565383735653432386435610a306664636137376132643732393835 + 63383038383730306639353234326630666539346233376330303938323639306661313032396437 + 6233623062366136310a633866373936313238333730653739323461656662303864663666653563 + 3138 diff --git a/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml new file mode 100644 index 00000000..9aeaf240 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: set a fact from vault_encrypted_example1_releases + set_fact: + example1_releases: "{{ vault_encrypted_example1_releases }}" + +- name: Assert that a embedded vault of a multiline string with a vault id works + assert: + that: + - "vault_encrypted_example1_releases is defined" + - "example1_releases is defined" + - "example1_releases.startswith('Ansible Releases')" + # - '"{{ vault_encrypted_example1_releases }}" == "Setec Astronomy"' + +- name: Assert that a embedded vault with a different vault id works + assert: + that: + - "vault_encrypted_example2_hello == 'Hello world'" + +- name: Assert that a embedded vault with no vault id and format 1.2 works + assert: + that: + - "vault_encrypted_example3_foobar == 'Foobar'" + #- name: Assert that a multi line embedded vault works, including new line + # assert: + # that: + # - vault_encrypted_var == "Setec Astronomy\n" + +# TODO: add a expected fail here +# - debug: var=vault_encrypted_one_line_var_with_embedded_template diff --git a/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml b/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml new file mode 100644 index 00000000..9c8fa4b2 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml @@ -0,0 +1,194 @@ +vault_encrypted_example2_hello: !vault | + $ANSIBLE_VAULT;1.2;AES256;example2 + 30383930326535616363383537613266376364323738313835353566633533353364363837383638 + 3737633764613862343666346337353964613138653036610a313663393231386139343835626436 + 66633336303866323335616661366363333463616530326635383836656432396665313338313737 + 6539616630663262650a383762303362356438616261646564303230633930336563373566623235 + 3566 +vault_encrypted_example1_releases: !vault | + $ANSIBLE_VAULT;1.2;AES256;example1 + 63643833646565393535303862343135326261343362396234656137313731313864316539616462 + 3333313439353638393963643535633835643035383331340a393639386166313838326336363032 + 65396565616531663839316132646230316561613865333437653666323034396337626431663931 + 3339363233356438350a363734616337306136376139346162376334343537613032633563666361 + 36386437356463616563646336393064626131363963643434376439346331663836663961353533 + 
62343663623863663830663531663930636532653165636238636433373835623435313632313030 + 33333734343566393739393661383430623063323132303132306361666433386166633564626434 + 62666361653465616636646335353230373961393863373261633461303233313965346565643434 + 63383633303131643730366233383264373865376562623962636562343732343266636535356362 + 62396635613231336162393630343136663731366665623835303762636161393163373361383634 + 65333739326264346136333337363666396336353065366161316130653738356133646364316130 + 32346636386665633131376662356238386161373565336430623263353036323561633235303135 + 35333031316366373636326665656230343934383334303863643364613364663436383030373237 + 35323964376564313636643633303262633033363633663966393535613064343364313161383061 + 66393733366463393936663033633038653465636539356266353936373162303661613962393662 + 61313534643064366432333166666130663730653333613964316130363135646532303531376537 + 63313339623337363464343637323431336438636337386264303961333139326666306365363937 + 36386437343036346165366439636533666237393535316536333966376536623030643663343561 + 64626362363736316234356639663039396634653766646237376636653062383530366562323138 + 61343537616263373137613232393731363866653038633932643163633732326463656365346535 + 63316337346636326631326134633339363133393337393035333730663133646332343536636337 + 36626566633162333463613735656564393764356337346535646539373536363933326139626239 + 35386434663636343366303830663531616530616563343737653761616232303865626634646537 + 38383430366131396133636530383865356430343965633062373366383261383231663162323566 + 30373061366533643938383363333266636463383134393264343662623465323164356464666364 + 35636135316333636266313038613239616638343761326332663933356164323635653861346430 + 65616661353162633765666633393139613830626535633462633166376563313236623465626339 + 38663138633664613738656166356431343438653833623132383330656637343661616432623362 + 66643466343663306434353237343737633535343233653765356134373739316234353836303034 + 37336435376135363362323130316338316135633633303861303665393766616537356666653238 + 63366461383334356666633134616436663731633666323261393761363264333430366234353732 + 66333732373236303338333862626537326638393964363965303532353465613638393934313538 + 66323366353064666334626461313933333961613637663332656131383038393264636537643730 + 35626265346363393665663431663036633461613362343330643133333232326664623833626336 + 65353363373962383561396163653361663736383235376661626132386131353137303764623231 + 63326538623231396366356432663537333331343335633531326331616531313039393335313139 + 65376461323434383065383834626535393063363432326233383930626437343961313538303135 + 39386561623662333335313661636637656336353537313466386239613166396436626630376337 + 36633739326336366530643733393962633737343035346536366336643266346162333931633235 + 66643966626262343862393832663132356435343561646634373835306130623637633836633166 + 30313732333963383565373261306232663365363033376431313437326366656264346532666561 + 63386231636634613235333363326166616238613734643739343237303963663539633535356232 + 66393365616165393130356561363733313735336132336166353839303230643437643165353338 + 39663138313130366635386365663830336365646562666635323361373362626339306536313664 + 32383934623533373361666536326131316630616661623839666137656330306433326637386134 + 34393162343535633438643036613831303265646632383231306239646132393338663564653939 + 63613232646230616338316434376663613266303362386631353733623335643034356631383139 + 
62613932396132636339393337383065613061306162633831386236323163633439303263393663 + 38616237313761306533636361386161666264333839616463386631633233343132373732636639 + 61326239383961656437646236656336303638656665316633643630393063373964323534643961 + 39383538303234343438363736373136316464643165383361336262303231353937316432366639 + 36613662393736386433356532626162643462313234316230643639333535653064303830373166 + 31393332336539313362373136326639386566343637623633396134643533393839353934613064 + 65396233353363393763363231633462663537626165646666633937343733653932633733313237 + 31323633326463333938343062626361313761646133633865623130323665336634356364366566 + 31626562373662313064306239356336376136306336643961323839313964393734343265306137 + 62663563306665636463356465663432346331323832666163623530666265393164336466383936 + 64653831316162313861373462643264373965623632653430373439656535636365383066643464 + 61366436613631386161306631386331656632636337653864343261643433363438396361373831 + 37363532346564343562356132306432303933643431636539303039306638356537353237323036 + 63366334623438393838383561383937313330303832326330326366303264303437646666613638 + 37653266633362636330656666303437323138346666373265663466616635326366313233323430 + 62616165626239363833613565326264373063376232303837363062616663333461373062323266 + 32626636316465666230626634396431323032323962313437323837336562313438346634656335 + 33613566636461663334623966646465623531653631653565333836613261633534393439613738 + 66356364383637666465336666333962393735643766633836383833396533626635633734326136 + 65656562366337326161303466336232646533346135353332643030383433643662363465633931 + 63323761623537383438333837333733363263663630336264376239336234663866633131376463 + 66663438313439643565316138383439353839366365393238376439626537656535643739373237 + 66666266366533393738363138613437666435366163643835383830643333323730303537313139 + 32313436663932633933353265356431336138306437353936363638643539383236323232326630 + 62323963626138633865376238666264666531613237636232373938303030393632643230336138 + 38663237646637616232343664396136376534313533613364663062356535313766343331616431 + 36616237336532333239386663643538643239613866393631393364306463303131643863363533 + 31356436373062666266656431643038323766383632613939616539663637623164323161633464 + 39666663353339383164363534616330323936333865663564646334373438303061656662656331 + 37633530663666323834383333623136633164326632313938643234326235616461323734353638 + 63393365313334646538373631643266383936333533383630623861343764373863346161316333 + 38356466626234653336326433353234613430623135343739323433326435373663363237643531 + 36626238613832633661343263383962373536353766653631323431393330623634656166333437 + 66376537643836626264383961303465363035336666306165316631316661366637303361656332 + 36616463626135653235393562343464353262616331326539316361393036623134623361383635 + 39383565313433653139663963306362373233313738613933626563333230656239613462363164 + 65396539333833633137313163396635373433303164633463383935663939343266396366666231 + 30353434323837343563613662643632386662616363646630353530386466643939623866626331 + 63613266366135646562653064333166356561626138343364373631376336393931313262323063 + 32653938333837366231343865656239353433663537313763376132613366363333313137323065 + 31666663656539333438343664323062323238353061663439326333366162303636626634313037 + 38366631306438393333356138393730316161336233656239626565366134643535383536613034 + 
37343733663631663863643337373462633462666234393063336330306465366637653136393533 + 63336535316438303564613366343565363831666233626466623161356635363464343634303136 + 61616561393861393036353433356364376533656334326433323934643236346133363535613334 + 32626332653362313731643035653335383164303534616537333132356535376233343566313736 + 39353037636530376338383739366230346134643738313037386438613461323934663537666164 + 66353330303730336435313735343333316364373432313030396361343061343632653765646336 + 39666537366537343635396235373433363438393637663166666530356339316334313834363938 + 33393837336265353265303635663363353439343062316363643637623564353261643637306434 + 36393662363737316234323461373763663364356535313165656661613137396366386464663866 + 63653562313539313839613436653137663262346233626464616237373737373736306231383265 + 35323532373631613762616234386162643035613838376264343532396263626562623262363532 + 36303530353137616134346262646464633462646662323262633366393736383834616665666466 + 34393363353135616437346332386634396635363130623337653230666334303630653738633334 + 33316162326335373838643261656561303736363331316134363736393362313734346236306638 + 65343163646264643539643635633761393665623039653232623435383062363462346336613238 + 38306138353832306263356265316236303065626566643134373836303933323130303634393931 + 31633334373064353263353135656433623863636261633664646439336539343636656464306531 + 36373364323637393634623666353730626532613534343638663966313332636437383233303864 + 33356432613638303936653134373338626261353662653930333534643732656130653636316433 + 33653364373636613739353439383066646530303565383432356134396436306134643030643034 + 63323433396238636330383836396364613738616338356563633565613537313138346661636164 + 34333566393738343661663062346433396532613032663331313566333161396230343336346264 + 66333935316630653936346336366336303363376633623034346536643731313136363835303964 + 37346537373236343832306637653563386435363435333537393733333966643461623064316639 + 65323363343338326435633631303037623234303334353366303936373664383762316364663036 + 61353638376335333663343066303961616234336664313732366630343331613537633336316534 + 31656561626430383338353231376263383362333966666363316435373533613138323039363463 + 33363031373035316431353930626632666165376538303638353631303931326262386363376330 + 36333531303235306532363763313233616165646234343235306332383262663261366164623130 + 66613232636264636336313230303261626639316465383265373762346434616362383562633533 + 64346438653161306266663634623666646239383363313862383563386461626264383165373561 + 64383431653061393132623833653337643266663462666462366339363233353335386264383936 + 38396264373833343935653264373631626662653962353438313262633339316537306463663930 + 31613634613535346364643930613739383035336164303064653736663031633135613966656463 + 64333539643534376662666539653766666532333832333430346333613236356534643964383135 + 38326235626164663364366163353434613530306531343735353761396563326536636335326336 + 34613835333362346363623235316564363934333732646435373033613863346565353034306333 + 33643763363838656339396435316162616539623764366163376438656266353137633262613464 + 31393434646435623032383934373262666430616262353165343231666631666238653134396539 + 32323137616639306262366638366536366665633331653363643234643238656338316133613166 + 38343566623137353566306538616639363935303766633732633638356362373463616563663438 + 66346133636562373031316363616662663132636263653037343962313630313535396563313230 + 
34613735663838613130346461343166663830623861393634353438376336363961326263333634 + 34646465326238636630316164316339333961333939363139623262396531303665383230363562 + 63626431333365663337323430653230613837396133636431303863366239303531653966653932 + 65363139366637623531306333363465386636366334383734353330626566346532653263633238 + 39383434346665323730366261316433303739313032653638636232666432323930653837643831 + 63393565306538663365616364326334306333346463343330316161616362323063666666373035 + 66383938383238353134386333343437623030363032303531643736353636643165373362363666 + 31363037613064633164346638306231663161626265663535363634336665656163636637393161 + 64313363373965396262386337613533393639353332316234643666613065343939393336366633 + 64303637323531393936386365316366656432346230653066306334626431366335353130663233 + 62303961663362623637303535333432313635303936363462336438663232333862303934383166 + 31626438623963346262376135633434643533316162376633353661356463616538363733346464 + 65646563626139356264363132616161303438653133353961636135333833376364333138353263 + 36613437373365666665643664343666366234636164626437396139393864653031396331303938 + 35323839646265393232326434616233323535396134346465363131366165373163353932363538 + 39353764623463393732346134656539353966643366653765663038323631373432663839396239 + 35623665623661326231643734346134623961663539363436323134333630306663653039653062 + 36623730663538666166363436616131363233643739393966333437643637303737383733356138 + 34343733623137326265343332326437316365346439316137663361373066333166383032396636 + 35623561626139666264373363363965383633653633656464393932666634353962623637643262 + 32323663303861376166656266653962643166326535363237316333663631323235333833636361 + 31633038353265386439313766313966633536346230646566633333646632383938363761373363 + 38353931343136633062303366643930323034616265653030643062333461616637366666336437 + 36346330636666313833346534363461336366393533346338653061356333653839623364336266 + 32373965346363613165383639366365396665353966393262393562353664623231326132363735 + 38386238336135306464366332353035613938313262323739326638623733663030656533383438 + 38316364393030376436313031613936363435633562633862323063643035383030313865396666 + 66646338316262653734633431393862626633643163313732343638313066646163353264653531 + 64346265656363323666656239333466313666373234626261633630653133316639313233303466 + 62353735626634616661396238356138343064386332366361643530613364366365663764393037 + 31613730313234393263653964376262373131383064393133636533656534343431613964663634 + 65656365393439306433313333346234333332346230666462633132313863623765306665306461 + 65633862656637646134353030393637353339646265613731646564333561313431346135626532 + 66646363383932636562343731626164633138386463356634353062323965376235383130633231 + 61623537333030383130623064356662356463646532613339303336666631366539613835646364 + 37636634353430386632656331313936393261643638326162376238326139643939636333366364 + 31626163376436336631 +vault_encrypted_example3_foobar: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 37336431373836376339373763306436396334623061366266353763363766313063363230636138 + 3665663061366436306232323636376261303064616339620a333365323266643364396136626665 + 62363862653134623665326635396563643832636234386266616436626334363839326434383431 + 3330373333366233380a363431386334636164643936313430623661633265346632343331373866 + 3732 +# We dont have a secret for this vaulttext, but nothing references it +# so nothing should ever try to decrypt it. 
So this is testing that +# we dont require all vaulted vars to be decrypted. +vault_encrypted_example4_unknown_password: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 64316436303566666563393931613833316533346539373635663031376664366131353264366132 + 3637623935356263643639313562366434383234633232660a353636666134353030646539643139 + 65376235333932353531356666363434313066366161383532363166653762326533323233623431 + 3934393962633637330a356337626634343736313339316365373239663031663938353063326665 + 30643339386131663336366531663031383030313936356631613432336338313962 diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md new file mode 100644 index 00000000..4a75cece --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md @@ -0,0 +1 @@ +file is encrypted with password of 'test-encrypted-file-password' diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml new file mode 100644 index 00000000..e09004a1 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Assert that a vault encrypted file with embedded vault of a string with no newline works + assert: + that: + - '"{{ vault_file_encrypted_with_encrypted_one_line_var }}" == "Setec Astronomy"' + +- name: Assert that a vault encrypted file with multi line embedded vault works, including new line + assert: + that: + - vault_file_encrypted_with_encrypted_var == "Setec Astronomy\n" + +# TODO: add a expected fail here +# - debug: var=vault_encrypted_one_line_var_with_embedded_template diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml new file mode 100644 index 00000000..89cc4a0f --- /dev/null +++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml @@ -0,0 +1,76 @@ +$ANSIBLE_VAULT;1.1;AES256 +31613535653961393639346266636234373833316530373965356161373735666662613137386466 +3365303539306132613861646362396161323962373839640a653030376530316136643961623665 +65643665616338363432383264363730386538353635663339633932353933653132343430613332 +6136663837306333370a643139336230663465346637663032613231656364316533613235623532 +65643738663735636662363565313561646162343865393733663838393239646634633936336262 +39626235616537663934363932323831376539666331353334386636663738643932306239663265 +64646664616331643663326561386638393764313737303865326166373031336665663533373431 +35353736346264616135656164636337363966323935643032646138366166636537333565306230 +65646533623134393633623663336263393533613632663464653663313835306265333139646563 +35393061343266343138333936646364333735373930666262376137396562356231393330313731 +36363164623939393436363564353162373364626536376434626463343161646437316665613662 +38343534363965373735316339643061333931666264353566316235616433666536313065306132 +31623933633533366162323961343662323364353065316235303162306635663435663066393865 +64356634363761333838326331343865653633396665353638633730663134313565653166656131 +33366464396532313635326237363135316230663838393030303963616161393966393836633237 +30333338343031366235396438663838633136666563646161363332663533626662663531653439 
+63643435383931663038613637346637383365336431646663366436626333313536396135636566 +31373133363661636338376166356664353366343730373164663361623338383636336464373038 +36306437363139346233623036636330333664323165636538666138306465653435666132623835 +30363266333666626363366465313165643761396562653761313764616562666439366437623766 +33343666623866653461376137353731356530363732386261383863666439333735666638653533 +38393430323961356333383464643036383739663064633461363937336538373539666662653764 +36376266333230666232396665616434303432653562353131383430643533623932363537346435 +33326335663561643564663936323832376634336363373531363666333732643363646130383464 +30656366633863643966656134653833343634383136363539366330336261313736343838663936 +39333835353035386664633331303264356339613933393162393037306565636563386436633532 +34376564343237303166613461383963353030383166326538643932323130643830376165366564 +30366432623761623366653966313865653262363064316130393339393366323539373338306265 +31626564393065303032383161343137636432353061333964613935363865356139313766303039 +32333863353465306265653237396232383330333438303866316362353161383266316633663364 +66353130326237376331656334633965633339303138656263616239323261663864666236323662 +33643463303965313264396463333963376464313838373765633463396534363836366132653437 +30303132633232623265303966316639373664656262636166653438323534326435363966616133 +33663463626536643930623034343237613933623462346635306565623834346532613539383838 +39356339303930663739333236316234666633623961323362323537313833383538363132636165 +31396433386664356532383432666464613137376561396534316665386134333665626430373064 +30626561363731326635393334633837303934653062616461303732316239663764633565353633 +33336161623332383064376538353531343534333836313139376439316564313436623462396134 +31643831656135653234396362653861643933346433646633383130323139353465616430383061 +34623164376436326466333765353037323630356662646364366265303534313764393862653238 +66376365323561643030343534636263386338333566613436383630613561646639616265313465 +66336239303432666361383038323038383663346561356664626634333037313838363732643463 +33373734663933373238363635623336323232313161353861306430323334353836616265623639 +65613436323939643932383537666530306134633435373331623963633436386162306565656433 +35383962633163643837343436383664313565656134646633393237353065666535316561613266 +64653234366462623764313438666466616664303138656565663036376230323763393135323330 +35383861306262356430656531343938643763306663323031636638383762626564616366393434 +33373035363633396230396161623433336530326432343666346332613262376338313731626462 +63616463363831333239643535383936646264336466616635353063383163306564373263656265 +65383466653162626132633463613037343865316639653931633965323637373733653131666233 +35643831646638383232616538656265663365306136343733633535323537653165636665383832 +65303162656238303665346232353136346639316263636264346533356263353066353438323535 +36303236326663303763653137656264336566646161663538383361306138323064336235616438 +32373731643331373239383339326365366337646237643836373238656339646362366239623533 +33306531353863653834666361393161366465626632643061363266353465653964363263613430 +32323132613866343733376437643239316661313330323661633234343630626132383434343461 +61663765383134666330316237633963323463363762383666323866386336316438373461306138 +38613266346532313134386236386131626262663534313935623635343533383831386332343534 +65333963353861656232383134396438613034663333633661346465636436373533346561306661 
+33656535613963663938313233333736343036393734373363316236373765343736633635386336 +30323036393431363636316466393561626365366333623431353435633963613935346239666534 +33623037306334343464633932313430616666633631313366356532643938333835333231313039 +65363734336630303861626636613139663130616362333662616532313734393636353963643032 +39626162623933616561383736636466316331346135613063383261373865366232376562316237 +65393563633131653761646365313831646265316233343833653363626465363863363936316664 +63363863363761353264316662643338656432356336326339623961396538643838666330303934 +62343537653262353737316266366134623961323637613338303164383734613034383964623135 +35646130363038356530383638663431663238336337313034303631366538326361646530626138 +34653533383964353866653562666463333961313434373063333163346537636631393138316465 +62656361613365366137346337363830356263633162623466373564346437653036386136333333 +32323863393866373932353534343133306333303265336564383132616365363439393364336562 +62333130343664343436356338623336643735373164373962313762333763343137626238316536 +36376539666331376162376361646631396231306165316362343164616232393864656161393735 +63313439643865346231346363376137306464396637356539353139343932333438323964323035 +326532383066643037653036333166346238 diff --git a/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml b/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml new file mode 100644 index 00000000..b4af5efc --- /dev/null +++ b/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Template from a vaulted template file + template: + src: vaulted_template.j2 + dest: "{{ output_dir }}/vaulted_template.out" + vars: + vaulted_template_var: "here_i_am" + +- name: Get output template contents + slurp: + path: "{{ output_dir }}/vaulted_template.out" + register: vaulted_template_out + +- debug: + msg: "{{ vaulted_template_out.content|b64decode }}" + +- assert: + that: + - vaulted_template_out.content|b64decode == 'here_i_am\n' diff --git a/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2 b/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2 new file mode 100644 index 00000000..af9c3eb1 --- /dev/null +++ b/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2 @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +65626437623461633630303033303939616334373263633438623938396564376435366534303865 +6363663439346464336437346263343235626463663130640a373233623733653830306262376430 +31666538323132343039613537323761343234613531353035373434666632333932623064316564 +3532363462643736380a303136353830636635313662663065343066323631633562356663633536 +31343265376433633234656432393066393865613235303165666338663930303035 diff --git a/test/integration/targets/vault/runme.sh b/test/integration/targets/vault/runme.sh new file mode 100755 index 00000000..197095bc --- /dev/null +++ b/test/integration/targets/vault/runme.sh @@ -0,0 +1,526 @@ +#!/usr/bin/env bash + +set -euvx + +export ANSIBLE_TEST_PREFER_VENV=1 +source virtualenv.sh + + +MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') +trap 'rm -rf "${MYTMPDIR}"' EXIT + +# create a test file +TEST_FILE="${MYTMPDIR}/test_file" +echo "This is a test file" > "${TEST_FILE}" + +TEST_FILE_1_2="${MYTMPDIR}/test_file_1_2" +echo "This is a test file for format 1.2" > "${TEST_FILE_1_2}" + +TEST_FILE_ENC_PASSWORD="${MYTMPDIR}/test_file_enc_password" +echo "This is a test file 
for encrypted with a vault password that is itself vault encrypted" > "${TEST_FILE_ENC_PASSWORD}"
+
+TEST_FILE_ENC_PASSWORD_DEFAULT="${MYTMPDIR}/test_file_enc_password_default"
+echo "This is a test file for encrypted with a vault password that is itself vault encrypted using --encrypted-vault-id default" > "${TEST_FILE_ENC_PASSWORD_DEFAULT}"
+
+TEST_FILE_OUTPUT="${MYTMPDIR}/test_file_output"
+
+TEST_FILE_EDIT="${MYTMPDIR}/test_file_edit"
+echo "This is a test file for edit" > "${TEST_FILE_EDIT}"
+
+TEST_FILE_EDIT2="${MYTMPDIR}/test_file_edit2"
+echo "This is a test file for edit2" > "${TEST_FILE_EDIT2}"
+
+# test case for https://github.com/ansible/ansible/issues/35834
+# (being prompted for new password on vault-edit with no configured passwords)
+
+TEST_FILE_EDIT3="${MYTMPDIR}/test_file_edit3"
+echo "This is a test file for edit3" > "${TEST_FILE_EDIT3}"
+
+# ansible-config view
+ansible-config view
+
+# ansible-config
+ansible-config dump --only-changed
+ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_EDIT3}"
+# EDITOR=./faux-editor.py ansible-vault edit "$@" "${TEST_FILE_EDIT3}"
+EDITOR=./faux-editor.py ansible-vault edit --vault-id vault-password -vvvvv "${TEST_FILE_EDIT3}"
+echo $?
+
+# view the vault encrypted password file
+ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
+
+# encrypt with a password from a vault encrypted password file and multiple vault-ids
+# should fail because we don't know which vault id to use to encrypt with
+ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (5 is expected)"
+[ $WRONG_RC -eq 5 ]
+
+# try to view the file encrypted with the vault-password we didn't specify
+# to verify we didn't choose the wrong vault-id
+ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
+
+FORMAT_1_1_HEADER="\$ANSIBLE_VAULT;1.1;AES256"
+FORMAT_1_2_HEADER="\$ANSIBLE_VAULT;1.2;AES256"
+
+
+VAULT_PASSWORD_FILE=vault-password
+# new format, view, using password client script
+ansible-vault view "$@" --vault-id vault-password@test-vault-client.py format_1_1_AES256.yml
+
+# view, using password client script, unknown vault/keyname
+ansible-vault view "$@" --vault-id some_unknown_vault_id@test-vault-client.py format_1_1_AES256.yml && :
+
+# Use Linux setsid to test without a tty. No setsid on macOS/BSD, though...
+if [ -x "$(command -v setsid)" ]; then
+    # tests related to https://github.com/ansible/ansible/issues/30993
+    CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml'
+    setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
+    WRONG_RC=$?
+    cat log
+    echo "rc was $WRONG_RC (0 is expected)"
+    [ $WRONG_RC -eq 0 ]
+
+    setsid sh -c 'tty; ansible-vault view --ask-vault-pass -vvvvv test_vault.yml' < /dev/null > log 2>&1 && :
+    WRONG_RC=$?
+    echo "rc was $WRONG_RC (1 is expected)"
+    [ $WRONG_RC -eq 1 ]
+    cat log
+
+    setsid sh -c 'tty; echo passbhkjhword|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1 && :
+    WRONG_RC=$?
+    echo "rc was $WRONG_RC (1 is expected)"
+    [ $WRONG_RC -eq 1 ]
+    cat log
+
+    setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+    echo $?
+    cat log
+
+    setsid sh -c 'tty; echo test-vault-password|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+    echo $?
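+    # (the setsid calls in this block run the command without a controlling
+    # terminal, so --ask-vault-pass must read the vault password from the pipe
+    # rather than prompting; the leading `tty` records that state in the log)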
+    cat log
+
+    setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+    echo $?
+    cat log
+
+    setsid sh -c 'tty; echo test-vault-password|ansible-vault view --ask-vault-pass -vvvvv vaulted.inventory' < /dev/null > log 2>&1
+    echo $?
+    cat log
+
+    # test using --ask-vault-password option
+    CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-password test_vault.yml'
+    setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
+    WRONG_RC=$?
+    cat log
+    echo "rc was $WRONG_RC (0 is expected)"
+    [ $WRONG_RC -eq 0 ]
+fi
+
+ansible-vault view "$@" --vault-password-file vault-password-wrong format_1_1_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+set -eux
+
+
+# new format, view
+ansible-vault view "$@" --vault-password-file vault-password format_1_1_AES256.yml
+
+# new format, view with vault-id
+ansible-vault view "$@" --vault-id=vault-password format_1_1_AES256.yml
+
+# new format, view, using password script
+ansible-vault view "$@" --vault-password-file password-script.py format_1_1_AES256.yml
+
+# new format, view, using password script with vault-id
+ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
+
+# new 1.2 format, view
+ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view with vault-id
+ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script
+ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script with vault-id
+ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml
+
+# newish 1.1 format, view, using a vault-id list from config env var
+ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
+
+# new 1.2 format, view, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id' vault_id
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# new 1.2 format, view with vault-id, ENFORCE_IDENTITY_MATCH=true, should work, 'test_vault_id' is provided
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id'
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+
+# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, should fail
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml && :
+WRONG_RC=$?
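+# (a note on the `cmd && :` idiom used around the expected failures here:
+# under `set -e`, a failing command on the left-hand side of `&&` does not
+# abort the script, and its exit code is still available in `$?`, which is
+# captured as WRONG_RC for the check below)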
+echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, 'test_vault_id' provided should work +ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@password-script.py format_1_2_AES256.yml + +# test with a default vault password set via config/env, right password +ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault view "$@" format_1_1_AES256.yml + +# test with a default vault password set via config/env, wrong password +ANSIBLE_VAULT_PASSWORD_FILE=vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# test with a default vault-id list set via config/env, right password +ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,correct@vault-password ansible-vault view "$@" format_1_1_AES.yml && : + +# test with a default vault-id list set via config/env,wrong passwords +ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,alsowrong@vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# try specifying a --encrypt-vault-id that doesnt exist, should exit with an error indicating +# that --encrypt-vault-id and the known vault-ids +ansible-vault encrypt "$@" --vault-password-file vault-password --encrypt-vault-id doesnt_exist "${TEST_FILE}" && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# encrypt it +ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}" + +ansible-vault view "$@" --vault-password-file vault-password "${TEST_FILE}" + +# view with multiple vault-password files, including a wrong one +ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong "${TEST_FILE}" + +# view with multiple vault-password files, including a wrong one, using vault-id +ansible-vault view "$@" --vault-id vault-password --vault-id vault-password-wrong "${TEST_FILE}" + +# And with the password files specified in a different order +ansible-vault view "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}" + +# And with the password files specified in a different order, using vault-id +ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE}" + +# And with the password files specified in a different order, using --vault-id and non default vault_ids +ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE}" + +ansible-vault decrypt "$@" --vault-password-file vault-password "${TEST_FILE}" + +# encrypt it, using a vault_id so we write a 1.2 format file +ansible-vault encrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}" + +ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_1_2}" +ansible-vault view "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}" + +# view with multiple vault-password files, including a wrong one +ansible-vault view "$@" --vault-id vault-password --vault-id wrong_password@vault-password-wrong "${TEST_FILE_1_2}" + +# And with the password files specified in a different order, using vault-id +ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE_1_2}" + +# And with the password files specified in a different order, using --vault-id and non default vault_ids 
+ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE_1_2}" + +ansible-vault decrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}" + +# multiple vault passwords +ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong format_1_1_AES256.yml + +# multiple vault passwords, --vault-id +ansible-vault view "$@" --vault-id test_vault_id@vault-password --vault-id test_vault_id@vault-password-wrong format_1_1_AES256.yml + +# encrypt it, with password from password script +ansible-vault encrypt "$@" --vault-password-file password-script.py "${TEST_FILE}" + +ansible-vault view "$@" --vault-password-file password-script.py "${TEST_FILE}" + +ansible-vault decrypt "$@" --vault-password-file password-script.py "${TEST_FILE}" + +# encrypt it, with password from password script +ansible-vault encrypt "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}" + +ansible-vault view "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}" + +ansible-vault decrypt "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}" + +# new password file for rekeyed file +NEW_VAULT_PASSWORD="${MYTMPDIR}/new-vault-password" +echo "newpassword" > "${NEW_VAULT_PASSWORD}" + +ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}" + +ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" + +# --new-vault-password-file and --new-vault-id should cause options error +ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-id=foobar --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" && : +WRONG_RC=$? +echo "rc was $WRONG_RC (2 is expected)" +[ $WRONG_RC -eq 2 ] + +ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" + +# view file with unicode in filename +ansible-vault view "$@" --vault-password-file vault-password vault-café.yml + +# view with old password file and new password file +ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password "${TEST_FILE}" + +# view with old password file and new password file, different order +ansible-vault view "$@" --vault-password-file vault-password --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" + +# view with old password file and new password file and another wrong +ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}" + +# view with old password file and new password file and another wrong, using --vault-id +ansible-vault view "$@" --vault-id "tmp_new_password@${NEW_VAULT_PASSWORD}" --vault-id wrong_password@vault-password-wrong --vault-id myorg@vault-password "${TEST_FILE}" + +ansible-vault decrypt "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" + +# reading/writing to/from stdin/stdin (See https://github.com/ansible/ansible/issues/23567) +ansible-vault encrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output="${TEST_FILE_OUTPUT}" < "${TEST_FILE}" +OUTPUT=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- < "${TEST_FILE_OUTPUT}") +echo "${OUTPUT}" | grep 'This is a test file' + +OUTPUT_DASH=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- "${TEST_FILE_OUTPUT}") +echo "${OUTPUT_DASH}" | grep 'This is a test 
file' + +OUTPUT_DASH_SPACE=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output - "${TEST_FILE_OUTPUT}") +echo "${OUTPUT_DASH_SPACE}" | grep 'This is a test file' + + +# test using an empty vault password file +ansible-vault view "$@" --vault-password-file empty-password format_1_1_AES256.yml && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +ansible-vault view "$@" --vault-id=empty@empty-password --vault-password-file empty-password format_1_1_AES256.yml && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +echo 'foo' > some_file.txt +ansible-vault encrypt "$@" --vault-password-file empty-password some_file.txt && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + + +ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "a test string" + +# Test with multiple vault password files +# https://github.com/ansible/ansible/issues/57172 +env ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --encrypt-vault-id default "a test string" + +ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" + +ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" "a test string" + +ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" + + +# from stdin +ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" < "${TEST_FILE}" + +ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --stdin-name "the_var_from_stdin" < "${TEST_FILE}" + +# write to file +ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" --output "${MYTMPDIR}/enc_string_test_file" + +# test ansible-vault edit with a faux editor +ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}" + +# edit a 1.1 format with no vault-id, should stay 1.1 +EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}" +head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}" + +# edit a 1.1 format with vault-id, should stay 1.1 +cat "${TEST_FILE_EDIT}" +EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT}" +cat "${TEST_FILE_EDIT}" +head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}" + +ansible-vault encrypt "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}" + +# verify that we aren't prompted for a new vault password on edit if we are running interactively (ie, with prompts) +# have to use setsid nd --ask-vault-pass to force a prompt to simulate. +# See https://github.com/ansible/ansible/issues/35834 +setsid sh -c 'tty; echo password |ansible-vault edit --ask-vault-pass vault_test.yml' < /dev/null > log 2>&1 && : +grep 'New Vault password' log && : +WRONG_RC=$? +echo "The stdout log had 'New Vault password' in it and it is not supposed to. 
rc of grep was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# edit a 1.2 format with vault id, should keep vault id and 1.2 format +EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}" +head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password" + +# edit a 1.2 file with no vault-id, should keep vault id and 1.2 format +EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT2}" +head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password" + +# encrypt with a password from a vault encrypted password file and multiple vault-ids +# should fail because we dont know which vault id to use to encrypt with +ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && : +WRONG_RC=$? +echo "rc was $WRONG_RC (5 is expected)" +[ $WRONG_RC -eq 5 ] + + +# encrypt with a password from a vault encrypted password file and multiple vault-ids +# but this time specify with --encrypt-vault-id, but specifying vault-id names (instead of default) +# ansible-vault encrypt "$@" --vault-id from_vault_password@vault-password --vault-id from_encrypted_vault_password@encrypted-vault-password --encrypt-vault-id from_encrypted_vault_password "${TEST_FILE(_ENC_PASSWORD}" + +# try to view the file encrypted with the vault-password we didnt specify +# to verify we didnt choose the wrong vault-id +# ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}" && : +# WRONG_RC=$? +# echo "rc was $WRONG_RC (1 is expected)" +# [ $WRONG_RC -eq 1 ] + +ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}" + +# view the file encrypted with a password from a vault encrypted password file +ansible-vault view "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" + +# try to view the file encrypted with a password from a vault encrypted password file but without the password to the password file. +# This should fail with an +ansible-vault view "$@" --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && : +WRONG_RC=$? 
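+# Sketch (commented out, not run): rekey can instead attach a label to the new
+# secret via --new-vault-id, which is why combining it with
+# --new-vault-password-file above is rejected with rc 2. With a hypothetical
+# label 'rotated':
+# ansible-vault rekey --vault-password-file vault-password --new-vault-id "rotated@${NEW_VAULT_PASSWORD}" "${TEST_FILE}"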
+echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + + +# test playbooks using vaulted files +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-tasks +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-hosts +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password +ansible-playbook test_vaulted_inventory.yml -i vaulted.inventory -v "$@" --vault-password-file vault-password +ansible-playbook test_vaulted_template.yml -i ../../inventory -v "$@" --vault-password-file vault-password + +# test using --vault-pass-file option +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-pass-file vault-password + +# install TOML for parse toml inventory +# test playbooks using vaulted files(toml) +pip install toml +ansible-vault encrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password +ansible-playbook test_vaulted_inventory_toml.yml -i ./inventory.toml -v "$@" --vault-password-file vault-password +ansible-vault decrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password + +# test a playbook with a host_var whose value is non-ascii utf8 (see https://github.com/ansible/ansible/issues/37258) +ansible-playbook -i ../../inventory -v "$@" --vault-id vault-password test_vaulted_utf8_value.yml + +# test with password from password script +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file password-script.py +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file password-script.py + +# with multiple password files +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password + +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong --syntax-check +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password + +# test with a default vault password file set in config +ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong + +# test using vault_identity_list config +ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-playbook test_vault.yml -i ../../inventory -v "$@" + +# test that we can have a vault encrypted yaml file that includes embedded vault vars +# that were encrypted with a different vault secret +ansible-playbook test_vault_file_encrypted_embedded.yml -i ../../inventory "$@" --vault-id encrypted_file_encrypted_var_password --vault-id vault-password + +# with multiple password files, --vault-id, ordering +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong +ansible-playbook test_vault.yml -i ../../inventory -v "$@" 
--vault-id vault-password-wrong --vault-id vault-password + +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong --syntax-check +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password + +# test with multiple password files, including a script, and a wrong password +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file password-script.py --vault-password-file vault-password + +# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file +ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-id password-script.py --vault-id vault-password + +# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file +ansible-playbook test_vault_embedded_ids.yml -i ../../inventory -v "$@" \ + --vault-password-file vault-password-wrong \ + --vault-id password-script.py --vault-id example1@example1_password \ + --vault-id example2@example2_password --vault-password-file example3_password \ + --vault-id vault-password + +# with wrong password +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# with multiple wrong passwords +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password-wrong && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# with wrong password, --vault-id +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# with multiple wrong passwords with --vault-id +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password-wrong && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# with multiple wrong passwords with --vault-id +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id wrong1@vault-password-wrong --vault-id wrong2@vault-password-wrong && : +WRONG_RC=$? +echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# with empty password file +ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id empty@empty-password && : +WRONG_RC=$? 
+echo "rc was $WRONG_RC (1 is expected)" +[ $WRONG_RC -eq 1 ] + +# test invalid format ala https://github.com/ansible/ansible/issues/28038 +EXPECTED_ERROR='Vault format unhexlify error: Non-hexadecimal digit found' +ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-host-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}" + +EXPECTED_ERROR='Vault format unhexlify error: Odd-length string' +ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-group-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}" + +# Run playbook with vault file with unicode in filename (https://github.com/ansible/ansible/issues/50316) +ansible-playbook -i ../../inventory -v "$@" --vault-password-file vault-password test_utf8_value_in_filename.yml + +# Ensure we don't leave unencrypted temp files dangling +ansible-playbook -v "$@" --vault-password-file vault-password test_dangling_temp.yml + +ansible-playbook "$@" --vault-password-file vault-password single_vault_as_string.yml diff --git a/test/integration/targets/vault/runme_change_pip_installed.sh b/test/integration/targets/vault/runme_change_pip_installed.sh new file mode 100755 index 00000000..5ab2a8ec --- /dev/null +++ b/test/integration/targets/vault/runme_change_pip_installed.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# start by removing pycrypto and cryptography + +pip uninstall -y cryptography +pip uninstall -y pycrypto + +./runme.sh + +# now just pycrypto +pip install --user pycrypto + +./runme.sh + + +# now just cryptography + +pip uninstall -y pycrypto +pip install --user cryptography + +./runme.sh + +# now both + +pip install --user pycrypto + +./runme.sh diff --git a/test/integration/targets/vault/single_vault_as_string.yml b/test/integration/targets/vault/single_vault_as_string.yml new file mode 100644 index 00000000..1eb17d04 --- /dev/null +++ b/test/integration/targets/vault/single_vault_as_string.yml @@ -0,0 +1,123 @@ +- hosts: localhost + vars: + vaulted_value: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 35323961353038346165643738646465376139363061353835303739663538343266303232326635 + 3365353662646236356665323135633630656238316530640a663362363763633436373439663031 + 33663433383037396438656464636433653837376361313638366362333037323961316364363363 + 3835616438623261650a636164376534376661393134326662326362323131373964313961623365 + 3833 + tasks: + - debug: + msg: "{{ vaulted_value }}" + + - debug: + msg: "{{ vaulted_value|type_debug }}" + + - assert: + that: + - vaulted_value is vault_encrypted + - vaulted_value == 'foo bar' + - vaulted_value|string == 'foo bar' + - vaulted_value|quote == "'foo bar'" + - vaulted_value|capitalize == 'Foo bar' + - vaulted_value|center(width=9) == ' foo bar ' + - vaulted_value|default('monkey') == 'foo bar' + - vaulted_value|escape == 'foo bar' + - vaulted_value|forceescape == 'foo bar' + - vaulted_value|first == 'f' + - "'%s'|format(vaulted_value) == 'foo bar'" + - vaulted_value|indent(indentfirst=True) == ' foo bar' + - vaulted_value.split() == ['foo', 'bar'] + - vaulted_value|join('-') == 'f-o-o- -b-a-r' + - vaulted_value|last == 'r' + - vaulted_value|length == 7 + - vaulted_value|list == ['f', 'o', 'o', ' ', 'b', 'a', 'r'] + - vaulted_value|lower == 'foo bar' + - vaulted_value|replace('foo', 'baz') == 'baz bar' + - vaulted_value|reverse|string == 'rab oof' + - vaulted_value|safe == 'foo bar' + - vaulted_value|slice(2)|list == [['f', 'o', 'o', ' '], ['b', 'a', 'r']] + - vaulted_value|sort|list == [" ", "a", "b", "f", 
"o", "o", "r"] + - vaulted_value|trim == 'foo bar' + - vaulted_value|upper == 'FOO BAR' + # jinja2.filters.do_urlencode uses an isinstance against string_types + # - vaulted_value|urlencode == 'foo%20bar' + - vaulted_value|urlize == 'foo bar' + - vaulted_value is not callable + - vaulted_value is iterable + - vaulted_value is lower + - vaulted_value is not none + # This is not exactly a string, and UserString doesn't fulfill this + # - vaulted_value is string + - vaulted_value is not upper + + - vaulted_value|b64encode == 'Zm9vIGJhcg==' + - vaulted_value|to_uuid == '0271fe51-bb26-560f-b118-5d6513850860' + - vaulted_value|string|to_json == '"foo bar"' + - vaulted_value|md5 == '327b6f07435811239bc47e1544353273' + - vaulted_value|sha1 == '3773dea65156909838fa6c22825cafe090ff8030' + - vaulted_value|hash == '3773dea65156909838fa6c22825cafe090ff8030' + - vaulted_value|regex_replace('foo', 'baz') == 'baz bar' + - vaulted_value|regex_escape == 'foo\ bar' + - vaulted_value|regex_search('foo') == 'foo' + - vaulted_value|regex_findall('foo') == ['foo'] + - vaulted_value|comment == '#\n# foo bar\n#' + + - assert: + that: + - vaulted_value|random(seed='foo') == ' ' + - vaulted_value|shuffle(seed='foo') == ["o", "f", "r", "b", "o", "a", " "] + - vaulted_value|pprint == "'foo bar'" + when: ansible_python.version.major == 3 + + - assert: + that: + - vaulted_value|random(seed='foo') == 'r' + - vaulted_value|shuffle(seed='foo') == ["b", "o", "a", " ", "o", "f", "r"] + - vaulted_value|pprint == "u'foo bar'" + when: ansible_python.version.major == 2 + + - assert: + that: + - vaulted_value|map('upper')|list == ['F', 'O', 'O', ' ', 'B', 'A', 'R'] + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=') + + + - assert: + that: + - vaulted_value.split()|first|int(base=36) == 20328 + - vaulted_value|select('equalto', 'o')|list == ['o', 'o'] + - vaulted_value|title == 'Foo Bar' + - vaulted_value is equalto('foo bar') + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.8', '>=') + + - assert: + that: + - vaulted_value|string|tojson == '"foo bar"' + - vaulted_value|truncate(4) == 'foo bar' + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.9', '>=') + + - assert: + that: + - vaulted_value|wordwrap(4) == 'foo\nbar' + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11', '>=') + + - assert: + that: + - vaulted_value|wordcount == 2 + when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11.2', '>=') + + - ping: + data: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 35323961353038346165643738646465376139363061353835303739663538343266303232326635 + 3365353662646236356665323135633630656238316530640a663362363763633436373439663031 + 33663433383037396438656464636433653837376361313638366362333037323961316364363363 + 3835616438623261650a636164376534376661393134326662326362323131373964313961623365 + 3833 + register: ping_result + + - assert: + that: + - ping_result.ping == 'foo bar' diff --git a/test/integration/targets/vault/test-vault-client.py b/test/integration/targets/vault/test-vault-client.py new file mode 100755 index 00000000..a2f17dc5 --- /dev/null +++ b/test/integration/targets/vault/test-vault-client.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +ANSIBLE_METADATA = {'status': ['preview'], 
+ 'supported_by': 'community', + 'version': '1.0'} + +import argparse +import sys + +# TODO: could read these from the files I suppose... +secrets = {'vault-password': 'test-vault-password', + 'vault-password-wrong': 'hunter42', + 'vault-password-ansible': 'ansible', + 'password': 'password', + 'vault-client-password-1': 'password-1', + 'vault-client-password-2': 'password-2'} + + +def build_arg_parser(): + parser = argparse.ArgumentParser(description='Get a vault password from user keyring') + + parser.add_argument('--vault-id', action='store', default=None, + dest='vault_id', + help='name of the vault secret to get from keyring') + parser.add_argument('--username', action='store', default=None, + help='the username whose keyring is queried') + parser.add_argument('--set', action='store_true', default=False, + dest='set_password', + help='set the password instead of getting it') + return parser + + +def get_secret(keyname): + return secrets.get(keyname, None) + + +def main(): + rc = 0 + + arg_parser = build_arg_parser() + args = arg_parser.parse_args() + # print('args: %s' % args) + + keyname = args.vault_id or 'ansible' + + if args.set_password: + print('--set is not supported yet') + sys.exit(1) + + secret = get_secret(keyname) + if secret is None: + sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % keyname) + # key not found rc=2 + return 2 + + sys.stdout.write('%s\n' % secret) + + return rc + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/test/integration/targets/vault/test_dangling_temp.yml b/test/integration/targets/vault/test_dangling_temp.yml new file mode 100644 index 00000000..71a9d73a --- /dev/null +++ b/test/integration/targets/vault/test_dangling_temp.yml @@ -0,0 +1,34 @@ +- hosts: localhost + gather_facts: False + vars: + od: "{{output_dir|default('/tmp')}}/test_vault_assemble" + tasks: + - name: create target directory + file: + path: "{{od}}" + state: directory + + - name: assemble_file file with secret + assemble: + src: files/test_assemble + dest: "{{od}}/dest_file" + remote_src: no + mode: 0600 + + - name: remove assembled file with secret (so nothing should have unencrypted secret) + file: path="{{od}}/dest_file" state=absent + + - name: find temp files with secrets + find: + paths: '{{temp_paths}}' + contains: 'VAULT TEST IN WHICH BAD THING HAPPENED' + recurse: yes + register: badthings + vars: + temp_paths: "{{[lookup('env', 'TMP'), lookup('env', 'TEMP'), hardcoded]|flatten(1)|unique|list}}" + hardcoded: ['/tmp', '/var/tmp'] + + - name: ensure we failed to find any + assert: + that: + - badthings['matched'] == 0 diff --git a/test/integration/targets/vault/test_utf8_value_in_filename.yml b/test/integration/targets/vault/test_utf8_value_in_filename.yml new file mode 100644 index 00000000..9bd394dc --- /dev/null +++ b/test/integration/targets/vault/test_utf8_value_in_filename.yml @@ -0,0 +1,16 @@ +- name: "Test that the vaulted file with UTF-8 in filename decrypts correctly" + gather_facts: false + hosts: testhost + vars: + expected: "my_secret" + vars_files: + - vault-café.yml + tasks: + - name: decrypt vaulted file with utf8 in filename and show it in debug + debug: + var: vault_string + + - name: assert decrypted value matches expected + assert: + that: + - "vault_string == expected" diff --git a/test/integration/targets/vault/test_vault.yml b/test/integration/targets/vault/test_vault.yml new file mode 100644 index 00000000..7f8ed115 --- /dev/null +++ b/test/integration/targets/vault/test_vault.yml @@ -0,0 +1,6 @@ +- hosts: 
testhost + gather_facts: False + vars: + - output_dir: . + roles: + - { role: test_vault, tags: test_vault} diff --git a/test/integration/targets/vault/test_vault_embedded.yml b/test/integration/targets/vault/test_vault_embedded.yml new file mode 100644 index 00000000..ee9739f8 --- /dev/null +++ b/test/integration/targets/vault/test_vault_embedded.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: False + roles: + - { role: test_vault_embedded, tags: test_vault_embedded} diff --git a/test/integration/targets/vault/test_vault_embedded_ids.yml b/test/integration/targets/vault/test_vault_embedded_ids.yml new file mode 100644 index 00000000..23ebbb96 --- /dev/null +++ b/test/integration/targets/vault/test_vault_embedded_ids.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: False + roles: + - { role: test_vault_embedded_ids, tags: test_vault_embedded_ids} diff --git a/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml new file mode 100644 index 00000000..685d20ef --- /dev/null +++ b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml @@ -0,0 +1,4 @@ +- hosts: testhost + gather_facts: False + roles: + - { role: test_vault_file_encrypted_embedded, tags: test_vault_file_encrypted_embedded} diff --git a/test/integration/targets/vault/test_vaulted_inventory.yml b/test/integration/targets/vault/test_vaulted_inventory.yml new file mode 100644 index 00000000..06b6582b --- /dev/null +++ b/test/integration/targets/vault/test_vaulted_inventory.yml @@ -0,0 +1,5 @@ +- hosts: vaulted_host + gather_facts: no + tasks: + - name: See if we knew vaulted_host + debug: msg="Found vaulted_host from vaulted.inventory" diff --git a/test/integration/targets/vault/test_vaulted_inventory_toml.yml b/test/integration/targets/vault/test_vaulted_inventory_toml.yml new file mode 100644 index 00000000..f6e2c5d6 --- /dev/null +++ b/test/integration/targets/vault/test_vaulted_inventory_toml.yml @@ -0,0 +1,9 @@ +- hosts: vaulted_host_toml + gather_facts: no + tasks: + - name: See if we knew vaulted_host_toml + debug: msg="Found vaulted_host from vaulted.inventory.toml" + + - assert: + that: + - 'hello=="world"' diff --git a/test/integration/targets/vault/test_vaulted_template.yml b/test/integration/targets/vault/test_vaulted_template.yml new file mode 100644 index 00000000..b495211d --- /dev/null +++ b/test/integration/targets/vault/test_vaulted_template.yml @@ -0,0 +1,6 @@ +- hosts: testhost + gather_facts: False + vars: + - output_dir: . 
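+# For comparison (illustrative only): the same identity list can be set
+# persistently in ansible.cfg rather than via the environment, along the lines of:
+#   [defaults]
+#   vault_identity_list = wrong-password@vault-password-wrong, default@vault-password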
+# test that we can have a vault encrypted yaml file that includes embedded vault vars
+# that were encrypted with a different vault secret
+ansible-playbook test_vault_file_encrypted_embedded.yml -i ../../inventory "$@" --vault-id encrypted_file_encrypted_var_password --vault-id vault-password
+
+# with multiple password files, --vault-id, ordering
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
+
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong --syntax-check
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
+
+# test with multiple password files, including a script, and a wrong password
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file password-script.py --vault-password-file vault-password
+
+# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-id password-script.py --vault-id vault-password
+
+# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
+ansible-playbook test_vault_embedded_ids.yml -i ../../inventory -v "$@" \
+    --vault-password-file vault-password-wrong \
+    --vault-id password-script.py --vault-id example1@example1_password \
+    --vault-id example2@example2_password --vault-password-file example3_password \
+    --vault-id vault-password
+
+# with wrong password
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with wrong password, --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords with --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords with --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id wrong1@vault-password-wrong --vault-id wrong2@vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with empty password file
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id empty@empty-password && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# test invalid format a la https://github.com/ansible/ansible/issues/28038
+EXPECTED_ERROR='Vault format unhexlify error: Non-hexadecimal digit found'
+ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-host-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
+
+EXPECTED_ERROR='Vault format unhexlify error: Odd-length string'
+ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-group-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
+
+# Run playbook with vault file with unicode in filename (https://github.com/ansible/ansible/issues/50316)
+ansible-playbook -i ../../inventory -v "$@" --vault-password-file vault-password test_utf8_value_in_filename.yml
+
+# Ensure we don't leave unencrypted temp files dangling
+ansible-playbook -v "$@" --vault-password-file vault-password test_dangling_temp.yml
+
+ansible-playbook "$@" --vault-password-file vault-password single_vault_as_string.yml
diff --git a/test/integration/targets/vault/runme_change_pip_installed.sh b/test/integration/targets/vault/runme_change_pip_installed.sh
new file mode 100755
index 00000000..5ab2a8ec
--- /dev/null
+++ b/test/integration/targets/vault/runme_change_pip_installed.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# start by removing pycrypto and cryptography
+
+pip uninstall -y cryptography
+pip uninstall -y pycrypto
+
+./runme.sh
+
+# now just pycrypto
+pip install --user pycrypto
+
+./runme.sh
+
+
+# now just cryptography
+
+pip uninstall -y pycrypto
+pip install --user cryptography
+
+./runme.sh
+
+# now both
+
+pip install --user pycrypto
+
+./runme.sh
diff --git a/test/integration/targets/vault/single_vault_as_string.yml b/test/integration/targets/vault/single_vault_as_string.yml
new file mode 100644
index 00000000..1eb17d04
--- /dev/null
+++ b/test/integration/targets/vault/single_vault_as_string.yml
@@ -0,0 +1,123 @@
+- hosts: localhost
+  vars:
+    vaulted_value: !vault |
+      $ANSIBLE_VAULT;1.1;AES256
+      35323961353038346165643738646465376139363061353835303739663538343266303232326635
+      3365353662646236356665323135633630656238316530640a663362363763633436373439663031
+      33663433383037396438656464636433653837376361313638366362333037323961316364363363
+      3835616438623261650a636164376534376661393134326662326362323131373964313961623365
+      3833
+  tasks:
+    - debug:
+        msg: "{{ vaulted_value }}"
+
+    - debug:
+        msg: "{{ vaulted_value|type_debug }}"
+
+    - assert:
+        that:
+          - vaulted_value is vault_encrypted
+          - vaulted_value == 'foo bar'
+          - vaulted_value|string == 'foo bar'
+          - vaulted_value|quote == "'foo bar'"
+          - vaulted_value|capitalize == 'Foo bar'
+          - vaulted_value|center(width=9) == ' foo bar '
+          - vaulted_value|default('monkey') == 'foo bar'
+          - vaulted_value|escape == 'foo bar'
+          - vaulted_value|forceescape == 'foo bar'
+          - vaulted_value|first == 'f'
+          - "'%s'|format(vaulted_value) == 'foo bar'"
+          - vaulted_value|indent(indentfirst=True) == '    foo bar'
+          - vaulted_value.split() == ['foo', 'bar']
+          - vaulted_value|join('-') == 'f-o-o- -b-a-r'
+          - vaulted_value|last == 'r'
+          - vaulted_value|length == 7
+          - vaulted_value|list == ['f', 'o', 'o', ' ', 'b', 'a', 'r']
+          - vaulted_value|lower == 'foo bar'
+          - vaulted_value|replace('foo', 'baz') == 'baz bar'
+          - vaulted_value|reverse|string == 'rab oof'
+          - vaulted_value|safe == 'foo bar'
+          - vaulted_value|slice(2)|list == [['f', 'o', 'o', ' '], ['b', 'a', 'r']]
+          - vaulted_value|sort|list == [" ", "a", "b", "f", "o", "o", "r"]
+          - vaulted_value|trim == 'foo bar'
+          - vaulted_value|upper == 'FOO BAR'
+          # jinja2.filters.do_urlencode uses an isinstance against string_types
+          # - vaulted_value|urlencode == 'foo%20bar'
+          - vaulted_value|urlize == 'foo bar'
+          - vaulted_value is not callable
+          - vaulted_value is iterable
+          - vaulted_value is lower
+          - vaulted_value is not none
+          # This is not exactly a string, and UserString doesn't fulfill this
+          # - vaulted_value is string
+          - vaulted_value is not upper
+
+          - vaulted_value|b64encode == 'Zm9vIGJhcg=='
+          - vaulted_value|to_uuid == '0271fe51-bb26-560f-b118-5d6513850860'
+          - vaulted_value|string|to_json == '"foo bar"'
+          - vaulted_value|md5 == '327b6f07435811239bc47e1544353273'
+          - vaulted_value|sha1 == '3773dea65156909838fa6c22825cafe090ff8030'
+          - vaulted_value|hash == '3773dea65156909838fa6c22825cafe090ff8030'
+          - vaulted_value|regex_replace('foo', 'baz') == 'baz bar'
+          - vaulted_value|regex_escape == 'foo\ bar'
+          - vaulted_value|regex_search('foo') == 'foo'
+          - vaulted_value|regex_findall('foo') == ['foo']
+          - vaulted_value|comment == '#\n# foo bar\n#'
+
+    - assert:
+        that:
+          - vaulted_value|random(seed='foo') == ' '
+          - vaulted_value|shuffle(seed='foo') == ["o", "f", "r", "b", "o", "a", " "]
+          - vaulted_value|pprint == "'foo bar'"
+      when: ansible_python.version.major == 3
+
+    - assert:
+        that:
+          - vaulted_value|random(seed='foo') == 'r'
+          - vaulted_value|shuffle(seed='foo') == ["b", "o", "a", " ", "o", "f", "r"]
+          - vaulted_value|pprint == "u'foo bar'"
+      when: ansible_python.version.major == 2
+
+    - assert:
+        that:
+          - vaulted_value|map('upper')|list == ['F', 'O', 'O', ' ', 'B', 'A', 'R']
+      when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=')
+
+
+    - assert:
+        that:
+          - vaulted_value.split()|first|int(base=36) == 20328
+          - vaulted_value|select('equalto', 'o')|list == ['o', 'o']
+          - vaulted_value|title == 'Foo Bar'
+          - vaulted_value is equalto('foo bar')
+      when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.8', '>=')
+
+    - assert:
+        that:
+          - vaulted_value|string|tojson == '"foo bar"'
+          - vaulted_value|truncate(4) == 'foo bar'
+      when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.9', '>=')
+
+    - assert:
+        that:
+          - vaulted_value|wordwrap(4) == 'foo\nbar'
+      when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11', '>=')
+
+    - assert:
+        that:
+          - vaulted_value|wordcount == 2
+      when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11.2', '>=')
+
+    - ping:
+        data: !vault |
+          $ANSIBLE_VAULT;1.1;AES256
+          35323961353038346165643738646465376139363061353835303739663538343266303232326635
+          3365353662646236356665323135633630656238316530640a663362363763633436373439663031
+          33663433383037396438656464636433653837376361313638366362333037323961316364363363
+          3835616438623261650a636164376534376661393134326662326362323131373964313961623365
+          3833
+      register: ping_result
+
+    - assert:
+        that:
+          - ping_result.ping == 'foo bar'
diff --git a/test/integration/targets/vault/test-vault-client.py b/test/integration/targets/vault/test-vault-client.py
new file mode 100755
index 00000000..a2f17dc5
--- /dev/null
+++ b/test/integration/targets/vault/test-vault-client.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+import argparse
+import sys
+
+# TODO: could read these from the files I suppose...
+secrets = {'vault-password': 'test-vault-password',
+           'vault-password-wrong': 'hunter42',
+           'vault-password-ansible': 'ansible',
+           'password': 'password',
+           'vault-client-password-1': 'password-1',
+           'vault-client-password-2': 'password-2'}
+
+
+def build_arg_parser():
+    parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
+
+    parser.add_argument('--vault-id', action='store', default=None,
+                        dest='vault_id',
+                        help='name of the vault secret to get from keyring')
+    parser.add_argument('--username', action='store', default=None,
+                        help='the username whose keyring is queried')
+    parser.add_argument('--set', action='store_true', default=False,
+                        dest='set_password',
+                        help='set the password instead of getting it')
+    return parser
+
+
+def get_secret(keyname):
+    return secrets.get(keyname, None)
+
+
+def main():
+    rc = 0
+
+    arg_parser = build_arg_parser()
+    args = arg_parser.parse_args()
+    # print('args: %s' % args)
+
+    keyname = args.vault_id or 'ansible'
+
+    if args.set_password:
+        print('--set is not supported yet')
+        sys.exit(1)
+
+    secret = get_secret(keyname)
+    if secret is None:
+        sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % keyname)
+        # key not found rc=2
+        return 2
+
+    sys.stdout.write('%s\n' % secret)
+
+    return rc
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/test/integration/targets/vault/test_dangling_temp.yml b/test/integration/targets/vault/test_dangling_temp.yml
new file mode 100644
index 00000000..71a9d73a
--- /dev/null
+++ b/test/integration/targets/vault/test_dangling_temp.yml
@@ -0,0 +1,34 @@
+- hosts: localhost
+  gather_facts: False
+  vars:
+    od: "{{output_dir|default('/tmp')}}/test_vault_assemble"
+  tasks:
+    - name: create target directory
+      file:
+        path: "{{od}}"
+        state: directory
+
+    - name: assemble_file file with secret
+      assemble:
+        src: files/test_assemble
+        dest: "{{od}}/dest_file"
+        remote_src: no
+        mode: 0600
+
+    - name: remove assembled file with secret (so nothing should have unencrypted secret)
+      file: path="{{od}}/dest_file" state=absent
+
+    - name: find temp files with secrets
+      find:
+        paths: '{{temp_paths}}'
+        contains: 'VAULT TEST IN WHICH BAD THING HAPPENED'
+        recurse: yes
+      register: badthings
+      vars:
+        temp_paths: "{{[lookup('env', 'TMP'), lookup('env', 'TEMP'), hardcoded]|flatten(1)|unique|list}}"
+        hardcoded: ['/tmp', '/var/tmp']
+
+    - name: ensure we failed to find any
+      assert:
+        that:
+          - badthings['matched'] == 0
diff --git a/test/integration/targets/vault/test_utf8_value_in_filename.yml b/test/integration/targets/vault/test_utf8_value_in_filename.yml
new file mode 100644
index 00000000..9bd394dc
--- /dev/null
+++ b/test/integration/targets/vault/test_utf8_value_in_filename.yml
@@ -0,0 +1,16 @@
+- name: "Test that the vaulted file with UTF-8 in filename decrypts correctly"
+  gather_facts: false
+  hosts: testhost
+  vars:
+    expected: "my_secret"
+  vars_files:
+    - vault-café.yml
+  tasks:
+    - name: decrypt vaulted file with utf8 in filename and show it in debug
+      debug:
+        var: vault_string
+
+    - name: assert decrypted value matches expected
+      assert:
+        that:
+          - "vault_string == expected"
diff --git a/test/integration/targets/vault/test_vault.yml b/test/integration/targets/vault/test_vault.yml
new file mode 100644
index 00000000..7f8ed115
--- /dev/null
+++ b/test/integration/targets/vault/test_vault.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+  gather_facts: False
+  vars:
+    - output_dir: .
+  roles:
+    - { role: test_vault, tags: test_vault}
diff --git a/test/integration/targets/vault/test_vault_embedded.yml b/test/integration/targets/vault/test_vault_embedded.yml
new file mode 100644
index 00000000..ee9739f8
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_embedded.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+  gather_facts: False
+  roles:
+    - { role: test_vault_embedded, tags: test_vault_embedded}
diff --git a/test/integration/targets/vault/test_vault_embedded_ids.yml b/test/integration/targets/vault/test_vault_embedded_ids.yml
new file mode 100644
index 00000000..23ebbb96
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_embedded_ids.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+  gather_facts: False
+  roles:
+    - { role: test_vault_embedded_ids, tags: test_vault_embedded_ids}
diff --git a/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml
new file mode 100644
index 00000000..685d20ef
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+  gather_facts: False
+  roles:
+    - { role: test_vault_file_encrypted_embedded, tags: test_vault_file_encrypted_embedded}
diff --git a/test/integration/targets/vault/test_vaulted_inventory.yml b/test/integration/targets/vault/test_vaulted_inventory.yml
new file mode 100644
index 00000000..06b6582b
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_inventory.yml
@@ -0,0 +1,5 @@
+- hosts: vaulted_host
+  gather_facts: no
+  tasks:
+    - name: See if we knew vaulted_host
+      debug: msg="Found vaulted_host from vaulted.inventory"
diff --git a/test/integration/targets/vault/test_vaulted_inventory_toml.yml b/test/integration/targets/vault/test_vaulted_inventory_toml.yml
new file mode 100644
index 00000000..f6e2c5d6
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_inventory_toml.yml
@@ -0,0 +1,9 @@
+- hosts: vaulted_host_toml
+  gather_facts: no
+  tasks:
+    - name: See if we knew vaulted_host_toml
+      debug: msg="Found vaulted_host from vaulted.inventory.toml"
+
+    - assert:
+        that:
+          - 'hello=="world"'
diff --git a/test/integration/targets/vault/test_vaulted_template.yml b/test/integration/targets/vault/test_vaulted_template.yml
new file mode 100644
index 00000000..b495211d
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_template.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+  gather_facts: False
+  vars:
+    - output_dir: .
+  roles:
+    - { role: test_vaulted_template, tags: test_vaulted_template}
diff --git a/test/integration/targets/vault/test_vaulted_utf8_value.yml b/test/integration/targets/vault/test_vaulted_utf8_value.yml
new file mode 100644
index 00000000..63b602b1
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_utf8_value.yml
@@ -0,0 +1,15 @@
+- name: "test that the vaulted_utf8_value decrypts correctly"
+  gather_facts: false
+  hosts: testhost
+  vars:
+    expected: "aöffü"
+  tasks:
+    - name: decrypt vaulted_utf8_value and show it in debug
+      debug:
+        var: vaulted_utf8_value
+
+    - name: assert decrypted vaulted_utf8_value matches expected
+      assert:
+        that:
+          - "vaulted_utf8_value == expected"
+          - "vaulted_utf8_value == 'aöffü'"
diff --git "a/test/integration/targets/vault/vault-caf\303\251.yml" "b/test/integration/targets/vault/vault-caf\303\251.yml"
new file mode 100644
index 00000000..0d179aec
--- /dev/null
+++ "b/test/integration/targets/vault/vault-caf\303\251.yml"
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+63363732353363646661643038636339343263303161346533393636336562336465396265373834
+6366313833613236356666646532613636303532366231340a316238666435306332656662613731
+31623433613434633539333564613564656439343661363831336364376266653462366161383038
+6530386533363933350a336631653833666663643166303932653261323431623333356539666265
+37316464303231366163333430346537353631376538393939646362313337363866
diff --git a/test/integration/targets/vault/vault-password b/test/integration/targets/vault/vault-password
new file mode 100644
index 00000000..96973929
--- /dev/null
+++ b/test/integration/targets/vault/vault-password
@@ -0,0 +1 @@
+test-vault-password
diff --git a/test/integration/targets/vault/vault-password-ansible b/test/integration/targets/vault/vault-password-ansible
new file mode 100644
index 00000000..90d40550
--- /dev/null
+++ b/test/integration/targets/vault/vault-password-ansible
@@ -0,0 +1 @@
+ansible
diff --git a/test/integration/targets/vault/vault-password-wrong b/test/integration/targets/vault/vault-password-wrong
new file mode 100644
index 00000000..50e2efad
--- /dev/null
+++ b/test/integration/targets/vault/vault-password-wrong
@@ -0,0 +1 @@
+hunter42
diff --git a/test/integration/targets/vault/vault-secret.txt b/test/integration/targets/vault/vault-secret.txt
new file mode 100644
index 00000000..b6bc9bfb
--- /dev/null
+++ b/test/integration/targets/vault/vault-secret.txt
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+39303432393062643236616234306333383838333662386165616633303735336537613337396337
+6662666233356462326631653161663663363166323338320a653131656636666339633863346530
+32326238646631653133643936306666643065393038386234343736663239363665613963343661
+3230353633643361650a363034323631613864326438396665343237383566336339323837326464
+3930
diff --git a/test/integration/targets/vault/vaulted.inventory b/test/integration/targets/vault/vaulted.inventory
new file mode 100644
index 00000000..1ed258b6
--- /dev/null
+++ b/test/integration/targets/vault/vaulted.inventory
@@ -0,0 +1,8 @@
+$ANSIBLE_VAULT;1.1;AES256
+62663838646564656432633932396339666332653932656230356332316530613665336461653731
+3839393466623734663861313636356530396434376462320a623966363661306334333639356263
+37366332626434326537353562636139333835613961333635633333313832666432396361393861
+3538626339636634360a396239383139646438323662383637663138646439306532613732306263
+64666237366334663931363462313131323861613237613337366562373532373537613531636334
+64653938333938313539653539303031393936306432623862363263663438653932643338373338
+633436626431656361633934363263303962
diff --git a/test/integration/targets/wait_for/aliases b/test/integration/targets/wait_for/aliases
new file mode 100644
index 00000000..a4c92ef8
--- /dev/null
+++ b/test/integration/targets/wait_for/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/test/integration/targets/wait_for/files/testserver.py b/test/integration/targets/wait_for/files/testserver.py
new file mode 100644
index 00000000..1f6f1187
--- /dev/null
+++ b/test/integration/targets/wait_for/files/testserver.py
@@ -0,0 +1,16 @@
+import sys
+
+if __name__ == '__main__':
+    if sys.version_info[0] >= 3:
+        import http.server
+        import socketserver
+        PORT = int(sys.argv[1])
+        Handler = http.server.SimpleHTTPRequestHandler
+        httpd = socketserver.TCPServer(("", PORT), Handler)
+        httpd.serve_forever()
+    else:
+        import mimetypes
+        mimetypes.init()
+        mimetypes.add_type('application/json', '.json')
+        import SimpleHTTPServer
+        SimpleHTTPServer.test()
diff --git a/test/integration/targets/wait_for/files/zombie.py b/test/integration/targets/wait_for/files/zombie.py
new file mode 100644
index 00000000..913074eb
--- /dev/null
+++ b/test/integration/targets/wait_for/files/zombie.py
@@ -0,0 +1,13 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import sys
+import time
+
+child_pid = os.fork()
+
+if child_pid > 0:
+    time.sleep(60)
+else:
+    sys.exit()
diff --git a/test/integration/targets/wait_for/meta/main.yml b/test/integration/targets/wait_for/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/wait_for/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - prepare_tests
diff --git a/test/integration/targets/wait_for/tasks/main.yml b/test/integration/targets/wait_for/tasks/main.yml
new file mode 100644
index 00000000..67e07786
--- /dev/null
+++ b/test/integration/targets/wait_for/tasks/main.yml
@@ -0,0 +1,177 @@
+---
+- name: test wait_for with delegate_to
+  wait_for:
+    timeout: 2
+  delegate_to: localhost
+  register: waitfor
+
+- assert:
+    that:
+      - waitfor is successful
+      - waitfor.elapsed >= 2
+
+- name: setup create a directory to serve files from
+  file:
+    dest: "{{ files_dir }}"
+    state: directory
+
+- name: setup webserver
+  copy:
+    src: "testserver.py"
+    dest: "{{ output_dir }}/testserver.py"
+
+- name: setup a path
+  file:
+    path: "{{ output_dir }}/wait_for_file"
+    state: touch
+
+- name: setup remove a file after 3s
+  shell: sleep 3 && rm {{ output_dir }}/wait_for_file
+  async: 20
+  poll: 0
+
+- name: test for absent path
+  wait_for:
+    path: "{{ output_dir }}/wait_for_file"
+    state: absent
+    timeout: 20
+  register: waitfor
+- name: verify test for absent path
+  assert:
+    that:
+      - waitfor is successful
+      - waitfor.path == "{{ output_dir | expanduser }}/wait_for_file"
+      - waitfor.elapsed >= 2
+      - waitfor.elapsed <= 15
+
+- name: setup create a file after 3s
+  shell: sleep 3 && touch {{ output_dir }}/wait_for_file
+  async: 20
+  poll: 0
+
+- name: test for present path
+  wait_for:
+    path: "{{ output_dir }}/wait_for_file"
+    timeout: 5
+  register: waitfor
+- name: verify test for present path
+  assert:
+    that:
+      - waitfor is successful
+      - waitfor.path == "{{ output_dir | expanduser }}/wait_for_file"
+      - waitfor.elapsed >= 2
+      - waitfor.elapsed <= 15
+
+- name: setup write keyword to file after 3s
+  shell: sleep 3 && echo completed > {{output_dir}}/wait_for_keyword
+  async: 20
+  poll: 0
+
+- name: test wait for keyword in file
+  wait_for:
+    path: "{{output_dir}}/wait_for_keyword"
+    search_regex: completed
+    timeout: 5
+  register: waitfor
+
+- name: verify test wait for keyword in file
+  assert:
+    that:
+      - waitfor is successful
+      - "waitfor.search_regex == 'completed'"
+      - waitfor.elapsed >= 2
+      - waitfor.elapsed <= 15
+
+- name: setup write keyword to file after 3s
+  shell: sleep 3 && echo "completed data 123" > {{output_dir}}/wait_for_keyword
+  async: 20
+  poll: 0
+
+- name: test wait for keyword in file with match groups
+  wait_for:
+    path: "{{output_dir}}/wait_for_keyword"
+    search_regex: completed (?P<foo>\w+) ([0-9]+)
+    timeout: 5
+  register: waitfor
+
+- name: verify test wait for keyword in file with match groups
+  assert:
+    that:
+      - waitfor is successful
+      - waitfor.elapsed >= 2
+      - waitfor.elapsed <= 15
+      - waitfor['match_groupdict'] | length == 1
+      - waitfor['match_groupdict']['foo'] == 'data'
+      - waitfor['match_groups'] == ['data', '123']
+
+- name: test wait for port timeout
+  wait_for:
+    port: 12121
+    timeout: 3
+  register: waitfor
+  ignore_errors: true
+- name: verify test wait for port timeout
+  assert:
+    that:
+      - waitfor is failed
+      - waitfor.elapsed == 3
+      - "waitfor.msg == 'Timeout when waiting for 127.0.0.1:12121'"
+
+- name: test fail with custom msg
+  wait_for:
+    port: 12121
+    msg: fail with custom message
+    timeout: 3
+  register: waitfor
+  ignore_errors: true
+- name: verify test fail with custom msg
+  assert:
+    that:
+      - waitfor is failed
+      - waitfor.elapsed == 3
+      - "waitfor.msg == 'fail with custom message'"
+
+- name: setup start SimpleHTTPServer
+  shell: sleep 3 && cd {{ files_dir }} && {{ ansible_python.executable }} {{ output_dir}}/testserver.py {{ http_port }}
+  async: 120 # this test set can take ~1m to run on FreeBSD (via Shippable)
+  poll: 0
+
+- name: test wait for port with sleep
+  wait_for:
+    port: "{{ http_port }}"
+    sleep: 3
+  register: waitfor
+- name: verify test wait for port sleep
+  assert:
+    that:
+      - waitfor is successful
+      - waitfor is not changed
+      - "waitfor.port == {{ http_port }}"
+
+- name: install psutil using pip (non-Linux only)
+  pip:
+    name: psutil
+  when: ansible_system != 'Linux'
+
+- name: Copy zombie.py
+  copy:
+    src: zombie.py
+    dest: "{{ output_dir }}"
+
+- name: Create zombie process
+  shell: "{{ ansible_python.executable }} {{ output_dir }}/zombie.py"
+  async: 90
+  poll: 0
+
+- name: test wait for port drained
+  wait_for:
+    port: "{{ http_port }}"
+    state: drained
+  register: waitfor
+
+- name: verify test wait for port
+  assert:
+    that:
+      - waitfor is successful
+      - waitfor is not changed
+      - "waitfor.port == {{ http_port }}"
diff --git a/test/integration/targets/wait_for/vars/main.yml b/test/integration/targets/wait_for/vars/main.yml
new file mode 100644
index 00000000..c2732948
--- /dev/null
+++ b/test/integration/targets/wait_for/vars/main.yml
@@ -0,0 +1,4 @@
+---
+http_port: 15261
+files_dir: '{{ output_dir|expanduser }}/files'
+checkout_dir: '{{ output_dir }}/git'
diff --git a/test/integration/targets/wait_for_connection/aliases b/test/integration/targets/wait_for_connection/aliases
new file mode 100644
index 00000000..7ab3bd0a
--- /dev/null
+++ b/test/integration/targets/wait_for_connection/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+shippable/windows/group1
diff --git a/test/integration/targets/wait_for_connection/tasks/main.yml b/test/integration/targets/wait_for_connection/tasks/main.yml
new file mode 100644
index 00000000..19749e68
--- /dev/null
+++ b/test/integration/targets/wait_for_connection/tasks/main.yml
@@ -0,0 +1,30 @@
+- name: Test normal connection to target node
+  wait_for_connection:
+    connect_timeout: 5
+    sleep: 1
+    timeout: 10
+
+- name: Test normal connection to target node with delay
+  wait_for_connection:
+    connect_timeout: 5
+    sleep: 1
+    timeout: 10
+    delay: 3
+  register: result
+
+- name: Verify delay was honored
+  assert:
+    that:
+      - result.elapsed >= 3
+
+- name: Use invalid parameter
+  wait_for_connection:
+    foo: bar
+  ignore_errors: yes
+  register: invalid_parameter
+
+- name: Ensure task fails with error
+  assert:
+    that:
+      - invalid_parameter is failed
+      - "invalid_parameter.msg == 'Invalid options for wait_for_connection: foo'"
diff --git a/test/integration/targets/want_json_modules_posix/aliases b/test/integration/targets/want_json_modules_posix/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/want_json_modules_posix/library/helloworld.py b/test/integration/targets/want_json_modules_posix/library/helloworld.py
new file mode 100644
index 00000000..ad0301cb
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/library/helloworld.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+
+import json
+import sys
+
+try:
+    with open(sys.argv[1], 'r') as f:
+        data = json.load(f)
+except (IOError, OSError, IndexError):
+    print(json.dumps(dict(msg="No argument file provided", failed=True)))
+    sys.exit(1)
+
+salutation = data.get('salutation', 'Hello')
+name = data.get('name', 'World')
+print(json.dumps(dict(msg='%s, %s!' % (salutation, name))))
diff --git a/test/integration/targets/want_json_modules_posix/meta/main.yml b/test/integration/targets/want_json_modules_posix/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - setup_remote_tmp_dir
diff --git a/test/integration/targets/want_json_modules_posix/tasks/main.yml b/test/integration/targets/want_json_modules_posix/tasks/main.yml
new file mode 100644
index 00000000..27e9f781
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: Hello, World!
+  helloworld:
+  register: hello_world
+
+- assert:
+    that:
+      - 'hello_world.msg == "Hello, World!"'
+
+- name: Hello, Ansible!
+  helloworld:
+  args:
+    name: Ansible
+  register: hello_ansible
+
+- assert:
+    that:
+      - 'hello_ansible.msg == "Hello, Ansible!"'
+
+- name: Goodbye, Ansible!
+  helloworld:
+  args:
+    salutation: Goodbye
+    name: Ansible
+  register: goodbye_ansible
+
+- assert:
+    that:
+      - 'goodbye_ansible.msg == "Goodbye, Ansible!"'
+
+- name: Copy module to remote
+  copy:
+    src: "{{ role_path }}/library/helloworld.py"
+    dest: "{{ remote_tmp_dir }}/helloworld.py"
+
+- name: Execute module directly
+  command: '{{ ansible_python_interpreter|default(ansible_playbook_python) }} {{ remote_tmp_dir }}/helloworld.py'
+  register: direct
+  ignore_errors: true
+
+- assert:
+    that:
+      - direct is failed
+      - 'direct.stdout | from_json == {"msg": "No argument file provided", "failed": true}'
diff --git a/test/integration/targets/win_async_wrapper/aliases b/test/integration/targets/win_async_wrapper/aliases
new file mode 100644
index 00000000..59dda5e6
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/aliases
@@ -0,0 +1,3 @@
+async_status
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_async_wrapper/library/async_test.ps1 b/test/integration/targets/win_async_wrapper/library/async_test.ps1
new file mode 100644
index 00000000..fa41b3e8
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/library/async_test.ps1
@@ -0,0 +1,48 @@
+#!powershell
+
+# Copyright: (c) 2018, Ansible Project
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$parsed_args = Parse-Args $args
+
+$sleep_delay_sec = Get-AnsibleParam -obj $parsed_args -name "sleep_delay_sec" -type "int" -default 0
+$fail_mode = Get-AnsibleParam -obj $parsed_args -name "fail_mode" -type "str" -default "success" -validateset "success","graceful","exception"
+
+If($fail_mode -isnot [array]) {
+    $fail_mode = @($fail_mode)
+}
+
+$result = @{
+    changed = $true
+    module_pid = $pid
+    module_tempdir = $PSScriptRoot
+}
+
+If($sleep_delay_sec -gt 0) {
+    Sleep -Seconds $sleep_delay_sec
+    $result["slept_sec"] = $sleep_delay_sec
+}
+
+If($fail_mode -contains "leading_junk") {
+    Write-Output "leading junk before module output"
+}
+
+If($fail_mode -contains "graceful") {
+    Fail-Json $result "failed gracefully"
+}
+
+Try {
+
+    If($fail_mode -contains "exception") {
+        Throw "failing via exception"
+    }
+
+    Exit-Json $result
+}
+Finally
+{
+    If($fail_mode -contains "trailing_junk") {
+        Write-Output "trailing junk after module output"
+    }
+}
diff --git a/test/integration/targets/win_async_wrapper/tasks/main.yml b/test/integration/targets/win_async_wrapper/tasks/main.yml
new file mode 100644
index 00000000..91b45846
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/tasks/main.yml
@@ -0,0 +1,257 @@
+- name: capture timestamp before fire and forget
+  set_fact:
+    start_timestamp: "{{ lookup('pipe', 'date +%s') }}"
+
+- name: async fire and forget
+  async_test:
+    sleep_delay_sec: 15
+  async: 20
+  poll: 0
+  register: asyncresult
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.started == 1
+      - asyncresult is started
+      - asyncresult.finished == 0
+      - asyncresult is not finished
+      - asyncresult.results_file is search('\.ansible_async.+\d+\.\d+')
+      # ensure that async is actually async- this test will fail if # hosts > forks or if the target host is VERY slow
+      - (lookup('pipe', 'date +%s') | int) - (start_timestamp | int) < 15
+
+- name: async poll immediate success
+  async_test:
+    sleep_delay_sec: 0
+  async: 10
+  poll: 1
+  register: asyncresult
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.finished == 1
+      - asyncresult is finished
+      - asyncresult is changed
+      - asyncresult.ansible_async_watchdog_pid is number
+#      - asyncresult.module_tempdir is search('ansible-tmp-')
+      - asyncresult.module_pid is number
+
+# this part of the test is flaky- Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that watchdog and module procs have exited
+#  raw: Get-Process | Where { $_.Id -in ({{ asyncresult.ansible_async_watchdog_pid }}, {{ asyncresult.module_pid }}) }
+#  register: proclist
+#
+#- name: validate no running watchdog/module processes were returned
+#  assert:
+#    that:
+#      - proclist.stdout.strip() == ''
+
+#- name: ensure that module_tempdir was deleted
+#  raw: Test-Path {{ asyncresult.module_tempdir }}
+#  register: tempdircheck
+#
+#- name: validate tempdir response
+#  assert:
+#    that:
+#      - tempdircheck.stdout is search('False')
+
+- name: async poll retry
+  async_test:
+    sleep_delay_sec: 5
+  async: 10
+  poll: 1
+  register: asyncresult
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.finished == 1
+      - asyncresult is finished
+      - asyncresult is changed
+#      - asyncresult.module_tempdir is search('ansible-tmp-')
+      - asyncresult.module_pid is number
+
+# this part of the test is flaky- Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that watchdog and module procs have exited
+#  raw: Get-Process | Where { $_.Id -in ({{ asyncresult.ansible_async_watchdog_pid }}, {{ asyncresult.module_pid }}) }
+#  register: proclist
+#
+#- name: validate no running watchdog/module processes were returned
+#  assert:
+#    that:
+#      - proclist.stdout.strip() == ''
+
+#- name: ensure that module_tempdir was deleted
+#  raw: Test-Path {{ asyncresult.module_tempdir }}
+#  register: tempdircheck
+#
+#- name: validate tempdir response
+#  assert:
+#    that:
+#      - tempdircheck.stdout is search('False')
+
+- name: async poll timeout
+  async_test:
+    sleep_delay_sec: 5
+  async: 3
+  poll: 1
+  register: asyncresult
+  ignore_errors: true
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.finished == 1
+      - asyncresult is finished
+      - asyncresult is not changed
+      - asyncresult is failed
+      - asyncresult.msg is search('timed out')
+
+- name: async poll graceful module failure
+  async_test:
+    fail_mode: graceful
+  async: 5
+  poll: 1
+  register: asyncresult
+  ignore_errors: true
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.finished == 1
+      - asyncresult is finished
+      - asyncresult is changed
+      - asyncresult is failed
+      - asyncresult.msg == 'failed gracefully'
+
+- name: async poll exception module failure
+  async_test:
+    fail_mode: exception
+  async: 5
+  poll: 1
+  register: asyncresult
+  ignore_errors: true
+
+- name: validate response
+  assert:
+    that:
+      - asyncresult.ansible_job_id is match('\d+\.\d+')
+      - asyncresult.finished == 1
+      - asyncresult is finished
+      - asyncresult is not changed
+      - asyncresult is failed
+      - 'asyncresult.msg == "Unhandled exception while executing module: failing via exception"'
+
+- name: echo some non ascii characters
+  win_command: cmd.exe /c echo über den Fußgängerübergang gehen
+  async: 10
+  poll: 1
+  register: nonascii_output
+
+- name: assert echo some non ascii characters
+  assert:
+    that:
+      - nonascii_output is changed
+      - nonascii_output.rc == 0
+      - nonascii_output.stdout_lines|count == 1
+      - nonascii_output.stdout_lines[0] == 'über den Fußgängerübergang gehen'
+      - nonascii_output.stderr == ''
+
+- name: test async with custom async dir
+  win_shell: echo hi
+  register: async_custom_dir
+  async: 5
+  vars:
+    ansible_async_dir: '{{win_output_dir}}'
+
+- name: assert results file is in the remote tmp specified
+  assert:
+    that:
+      - async_custom_dir.results_file == win_output_dir + '\\' + async_custom_dir.ansible_job_id
+
+- name: test async fire and forget with custom async dir
+  win_shell: echo hi
+  register: async_custom_dir_poll
+  async: 5
+  poll: 0
+  vars:
+    ansible_async_dir: '{{win_output_dir}}'
+
+- name: poll with different dir - fail
+  async_status:
+    jid: '{{ async_custom_dir_poll.ansible_job_id }}'
+  register: fail_async_custom_dir_poll
+  ignore_errors: yes
+
+- name: poll with different dir - success
+  async_status:
+    jid: '{{ async_custom_dir_poll.ansible_job_id }}'
+  register: success_async_custom_dir_poll
+  vars:
+    ansible_async_dir: '{{win_output_dir}}'
+
+- name: assert test async fire and forget with custom async dir
+  assert:
+    that:
+      - fail_async_custom_dir_poll.failed
+      - '"could not find job at ''" + nonascii_output.results_file|win_dirname + "''" in fail_async_custom_dir_poll.msg'
+      - not success_async_custom_dir_poll.failed
+      - success_async_custom_dir_poll.results_file == win_output_dir + '\\' + async_custom_dir_poll.ansible_job_id
+
+# FUTURE: figure out why the last iteration of this test often fails on shippable
+#- name: loop async success
+#  async_test:
+#    sleep_delay_sec: 3
+#  async: 10
+#  poll: 0
+#  with_sequence: start=1 end=4
+#  register: async_many
+#
+#- name: wait for completion
+#  async_status:
+#    jid: "{{ item }}"
+#  register: asyncout
+#  until: asyncout is finished
+#  retries: 10
+#  delay: 1
+#  with_items: "{{ async_many.results | map(attribute='ansible_job_id') | list }}"
+#
+#- name: validate results
+#  assert:
+#    that:
+#      - item.finished == 1
+#      - item is finished
+#      - item.slept_sec == 3
+#      - item is changed
+#      - item.ansible_job_id is match('\d+\.\d+')
+#  with_items: "{{ asyncout.results }}"
+
+# this part of the test is flaky- Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that all watchdog and module procs have exited
+#  raw: Get-Process | Where { $_.Id -in ({{ asyncout.results | join(',', attribute='ansible_async_watchdog_pid') }}, {{ asyncout.results | join(',', attribute='module_pid') }}) }
+#  register: proclist
+#
+#- name: validate no processes were returned
+#  assert:
+#    that:
+#      - proclist.stdout.strip() == ""
+
+# FUTURE: test junk before/after JSON
+# FUTURE: verify tempdir stays through module exec
+# FUTURE: verify tempdir is deleted after module exec
+# FUTURE: verify tempdir is permanent with ANSIBLE_KEEP_REMOTE_FILES=1 (how?)
+# FUTURE: verify binary modules work
+
+# FUTURE: test status/return
+# FUTURE: test status/cleanup
+# FUTURE: test reboot/connection failure
+# FUTURE: figure out how to ensure that processes and tempdirs are cleaned up in all exceptional cases
diff --git a/test/integration/targets/win_become/aliases b/test/integration/targets/win_become/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_become/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_become/tasks/main.yml b/test/integration/targets/win_become/tasks/main.yml
new file mode 100644
index 00000000..a0759580
--- /dev/null
+++ b/test/integration/targets/win_become/tasks/main.yml
@@ -0,0 +1,251 @@
+- set_fact:
+    become_test_username: ansible_become_test
+    become_test_admin_username: ansible_become_admin
+    gen_pw: "{{ 'password123!' + lookup('password', '/dev/null chars=ascii_letters,digits length=8') }}"
+
+- name: create unprivileged user
+  win_user:
+    name: "{{ become_test_username }}"
+    password: "{{ gen_pw }}"
+    update_password: always
+    groups: Users
+  register: user_limited_result
+
+- name: create a privileged user
+  win_user:
+    name: "{{ become_test_admin_username }}"
+    password: "{{ gen_pw }}"
+    update_password: always
+    groups: Administrators
+  register: user_admin_result
+
+- name: add requisite logon rights for test user
+  win_user_right:
+    name: '{{item}}'
+    users: '{{become_test_username}}'
+    action: add
+  with_items:
+    - SeNetworkLogonRight
+    - SeInteractiveLogonRight
+    - SeBatchLogonRight
+
+- name: fetch current target date/time for log filtering
+  raw: '[datetime]::now | Out-String'
+  register: test_starttime
+
+- name: execute tests and ensure that test user is deleted regardless of success/failure
+  block:
+    - name: ensure current user is not the become user
+      win_whoami:
+      register: whoami_out
+      failed_when: whoami_out.account.sid == user_limited_result.sid or whoami_out.account.sid == user_admin_result.sid
+
+    - name: get become user profile dir so we can clean it up later
+      vars: &become_vars
+        ansible_become_user: "{{ become_test_username }}"
+        ansible_become_password: "{{ gen_pw }}"
+        ansible_become_method: runas
+        ansible_become: yes
+      win_shell: $env:USERPROFILE
+      register: profile_dir_out
+
+    - name: ensure profile dir contains test username (eg, if become fails silently, prevent deletion of real user profile)
+      assert:
+        that:
+          - become_test_username in profile_dir_out.stdout_lines[0]
+
+    - name: get become admin user profile dir so we can clean it up later
+      vars: &admin_become_vars
+        ansible_become_user: "{{ become_test_admin_username }}"
+        ansible_become_password: "{{ gen_pw }}"
+        ansible_become_method: runas
+        ansible_become: yes
+      win_shell: $env:USERPROFILE
+      register: admin_profile_dir_out
+
+    - name: ensure profile dir contains admin test username
+      assert:
+        that:
+          - become_test_admin_username in admin_profile_dir_out.stdout_lines[0]
+
+    - name: test become runas via task vars (underprivileged user)
+      vars: *become_vars
+      win_whoami:
+      register: whoami_out
+
+    - name: verify output
+      assert:
+        that:
+          - whoami_out.account.sid == user_limited_result.sid
+          - whoami_out.account.account_name == become_test_username
+          - whoami_out.label.account_name == 'Medium Mandatory Level'
+          - whoami_out.label.sid == 'S-1-16-8192'
+          - whoami_out.logon_type == 'Interactive'
+
+    - name: test become runas via task vars (privileged user)
+      vars: *admin_become_vars
+      win_whoami:
+      register: whoami_out
+
+    - name: verify output
+      assert:
+        that:
+          - whoami_out.account.sid == user_admin_result.sid
+          - whoami_out.account.account_name == become_test_admin_username
+          - whoami_out.label.account_name == 'High Mandatory Level'
+          - whoami_out.label.sid == 'S-1-16-12288'
+          - whoami_out.logon_type == 'Interactive'
+
+    - name: test become runas via task keywords
+      vars:
+        ansible_become_password: "{{ gen_pw }}"
+      become: yes
+      become_method: runas
+      become_user: "{{ become_test_username }}"
+      win_shell: whoami
+      register: whoami_out
+
+    - name: verify output
+      assert:
+        that:
+          - whoami_out.stdout_lines[0].endswith(become_test_username)
+
+    - name: test become via block vars
+      vars: *become_vars
+      block:
+        - name: ask who the current user is
+          win_whoami:
+          register: whoami_out
+
+        - name: verify output
+          assert:
+            that:
+              - whoami_out.account.sid == user_limited_result.sid
+              - whoami_out.account.account_name == become_test_username
+              - whoami_out.label.account_name == 'Medium Mandatory Level'
+              - whoami_out.label.sid == 'S-1-16-8192'
+              - whoami_out.logon_type == 'Interactive'
+
+    - name: test with module that will return non-zero exit code (https://github.com/ansible/ansible/issues/30468)
+      vars: *become_vars
+      setup:
+
+    - name: test become with invalid password
+      win_whoami:
+      vars:
+        ansible_become_pass: '{{ gen_pw }}abc'
+      become: yes
+      become_method: runas
+      become_user: '{{ become_test_username }}'
+      register: become_invalid_pass
+      failed_when:
+        - '"Failed to become user " + become_test_username not in become_invalid_pass.msg'
+        - '"LogonUser failed" not in become_invalid_pass.msg'
+        - '"(Win32ErrorCode 1326 - 0x0000052E)" not in become_invalid_pass.msg'
+
+    - name: test become password precedence
+      win_whoami:
+      become: yes
+      become_method: runas
+      become_user: '{{ become_test_username }}'
+      vars:
+        ansible_become_pass: broken
+        ansible_runas_pass: '{{ gen_pw }}' # should have a higher precedence than ansible_become_pass
+
+    - name: test become + async
+      vars: *become_vars
+      win_command: whoami
+      async: 10
+      register: whoami_out
+
+    - name: verify become + async worked
+      assert:
+        that:
+          - whoami_out is successful
+          - become_test_username in whoami_out.stdout
+
+    - name: test failure with string become invalid key
+      vars: *become_vars
+      win_whoami:
+      become_flags: logon_type=batch invalid_flags=a
+      become_method: runas
+      register: failed_flags_invalid_key
+      failed_when: "failed_flags_invalid_key.msg != \"internal error: failed to parse become_flags 'logon_type=batch invalid_flags=a': become_flags key 'invalid_flags' is not a valid runas flag, must be 'logon_type' or 'logon_flags'\""
+
+    - name: test failure with invalid logon_type
+      vars: *become_vars
+      win_whoami:
+      become_flags: logon_type=invalid
+      register: failed_flags_invalid_type
+      failed_when: "failed_flags_invalid_type.msg != \"internal error: failed to parse become_flags 'logon_type=invalid': become_flags logon_type value 'invalid' is not valid, valid values are: interactive, network, batch, service, unlock, network_cleartext, new_credentials\""
+
+    - name: test failure with invalid logon_flag
+      vars: *become_vars
+      win_whoami:
+      become_flags: logon_flags=with_profile,invalid
+      register: failed_flags_invalid_flag
+      failed_when: "failed_flags_invalid_flag.msg != \"internal error: failed to parse become_flags 'logon_flags=with_profile,invalid': become_flags logon_flags value 'invalid' is not valid, valid values are: with_profile, netcredentials_only\""
+
+    - name: echo some non ascii characters
+      win_command: cmd.exe /c
echo über den Fußgängerübergang gehen + vars: *become_vars + register: nonascii_output + + - name: assert echo some non ascii characters + assert: + that: + - nonascii_output is changed + - nonascii_output.rc == 0 + - nonascii_output.stdout_lines|count == 1 + - nonascii_output.stdout_lines[0] == 'über den Fußgängerübergang gehen' + - nonascii_output.stderr == '' + + - name: get PS events containing password or module args created since test start + raw: | + $dt=[datetime]"{{ test_starttime.stdout|trim }}" + (Get-WinEvent -LogName Microsoft-Windows-Powershell/Operational | + ? { $_.TimeCreated -ge $dt -and $_.Message -match "{{ gen_pw }}" }).Count + register: ps_log_count + + - name: assert no PS events contain password or module args + assert: + that: + - ps_log_count.stdout | int == 0 + +# FUTURE: test raw + script become behavior once they're running under the exec wrapper again +# FUTURE: add standalone playbook tests to include password prompting and play become keywords + + always: + - name: remove explicit logon rights for test user + win_user_right: + name: '{{item}}' + users: '{{become_test_username}}' + action: remove + with_items: + - SeNetworkLogonRight + - SeInteractiveLogonRight + - SeBatchLogonRight + + - name: ensure underprivileged test user is deleted + win_user: + name: "{{ become_test_username }}" + state: absent + + - name: ensure privileged test user is deleted + win_user: + name: "{{ become_test_admin_username }}" + state: absent + + - name: ensure underprivileged test user profile is deleted + # NB: have to work around powershell limitation of long filenames until win_file fixes it + win_shell: rmdir /S /Q {{ profile_dir_out.stdout_lines[0] }} + args: + executable: cmd.exe + when: become_test_username in profile_dir_out.stdout_lines[0] + + - name: ensure privileged test user profile is deleted + # NB: have to work around powershell limitation of long filenames until win_file fixes it + win_shell: rmdir /S /Q {{ admin_profile_dir_out.stdout_lines[0] }} + args: + executable: cmd.exe + when: become_test_admin_username in admin_profile_dir_out.stdout_lines[0] diff --git a/test/integration/targets/win_exec_wrapper/aliases b/test/integration/targets/win_exec_wrapper/aliases new file mode 100644 index 00000000..1eed2ecf --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/aliases @@ -0,0 +1,2 @@ +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1 b/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1 new file mode 100644 index 00000000..7c2c9c7b --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1 @@ -0,0 +1,12 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.SID +#Requires -Version 3.0 +#AnsibleRequires -OSVersion 6 +#AnsibleRequires -Become + +$output = &whoami.exe +$sid = Convert-ToSID -account_name $output.Trim() + +Exit-Json -obj @{ output = $sid; changed = $false } diff --git a/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1 b/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1 new file mode 100644 index 00000000..9a5918f9 --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1 @@ -0,0 +1,40 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +$ErrorActionPreference = "Stop" + +Function Assert-Equals($actual, $expected) { + if ($actual -cne $expected) { + 
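+        # -cne is PowerShell's case-sensitive not-equal operator; the call stack
+        # lookup below lets the failure message report the caller's line number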
$call_stack = (Get-PSCallStack)[1] + $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)" + Fail-Json -obj $result -message $error_msg + } +} + +$result = @{ + changed = $false +} + +#ConvertFrom-AnsibleJson +$input_json = '{"string":"string","float":3.1415926,"dict":{"string":"string","int":1},"list":["entry 1","entry 2"],"null":null,"int":1}' +$actual = ConvertFrom-AnsibleJson -InputObject $input_json +Assert-Equals -actual $actual.GetType() -expected ([Hashtable]) +Assert-Equals -actual $actual.string.GetType() -expected ([String]) +Assert-Equals -actual $actual.string -expected "string" +Assert-Equals -actual $actual.int.GetType() -expected ([Int32]) +Assert-Equals -actual $actual.int -expected 1 +Assert-Equals -actual $actual.null -expected $null +Assert-Equals -actual $actual.float.GetType() -expected ([Decimal]) +Assert-Equals -actual $actual.float -expected 3.1415926 +Assert-Equals -actual $actual.list.GetType() -expected ([Object[]]) +Assert-Equals -actual $actual.list.Count -expected 2 +Assert-Equals -actual $actual.list[0] -expected "entry 1" +Assert-Equals -actual $actual.list[1] -expected "entry 2" +Assert-Equals -actual $actual.dict.GetType() -expected ([Hashtable]) +Assert-Equals -actual $actual.dict.string -expected "string" +Assert-Equals -actual $actual.dict.int -expected 1 + +$result.msg = "good" +Exit-Json -obj $result + diff --git a/test/integration/targets/win_exec_wrapper/library/test_fail.ps1 b/test/integration/targets/win_exec_wrapper/library/test_fail.ps1 new file mode 100644 index 00000000..06c63f72 --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_fail.ps1 @@ -0,0 +1,58 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +$params = Parse-Args $args -supports_check_mode $true + +$data = Get-AnsibleParam -obj $params -name "data" -type "str" -default "normal" +$result = @{ + changed = $false +} + +<# +This module tests various error events in PowerShell to verify our hidden trap +catches them all and outputs a pretty error message with a traceback to help +users debug the actual issue + +normal - normal execution, no errors +fail - Calls Fail-Json like normal +throw - throws an exception +error - Write-Error with ErrorActionPreference set to Stop +cmdlet_error - Calls a Cmdlet with an invalid path, producing a cmdlet error +dotnet_exception - Calls a .NET function that will throw an error +function_throw - Throws an exception in a function +proc_exit_fine - calls an executable with a non-zero exit code with Exit-Json +proc_exit_fail - calls an executable with a non-zero exit code with Fail-Json +#> + +Function Test-ThrowException { + throw "exception in function" +} + +if ($data -eq "normal") { + Exit-Json -obj $result +} elseif ($data -eq "fail") { + Fail-Json -obj $result -message "fail message" +} elseif ($data -eq "throw") { + throw [ArgumentException]"module is thrown" +} elseif ($data -eq "error") { + Write-Error -Message $data +} elseif ($data -eq "cmdlet_error") { + Get-Item -Path "fake:\path" +} elseif ($data -eq "dotnet_exception") { + [System.IO.Path]::GetFullPath($null) +} elseif ($data -eq "function_throw") { + Test-ThrowException +} elseif ($data -eq "proc_exit_fine") { + # verifies that if no error was actually fired and we have an output, we + # don't use the RC to validate if the module failed + &cmd.exe /c exit 2 + Exit-Json -obj $result +} elseif ($data -eq "proc_exit_fail") { + &cmd.exe /c exit 2 + Fail-Json -obj $result -message
"proc_exit_fail" +} + +# verify no exception were silently caught during our tests +Fail-Json -obj $result -message "end of module" + diff --git a/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1 b/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1 new file mode 100644 index 00000000..89727ef1 --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1 @@ -0,0 +1,9 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +# Requires -Version 20 +# AnsibleRequires -OSVersion 20 + +# requires statement must be straight after the original # with now space, this module won't fail + +Exit-Json -obj @{ output = "output"; changed = $false } diff --git a/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1 b/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1 new file mode 100644 index 00000000..39b1ded1 --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1 @@ -0,0 +1,8 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#AnsibleRequires -OSVersion 20.0 + +# this shouldn't run as no Windows OS will meet the version of 20.0 + +Exit-Json -obj @{ output = "output"; changed = $false } diff --git a/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1 b/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1 new file mode 100644 index 00000000..bb5fd0f2 --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1 @@ -0,0 +1,8 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Version 20.0.0.0 + +# this shouldn't run as no PS Version will be at 20 in the near future + +Exit-Json -obj @{ output = "output"; changed = $false } diff --git a/test/integration/targets/win_exec_wrapper/tasks/main.yml b/test/integration/targets/win_exec_wrapper/tasks/main.yml new file mode 100644 index 00000000..8fc54f7c --- /dev/null +++ b/test/integration/targets/win_exec_wrapper/tasks/main.yml @@ -0,0 +1,274 @@ +--- +- name: fetch current target date/time for log filtering + raw: '[datetime]::now | Out-String' + register: test_starttime + +- name: test normal module execution + test_fail: + register: normal + +- name: assert test normal module execution + assert: + that: + - not normal is failed + +- name: test fail module execution + test_fail: + data: fail + register: fail_module + ignore_errors: yes + +- name: assert test fail module execution + assert: + that: + - fail_module is failed + - fail_module.msg == "fail message" + - not fail_module.exception is defined + +- name: test module with exception thrown + test_fail: + data: throw + register: throw_module + ignore_errors: yes + +- name: assert test module with exception thrown + assert: + that: + - throw_module is failed + - 'throw_module.msg == "Unhandled exception while executing module: module is thrown"' + - '"throw [ArgumentException]\"module is thrown\"" in throw_module.exception' + +- name: test module with error msg + test_fail: + data: error + register: error_module + ignore_errors: yes + vars: + # Running with coverage means the module is run from a script and not as a psuedo script in a pipeline. This + # results in a different error message being returned so we disable coverage collection for this task. 
+ _ansible_coverage_remote_output: '' + +- name: assert test module with error msg + assert: + that: + - error_module is failed + - 'error_module.msg == "Unhandled exception while executing module: error"' + - '"Write-Error -Message $data" in error_module.exception' + +- name: test module with cmdlet error + test_fail: + data: cmdlet_error + register: cmdlet_error + ignore_errors: yes + +- name: assert test module with cmdlet error + assert: + that: + - cmdlet_error is failed + - 'cmdlet_error.msg == "Unhandled exception while executing module: Cannot find drive. A drive with the name ''fake'' does not exist."' + - '"Get-Item -Path \"fake:\\path\"" in cmdlet_error.exception' + +- name: test module with .NET exception + test_fail: + data: dotnet_exception + register: dotnet_exception + ignore_errors: yes + +- name: assert test module with .NET exception + assert: + that: + - dotnet_exception is failed + - 'dotnet_exception.msg == "Unhandled exception while executing module: Exception calling \"GetFullPath\" with \"1\" argument(s): \"The path is not of a legal form.\""' + - '"[System.IO.Path]::GetFullPath($null)" in dotnet_exception.exception' + +- name: test module with function exception + test_fail: + data: function_throw + register: function_exception + ignore_errors: yes + vars: + _ansible_coverage_remote_output: '' + +- name: assert test module with function exception + assert: + that: + - function_exception is failed + - 'function_exception.msg == "Unhandled exception while executing module: exception in function"' + - '"throw \"exception in function\"" in function_exception.exception' + - '"at Test-ThrowException, : line" in function_exception.exception' + +- name: test module with fail process but Exit-Json + test_fail: + data: proc_exit_fine + register: proc_exit_fine + +- name: assert test module with fail process but Exit-Json + assert: + that: + - not proc_exit_fine is failed + +- name: test module with fail process but Fail-Json + test_fail: + data: proc_exit_fail + register: proc_exit_fail + ignore_errors: yes + +- name: assert test module with fail process but Fail-Json + assert: + that: + - proc_exit_fail is failed + - proc_exit_fail.msg == "proc_exit_fail" + - not proc_exit_fail.exception is defined + +- name: test out invalid options + test_invalid_requires: + register: invalid_options + +- name: assert test out invalid options + assert: + that: + - invalid_options is successful + - invalid_options.output == "output" + +- name: test out invalid os version + test_min_os_version: + register: invalid_os_version + ignore_errors: yes + +- name: assert test out invalid os version + assert: + that: + - invalid_os_version is failed + - '"This module cannot run on this OS as it requires a minimum version of 20.0, actual was " in invalid_os_version.msg' + +- name: test out invalid powershell version + test_min_ps_version: + register: invalid_ps_version + ignore_errors: yes + +- name: assert test out invalid powershell version + assert: + that: + - invalid_ps_version is failed + - '"This module cannot run as it requires a minimum PowerShell version of 20.0.0.0, actual was " in invalid_ps_version.msg' + +- name: test out environment block for task + win_shell: set + args: + executable: cmd.exe + environment: + String: string value + Int: 1234 + Bool: True + double_quote: 'double " quote' + single_quote: "single ' quote" + hyphen-var: abc@123 + '_-(){}[]<>*+-/\?"''!@#$%^&|;:i,.`~0': '_-(){}[]<>*+-/\?"''!@#$%^&|;:i,.`~0' + '‘key': 'value‚' + register: environment_block + +- name: assert 
environment block for task + assert: + that: + - '"String=string value" in environment_block.stdout_lines' + - '"Int=1234" in environment_block.stdout_lines' + - '"Bool=True" in environment_block.stdout_lines' + - '"double_quote=double \" quote" in environment_block.stdout_lines' + - '"single_quote=single '' quote" in environment_block.stdout_lines' + - '"hyphen-var=abc@123" in environment_block.stdout_lines' + # yaml escaping rules - (\\ == \), (\" == "), ('' == ') + - '"_-(){}[]<>*+-/\\?\"''!@#$%^&|;:i,.`~0=_-(){}[]<>*+-/\\?\"''!@#$%^&|;:i,.`~0" in environment_block.stdout_lines' + - '"‘key=value‚" in environment_block.stdout_lines' + +- name: test out become requires without become_user set + test_all_options: + register: become_system + +- name: assert become requires without become_user set + assert: + that: + - become_system is successful + - become_system.output == "S-1-5-18" + +- set_fact: + become_test_username: ansible_become_test + gen_pw: "{{ 'password123!' + lookup('password', '/dev/null chars=ascii_letters,digits length=8') }}" + +- name: create unprivileged user + win_user: + name: "{{ become_test_username }}" + password: "{{ gen_pw }}" + update_password: always + groups: Users + register: become_test_user_result + +- name: execute tests and ensure that test user is deleted regardless of success/failure + block: + - name: ensure current user is not the become user + win_shell: whoami + register: whoami_out + + - name: verify output + assert: + that: + - not whoami_out.stdout_lines[0].endswith(become_test_username) + + - name: get become user profile dir so we can clean it up later + vars: &become_vars + ansible_become_user: "{{ become_test_username }}" + ansible_become_password: "{{ gen_pw }}" + ansible_become_method: runas + ansible_become: yes + win_shell: $env:USERPROFILE + register: profile_dir_out + + - name: ensure profile dir contains test username (eg, if become fails silently, prevent deletion of real user profile) + assert: + that: + - become_test_username in profile_dir_out.stdout_lines[0] + + - name: test out become requires when become_user set + test_all_options: + vars: *become_vars + register: become_system + + - name: assert become requires when become_user set + assert: + that: + - become_system is successful + - become_system.output == become_test_user_result.sid + + always: + - name: ensure test user is deleted + win_user: + name: "{{ become_test_username }}" + state: absent + + - name: ensure test user profile is deleted + # NB: have to work around powershell limitation of long filenames until win_file fixes it + win_shell: rmdir /S /Q {{ profile_dir_out.stdout_lines[0] }} + args: + executable: cmd.exe + when: become_test_username in profile_dir_out.stdout_lines[0] + +- name: test common functions in exec + test_common_functions: + register: common_functions_res + +- name: assert test common functions in exec + assert: + that: + - not common_functions_res is failed + - common_functions_res.msg == "good" + +- name: get PS events containing module args or envvars created since test start + raw: | + $dt=[datetime]"{{ test_starttime.stdout|trim }}" + (Get-WinEvent -LogName Microsoft-Windows-Powershell/Operational | + ? 
{ $_.TimeCreated -ge $dt -and $_.Message -match "fail_module|hyphen-var" }).Count + register: ps_log_count + +- name: assert no PS events contain module args or envvars + assert: + that: + - ps_log_count.stdout | int == 0 diff --git a/test/integration/targets/win_fetch/aliases b/test/integration/targets/win_fetch/aliases new file mode 100644 index 00000000..4cd27b3c --- /dev/null +++ b/test/integration/targets/win_fetch/aliases @@ -0,0 +1 @@ +shippable/windows/group1 diff --git a/test/integration/targets/win_fetch/meta/main.yml b/test/integration/targets/win_fetch/meta/main.yml new file mode 100644 index 00000000..9f37e96c --- /dev/null +++ b/test/integration/targets/win_fetch/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_remote_tmp_dir diff --git a/test/integration/targets/win_fetch/tasks/main.yml b/test/integration/targets/win_fetch/tasks/main.yml new file mode 100644 index 00000000..78b6fa02 --- /dev/null +++ b/test/integration/targets/win_fetch/tasks/main.yml @@ -0,0 +1,212 @@ +# test code for the fetch module when using winrm connection +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: define host-specific host_output_dir + set_fact: + host_output_dir: "{{ output_dir }}/{{ inventory_hostname }}" + +- name: clean out the test directory + file: name={{ host_output_dir|mandatory }} state=absent + delegate_to: localhost + run_once: true + +- name: create the test directory + file: name={{ host_output_dir }} state=directory + delegate_to: localhost + run_once: true + +- name: fetch a small file + fetch: src="C:/Windows/win.ini" dest={{ host_output_dir }} + register: fetch_small + +- name: check fetch small result + assert: + that: + - "fetch_small.changed" + +- name: check file created by fetch small + stat: path={{ fetch_small.dest }} + delegate_to: localhost + register: fetch_small_stat + +- name: verify fetched small file exists locally + assert: + that: + - "fetch_small_stat.stat.exists" + - "fetch_small_stat.stat.isreg" + - "fetch_small_stat.stat.checksum == fetch_small.checksum" + +- name: fetch the same small file + fetch: src="C:/Windows/win.ini" dest={{ host_output_dir }} + register: fetch_small_again + +- name: check fetch small result again + assert: + that: + - "not fetch_small_again.changed" + +- name: fetch a small file to flat namespace + fetch: src="C:/Windows/win.ini" dest="{{ host_output_dir }}/" flat=yes + register: fetch_flat + +- name: check fetch flat result + assert: + that: + - "fetch_flat.changed" + +- name: check file created by fetch flat + stat: path="{{ host_output_dir }}/win.ini" + delegate_to: localhost + register: fetch_flat_stat + +- name: verify fetched file exists locally in host_output_dir + assert: + that: + - "fetch_flat_stat.stat.exists" + - "fetch_flat_stat.stat.isreg" + - "fetch_flat_stat.stat.checksum == fetch_flat.checksum" + +#- name: fetch a small file to flat directory (without trailing slash) +# fetch:
src="C:/Windows/win.ini" dest="{{ host_output_dir }}" flat=yes +# register: fetch_flat_dir + +#- name: check fetch flat to directory result +# assert: +# that: +# - "fetch_flat_dir is not changed" + +- name: fetch a large binary file + fetch: src="C:/Windows/explorer.exe" dest={{ host_output_dir }} + register: fetch_large + +- name: check fetch large binary file result + assert: + that: + - "fetch_large.changed" + +- name: check file created by fetch large binary + stat: path={{ fetch_large.dest }} + delegate_to: localhost + register: fetch_large_stat + +- name: verify fetched large file exists locally + assert: + that: + - "fetch_large_stat.stat.exists" + - "fetch_large_stat.stat.isreg" + - "fetch_large_stat.stat.checksum == fetch_large.checksum" + +- name: fetch a large binary file again + fetch: src="C:/Windows/explorer.exe" dest={{ host_output_dir }} + register: fetch_large_again + +- name: check fetch large binary file result again + assert: + that: + - "not fetch_large_again.changed" + +- name: fetch a small file using backslashes in src path + fetch: src="C:\\Windows\\system.ini" dest={{ host_output_dir }} + register: fetch_small_bs + +- name: check fetch small result with backslashes + assert: + that: + - "fetch_small_bs.changed" + +- name: check file created by fetch small with backslashes + stat: path={{ fetch_small_bs.dest }} + delegate_to: localhost + register: fetch_small_bs_stat + +- name: verify fetched small file with backslashes exists locally + assert: + that: + - "fetch_small_bs_stat.stat.exists" + - "fetch_small_bs_stat.stat.isreg" + - "fetch_small_bs_stat.stat.checksum == fetch_small_bs.checksum" + +- name: attempt to fetch a non-existent file - do not fail on missing + fetch: src="C:/this_file_should_not_exist.txt" dest={{ host_output_dir }} fail_on_missing=no + register: fetch_missing_nofail + +- name: check fetch missing no fail result + assert: + that: + - "fetch_missing_nofail is not failed" + - "fetch_missing_nofail.msg" + - "fetch_missing_nofail is not changed" + +- name: attempt to fetch a non-existent file - fail on missing + fetch: src="~/this_file_should_not_exist.txt" dest={{ host_output_dir }} fail_on_missing=yes + register: fetch_missing + ignore_errors: true + +- name: check fetch missing with failure + assert: + that: + - "fetch_missing is failed" + - "fetch_missing.msg" + - "fetch_missing is not changed" + +- name: attempt to fetch a non-existent file - fail on missing implicit + fetch: src="~/this_file_should_not_exist.txt" dest={{ host_output_dir }} + register: fetch_missing_implicit + ignore_errors: true + +- name: check fetch missing with failure on implicit + assert: + that: + - "fetch_missing_implicit is failed" + - "fetch_missing_implicit.msg" + - "fetch_missing_implicit is not changed" + +- name: attempt to fetch a directory + fetch: src="C:\\Windows" dest={{ host_output_dir }} + register: fetch_dir + ignore_errors: true + +- name: check fetch directory result + assert: + that: + # Doesn't fail anymore, only returns a message. 
+ - "fetch_dir is not changed" + - "fetch_dir.msg" + +- name: create file with special characters + raw: Set-Content -LiteralPath '{{ remote_tmp_dir }}\abc$not var''quote‘‘' -Value 'abc' + +- name: fetch file with special characters + fetch: + src: '{{ remote_tmp_dir }}\abc$not var''quote‘' + dest: '{{ host_output_dir }}/' + flat: yes + register: fetch_special_file + +- name: get content of fetched file + command: cat {{ (host_output_dir ~ "/abc$not var'quote‘") | quote }} + register: fetch_special_file_actual + delegate_to: localhost + +- name: assert fetch file with special characters + assert: + that: + - fetch_special_file is changed + - fetch_special_file.checksum == '34d4150adc3347f1dd8ce19fdf65b74d971ab602' + - fetch_special_file.dest == host_output_dir + "/abc$not var'quote‘" + - fetch_special_file_actual.stdout == 'abc' diff --git a/test/integration/targets/win_module_utils/aliases b/test/integration/targets/win_module_utils/aliases new file mode 100644 index 00000000..1eed2ecf --- /dev/null +++ b/test/integration/targets/win_module_utils/aliases @@ -0,0 +1,2 @@ +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/win_module_utils/library/csharp_util.ps1 b/test/integration/targets/win_module_utils/library/csharp_util.ps1 new file mode 100644 index 00000000..cf2dc452 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/csharp_util.ps1 @@ -0,0 +1,12 @@ +#1powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#AnsibleRequires -CSharpUtil Ansible.Test + +$result = @{ + res = [Ansible.Test.OutputTest]::GetString() + changed = $false +} + +Exit-Json -obj $result + diff --git a/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1 new file mode 100644 index 00000000..8ea3e061 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1 @@ -0,0 +1,5 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +Exit-Json @{ data="success" } diff --git a/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 new file mode 100644 index 00000000..d9c2e008 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 @@ -0,0 +1,6 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy + +Exit-Json @{ data="success" } + diff --git a/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1 new file mode 100644 index 00000000..652e1281 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1 @@ -0,0 +1,5 @@ +#!powershell + +# POWERSHELL_COMMON + +Exit-Json @{ data="success" } diff --git a/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 new file mode 100644 index 00000000..d5d328a5 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 @@ -0,0 +1,4 @@ +#!powershell +# POWERSHELL_COMMON + +Exit-Json @{ data="success" } diff --git a/test/integration/targets/win_module_utils/library/recursive_requires.ps1 b/test/integration/targets/win_module_utils/library/recursive_requires.ps1 new file mode 100644 index 
00000000..db8c23e9 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/recursive_requires.ps1 @@ -0,0 +1,13 @@ +#!powershell + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.Recursive3 +#Requires -Version 2 + +$ErrorActionPreference = "Stop" + +$result = @{ + changed = $false + value = Get-Test3 +} +Exit-Json -obj $result diff --git a/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1 b/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1 new file mode 100644 index 00000000..0a1c21a3 --- /dev/null +++ b/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1 @@ -0,0 +1,6 @@ +#!powershell + +# this should fail +#Requires -Module Ansible.ModuleUtils.BogusModule + +Exit-Json @{ data="success" } diff --git a/test/integration/targets/win_module_utils/library/uses_local_utils.ps1 b/test/integration/targets/win_module_utils/library/uses_local_utils.ps1 new file mode 100644 index 00000000..3dfc940c --- /dev/null +++ b/test/integration/targets/win_module_utils/library/uses_local_utils.ps1 @@ -0,0 +1,9 @@ +#!powershell + +# use different cases, spacing and plural of 'module' to exercise flexible powershell dialect +#ReQuiReS -ModUleS Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.ValidTestModule + +$o = CustomFunction + +Exit-Json @{data=$o} diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1 new file mode 100644 index 00000000..a63ece34 --- /dev/null +++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1 @@ -0,0 +1,9 @@ +Function Get-Test1 { + <# + .SYNOPSIS + Test function + #> + return "Get-Test1" +} + +Export-ModuleMember -Function Get-Test1 diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1 new file mode 100644 index 00000000..f9c07ca7 --- /dev/null +++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1 @@ -0,0 +1,12 @@ +#Requires -Module Ansible.ModuleUtils.Recursive1 +#Requires -Module Ansible.ModuleUtils.Recursive3 + +Function Get-Test2 { + <# + .SYNOPSIS + Test function + #> + return "Get-Test2, 1: $(Get-Test1), 3: $(Get-NewTest3)" +} + +Export-ModuleMember -Function Get-Test2 diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1 new file mode 100644 index 00000000..ce6e70c1 --- /dev/null +++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1 @@ -0,0 +1,20 @@ +#Requires -Module Ansible.ModuleUtils.Recursive2 +#Requires -Version 3.0 + +Function Get-Test3 { + <# + .SYNOPSIS + Test function + #> + return "Get-Test3: 2: $(Get-Test2)" +} + +Function Get-NewTest3 { + <# + .SYNOPSIS + Test function + #> + return "Get-NewTest3" +} + +Export-ModuleMember -Function Get-Test3, Get-NewTest3 diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1 new file mode 100644 index 00000000..a60b799f --- /dev/null +++ 
b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1 @@ -0,0 +1,3 @@ +Function CustomFunction { + return "ValueFromCustomFunction" +} diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs b/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs new file mode 100644 index 00000000..9556d9af --- /dev/null +++ b/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs @@ -0,0 +1,26 @@ +//AssemblyReference -Name System.Web.Extensions.dll + +using System; +using System.Collections.Generic; +using System.Web.Script.Serialization; + +namespace Ansible.Test +{ + public class OutputTest + { + public static string GetString() + { + Dictionary<string, object> obj = new Dictionary<string, object>(); + obj["a"] = "a"; + obj["b"] = 1; + return ToJson(obj); + } + + private static string ToJson(object obj) + { + JavaScriptSerializer jss = new JavaScriptSerializer(); + return jss.Serialize(obj); + } + } +} + diff --git a/test/integration/targets/win_module_utils/tasks/main.yml b/test/integration/targets/win_module_utils/tasks/main.yml new file mode 100644 index 00000000..87f2592c --- /dev/null +++ b/test/integration/targets/win_module_utils/tasks/main.yml @@ -0,0 +1,71 @@ +- name: call old WANTS_JSON module + legacy_only_old_way: + register: old_way + +- assert: + that: + - old_way.data == 'success' + +- name: call module with only legacy requires + legacy_only_new_way: + register: new_way + +- assert: + that: + - new_way.data == 'success' + +- name: call old WANTS_JSON module with windows line endings + legacy_only_old_way_win_line_ending: + register: old_way_win + +- assert: + that: + - old_way_win.data == 'success' + +- name: call module with only legacy requires and windows line endings + legacy_only_new_way_win_line_ending: + register: new_way_win + +- assert: + that: + - new_way_win.data == 'success' + +- name: call module with local module_utils + uses_local_utils: + register: local_utils + +- assert: + that: + - local_utils.data == "ValueFromCustomFunction" + +- name: call module that imports bogus Ansible-named module_utils + uses_bogus_utils: + ignore_errors: true + register: bogus_utils + +- assert: + that: + - bogus_utils is failed + - bogus_utils.msg is search("Could not find") + +- name: call module that imports module_utils with further imports + recursive_requires: + register: recursive_requires + vars: + # Our coverage runner does not work with recursive requires.
This is a limitation of PowerShell so we need to + # disable coverage for this task + _ansible_coverage_remote_output: '' + +- assert: + that: + - 'recursive_requires.value == "Get-Test3: 2: Get-Test2, 1: Get-Test1, 3: Get-NewTest3"' + +- name: call module with C# reference + csharp_util: + register: csharp_res + +- name: assert call module with C# reference + assert: + that: + - not csharp_res is failed + - csharp_res.res == '{"a":"a","b":1}' diff --git a/test/integration/targets/win_raw/aliases b/test/integration/targets/win_raw/aliases new file mode 100644 index 00000000..1eed2ecf --- /dev/null +++ b/test/integration/targets/win_raw/aliases @@ -0,0 +1,2 @@ +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/win_raw/tasks/main.yml b/test/integration/targets/win_raw/tasks/main.yml new file mode 100644 index 00000000..31f90b85 --- /dev/null +++ b/test/integration/targets/win_raw/tasks/main.yml @@ -0,0 +1,143 @@ +# test code for the raw module when using winrm connection +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +- name: run getmac + raw: getmac + register: getmac_result + +- name: assert that getmac ran + assert: + that: + - "getmac_result.rc == 0" + - "getmac_result.stdout" + - "not getmac_result.stderr" + - "getmac_result is not failed" + - "getmac_result is changed" + +- name: run ipconfig with /all argument + raw: ipconfig /all + register: ipconfig_result + +- name: assert that ipconfig ran with /all argument + assert: + that: + - "ipconfig_result.rc == 0" + - "ipconfig_result.stdout" + - "'Physical Address' in ipconfig_result.stdout" + - "not ipconfig_result.stderr" + - "ipconfig_result is not failed" + - "ipconfig_result is changed" + +- name: run ipconfig with invalid argument + raw: ipconfig /badswitch + register: ipconfig_invalid_result + ignore_errors: true + +- name: assert that ipconfig with invalid argument failed + assert: + that: + - "ipconfig_invalid_result.rc != 0" + - "ipconfig_invalid_result.stdout" # ipconfig displays errors on stdout. +# - "not ipconfig_invalid_result.stderr" + - "ipconfig_invalid_result is failed" + - "ipconfig_invalid_result is changed" + +- name: run an unknown command + raw: uname -a + register: unknown_result + ignore_errors: true + +- name: assert that an unknown command failed + assert: + that: + - "unknown_result.rc != 0" + - "not unknown_result.stdout" + - "unknown_result.stderr" # An unknown command displays error on stderr.
+ - "unknown_result is failed" + - "unknown_result is changed" + +- name: run a command that takes longer than 60 seconds + raw: Start-Sleep -s 75 + register: sleep_command + +- name: assert that the sleep command ran + assert: + that: + - "sleep_command.rc == 0" + - "not sleep_command.stdout" + - "not sleep_command.stderr" + - "sleep_command is not failed" + - "sleep_command is changed" + +- name: run a raw command with key=value arguments + raw: echo wwe=raw + register: raw_result + +- name: make sure raw is really raw and not removing key=value arguments + assert: + that: + - "raw_result.stdout_lines[0] == 'wwe=raw'" + +- name: unicode tests for winrm + when: ansible_connection != 'psrp' # Write-Host does not work over PSRP + block: + - name: run a raw command with unicode chars and quoted args (from https://github.com/ansible/ansible-modules-core/issues/1929) + raw: Write-Host --% icacls D:\somedir\ /grant "! ЗАО. Руководство":F + register: raw_result2 + + - name: make sure raw passes command as-is and doesn't split/rejoin args + assert: + that: + - "raw_result2.stdout_lines[0] == '--% icacls D:\\\\somedir\\\\ /grant \"! ЗАО. Руководство\":F'" + +- name: unicode tests for psrp + when: ansible_connection == 'psrp' + block: + # Cannot test unicode passed into separate exec as PSRP doesn't run with a preset CP of 65001 which reuslts in ? for unicode chars + - name: run a raw command with unicode chars + raw: Write-Output "! ЗАО. Руководство" + register: raw_result2 + + - name: make sure raw passes command as-is and doesn't split/rejoin args + assert: + that: + - "raw_result2.stdout_lines[0] == '! ЗАО. Руководство'" + +# Assumes MaxShellsPerUser == 30 (the default) + +- name: test raw + with_items to verify that winrm connection is reused for each item + raw: echo "{{item}}" + with_items: "{{range(32)|list}}" + register: raw_with_items_result + +- name: check raw + with_items result + assert: + that: + - "raw_with_items_result is not failed" + - "raw_with_items_result.results|length == 32" + +# TODO: this test fails, since we're back to passing raw commands without modification +#- name: test raw with job to ensure that preamble-free InputEncoding is working +# raw: Start-Job { echo yo } | Receive-Job -Wait +# register: raw_job_result +# +#- name: check raw with job result +# assert: +# that: +# - raw_job_result is successful +# - raw_job_result.stdout_lines[0] == 'yo' diff --git a/test/integration/targets/win_script/aliases b/test/integration/targets/win_script/aliases new file mode 100644 index 00000000..1eed2ecf --- /dev/null +++ b/test/integration/targets/win_script/aliases @@ -0,0 +1,2 @@ +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/win_script/defaults/main.yml b/test/integration/targets/win_script/defaults/main.yml new file mode 100644 index 00000000..a2c6475e --- /dev/null +++ b/test/integration/targets/win_script/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +# Parameters to pass to test scripts. 
+test_win_script_value: VaLuE +test_win_script_splat: "@{This='THIS'; That='THAT'; Other='OTHER'}" diff --git a/test/integration/targets/win_script/files/fail.bat b/test/integration/targets/win_script/files/fail.bat new file mode 100644 index 00000000..02562a8a --- /dev/null +++ b/test/integration/targets/win_script/files/fail.bat @@ -0,0 +1 @@ +bang-run-a-thing-that-doesnt-exist diff --git a/test/integration/targets/win_script/files/space path/test_script.ps1 b/test/integration/targets/win_script/files/space path/test_script.ps1 new file mode 100644 index 00000000..10dd9c8c --- /dev/null +++ b/test/integration/targets/win_script/files/space path/test_script.ps1 @@ -0,0 +1 @@ +Write-Output "Ansible supports spaces in the path to the script." diff --git a/test/integration/targets/win_script/files/test_script.bat b/test/integration/targets/win_script/files/test_script.bat new file mode 100644 index 00000000..05cc2d19 --- /dev/null +++ b/test/integration/targets/win_script/files/test_script.bat @@ -0,0 +1,2 @@ +@ECHO OFF +ECHO We can even run a batch file! diff --git a/test/integration/targets/win_script/files/test_script.cmd b/test/integration/targets/win_script/files/test_script.cmd new file mode 100644 index 00000000..0e36312d --- /dev/null +++ b/test/integration/targets/win_script/files/test_script.cmd @@ -0,0 +1,2 @@ +@ECHO OFF +ECHO We can even run a batch file with cmd extension! diff --git a/test/integration/targets/win_script/files/test_script.ps1 b/test/integration/targets/win_script/files/test_script.ps1 new file mode 100644 index 00000000..9978f363 --- /dev/null +++ b/test/integration/targets/win_script/files/test_script.ps1 @@ -0,0 +1,2 @@ +# Test script to make sure the Ansible script module works. +Write-Host "Woohoo! We can run a PowerShell script via Ansible!" diff --git a/test/integration/targets/win_script/files/test_script_bool.ps1 b/test/integration/targets/win_script/files/test_script_bool.ps1 new file mode 100644 index 00000000..970dedce --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_bool.ps1 @@ -0,0 +1,6 @@ +Param( +[bool]$boolvariable +) + +Write-Output $boolvariable.GetType().FullName +Write-Output $boolvariable diff --git a/test/integration/targets/win_script/files/test_script_creates_file.ps1 b/test/integration/targets/win_script/files/test_script_creates_file.ps1 new file mode 100644 index 00000000..47f85a2d --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_creates_file.ps1 @@ -0,0 +1,3 @@ +# Test script to create a file. + +echo $null > $args[0] diff --git a/test/integration/targets/win_script/files/test_script_removes_file.ps1 b/test/integration/targets/win_script/files/test_script_removes_file.ps1 new file mode 100644 index 00000000..f0549a5b --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_removes_file.ps1 @@ -0,0 +1,3 @@ +# Test script to remove a file. 
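+# (the file to remove is passed as the first positional argument, $args[0])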
+ +Remove-Item $args[0] -Force diff --git a/test/integration/targets/win_script/files/test_script_whoami.ps1 b/test/integration/targets/win_script/files/test_script_whoami.ps1 new file mode 100644 index 00000000..79a1c475 --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_whoami.ps1 @@ -0,0 +1,2 @@ +whoami.exe +Write-Output "finished" diff --git a/test/integration/targets/win_script/files/test_script_with_args.ps1 b/test/integration/targets/win_script/files/test_script_with_args.ps1 new file mode 100644 index 00000000..520aafa3 --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_with_args.ps1 @@ -0,0 +1,7 @@ +# Test script to make sure the Ansible script module works when arguments are +# passed to the script. + +foreach ($i in $args) +{ + Write-Host $i; +} diff --git a/test/integration/targets/win_script/files/test_script_with_env.ps1 b/test/integration/targets/win_script/files/test_script_with_env.ps1 new file mode 100644 index 00000000..b54fd928 --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_with_env.ps1 @@ -0,0 +1 @@ +$env:taskenv \ No newline at end of file diff --git a/test/integration/targets/win_script/files/test_script_with_errors.ps1 b/test/integration/targets/win_script/files/test_script_with_errors.ps1 new file mode 100644 index 00000000..2d60dc1f --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_with_errors.ps1 @@ -0,0 +1,9 @@ +# Test script to make sure we handle non-zero exit codes. + +trap +{ + Write-Error -ErrorRecord $_ + exit 1; +} + +throw "Oh noes I has an error" diff --git a/test/integration/targets/win_script/files/test_script_with_splatting.ps1 b/test/integration/targets/win_script/files/test_script_with_splatting.ps1 new file mode 100644 index 00000000..429a9a3b --- /dev/null +++ b/test/integration/targets/win_script/files/test_script_with_splatting.ps1 @@ -0,0 +1,6 @@ +# Test script to make sure the Ansible script module works when arguments are +# passed via splatting (http://technet.microsoft.com/en-us/magazine/gg675931.aspx) + +Write-Host $args.This +Write-Host $args.That +Write-Host $args.Other diff --git a/test/integration/targets/win_script/tasks/main.yml b/test/integration/targets/win_script/tasks/main.yml new file mode 100644 index 00000000..4d57eda2 --- /dev/null +++ b/test/integration/targets/win_script/tasks/main.yml @@ -0,0 +1,316 @@ +# test code for the script module when using winrm connection +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ +- name: run setup to allow skipping OS-specific tests + setup: + gather_subset: min + +- name: get tempdir path + raw: $env:TEMP + register: tempdir + +- name: set script path dynamically + set_fact: + test_win_script_filename: "{{ tempdir.stdout_lines[0] }}/testing_win_script.txt" + +- name: run simple test script + script: test_script.ps1 + register: test_script_result + +- name: check that script ran + assert: + that: + - "test_script_result.rc == 0" + - "test_script_result.stdout" + - "'Woohoo' in test_script_result.stdout" + - "not test_script_result.stderr" + - "test_script_result is not failed" + - "test_script_result is changed" + +- name: run test script that takes arguments including a unicode char + script: test_script_with_args.ps1 /this /that /Ӧther + register: test_script_with_args_result + +- name: check that script ran and received arguments and returned unicode + assert: + that: + - "test_script_with_args_result.rc == 0" + - "test_script_with_args_result.stdout" + - "test_script_with_args_result.stdout_lines[0] == '/this'" + - "test_script_with_args_result.stdout_lines[1] == '/that'" + - "test_script_with_args_result.stdout_lines[2] == '/Ӧther'" + - "not test_script_with_args_result.stderr" + - "test_script_with_args_result is not failed" + - "test_script_with_args_result is changed" + +# Bug: https://github.com/ansible/ansible/issues/32850 +- name: set fact of long string + set_fact: + long_string: "{{ lookup('pipe', 'printf \"a%.0s\" {1..1000}') }}" + +- name: run test script with args that exceed the stdin buffer + script: test_script_with_args.ps1 {{ long_string }} + register: test_script_with_large_args_result + +- name: check that script ran and received arguments correctly + assert: + that: + - test_script_with_large_args_result.rc == 0 + - not test_script_with_large_args_result.stderr + - test_script_with_large_args_result is not failed + - test_script_with_large_args_result is changed + +- name: check that script ran and received arguments correctly with winrm output + assert: + that: + - test_script_with_large_args_result.stdout == long_string + "\r\n" + when: ansible_connection != 'psrp' + +- name: check that script ran and received arguments correctly with psrp output + assert: + that: + - test_script_with_large_args_result.stdout == long_string + when: ansible_connection == 'psrp' + +- name: run test script that takes parameters passed via splatting + script: test_script_with_splatting.ps1 @{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'} + register: test_script_with_splatting_result + +- name: check that script ran and received parameters via splatting + assert: + that: + - "test_script_with_splatting_result.rc == 0" + - "test_script_with_splatting_result.stdout" + - "test_script_with_splatting_result.stdout_lines[0] == 'this'" + - "test_script_with_splatting_result.stdout_lines[1] == test_win_script_value" + - "test_script_with_splatting_result.stdout_lines[2] == 'other'" + - "not test_script_with_splatting_result.stderr" + - "test_script_with_splatting_result is not failed" + - "test_script_with_splatting_result is changed" + +- name: run test script that takes splatted parameters from a variable + script: test_script_with_splatting.ps1 {{ test_win_script_splat }} + register: test_script_with_splatting2_result + +- name: check that script ran and received parameters via splatting from a variable + assert: + that: + - "test_script_with_splatting2_result.rc == 0" + - "test_script_with_splatting2_result.stdout" + - 
"test_script_with_splatting2_result.stdout_lines[0] == 'THIS'" + - "test_script_with_splatting2_result.stdout_lines[1] == 'THAT'" + - "test_script_with_splatting2_result.stdout_lines[2] == 'OTHER'" + - "not test_script_with_splatting2_result.stderr" + - "test_script_with_splatting2_result is not failed" + - "test_script_with_splatting2_result is changed" + +- name: run test script that has errors + script: test_script_with_errors.ps1 + register: test_script_with_errors_result + ignore_errors: true + +- name: check that script ran but failed with errors + assert: + that: + - "test_script_with_errors_result.rc != 0" + - "not test_script_with_errors_result.stdout" + - "test_script_with_errors_result.stderr" + - "test_script_with_errors_result is failed" + - "test_script_with_errors_result is changed" + +- name: cleanup test file if it exists + raw: Remove-Item "{{ test_win_script_filename }}" -Force + ignore_errors: true + +- name: run test script that creates a file + script: test_script_creates_file.ps1 {{ test_win_script_filename }} + args: + creates: "{{ test_win_script_filename }}" + register: test_script_creates_file_result + +- name: check that script ran and indicated a change + assert: + that: + - "test_script_creates_file_result.rc == 0" + - "not test_script_creates_file_result.stdout" + - "not test_script_creates_file_result.stderr" + - "test_script_creates_file_result is not failed" + - "test_script_creates_file_result is changed" + +- name: run test script that creates a file again + script: test_script_creates_file.ps1 {{ test_win_script_filename }} + args: + creates: "{{ test_win_script_filename }}" + register: test_script_creates_file_again_result + +- name: check that the script did not run since the remote file exists + assert: + that: + - "test_script_creates_file_again_result is not failed" + - "test_script_creates_file_again_result is not changed" + - "test_script_creates_file_again_result is skipped" + +- name: run test script that removes a file + script: test_script_removes_file.ps1 {{ test_win_script_filename }} + args: + removes: "{{ test_win_script_filename }}" + register: test_script_removes_file_result + +- name: check that the script ran since the remote file exists + assert: + that: + - "test_script_removes_file_result.rc == 0" + - "not test_script_removes_file_result.stdout" + - "not test_script_removes_file_result.stderr" + - "test_script_removes_file_result is not failed" + - "test_script_removes_file_result is changed" + +- name: run test script that removes a file again + script: test_script_removes_file.ps1 {{ test_win_script_filename }} + args: + removes: "{{ test_win_script_filename }}" + register: test_script_removes_file_again_result + +- name: check that the script did not run since the remote file does not exist + assert: + that: + - "test_script_removes_file_again_result is not failed" + - "test_script_removes_file_again_result is not changed" + - "test_script_removes_file_again_result is skipped" + +- name: skip batch tests on 6.0 (UTF8 codepage prevents it from working, see https://github.com/ansible/ansible/issues/21915) + block: + - name: run simple batch file + script: test_script.bat + register: test_batch_result + + - name: check that batch file ran + assert: + that: + - "test_batch_result.rc == 0" + - "test_batch_result.stdout" + - "'batch' in test_batch_result.stdout" + - "not test_batch_result.stderr" + - "test_batch_result is not failed" + - "test_batch_result is changed" + + - name: run simple batch file with .cmd extension + script: 
test_script.cmd + register: test_cmd_result + + - name: check that batch file with .cmd extension ran + assert: + that: + - "test_cmd_result.rc == 0" + - "test_cmd_result.stdout" + - "'cmd extension' in test_cmd_result.stdout" + - "not test_cmd_result.stderr" + - "test_cmd_result is not failed" + - "test_cmd_result is changed" + + - name: run simple batch file with .bat extension that fails + script: fail.bat + ignore_errors: true + register: test_batch_result + + - name: check that batch file with .bat extension reported failure + assert: + that: + - test_batch_result.rc == 1 + - test_batch_result.stdout + - test_batch_result.stderr + - test_batch_result is failed + - test_batch_result is changed + when: not ansible_distribution_version.startswith('6.0') + +- name: run test script that takes a boolean parameter + script: test_script_bool.ps1 $false # use false as that can pick up more errors + register: test_script_bool_result + +- name: check that the script ran and the parameter was treated as a boolean + assert: + that: + - test_script_bool_result.stdout_lines[0] == 'System.Boolean' + - test_script_bool_result.stdout_lines[1] == 'False' + +- name: run test script that uses envvars + script: test_script_with_env.ps1 + environment: + taskenv: task + register: test_script_env_result + +- name: ensure that script ran and that environment var was passed + assert: + that: + - test_script_env_result is successful + - test_script_env_result.stdout_lines[0] == 'task' + +# check mode +- name: Run test script that creates a file in check mode + script: test_script_creates_file.ps1 {{ test_win_script_filename }} + args: + creates: "{{ test_win_script_filename }}" + check_mode: yes + register: test_script_creates_file_check_mode + +- name: Get state of file created by script + win_stat: + path: "{{ test_win_script_filename }}" + register: create_file_stat + +- name: Assert that a change was reported but the script did not make changes + assert: + that: + - test_script_creates_file_check_mode is changed + - not create_file_stat.stat.exists + +- name: Run test script that creates a file + script: test_script_creates_file.ps1 {{ test_win_script_filename }} + args: + creates: "{{ test_win_script_filename }}" + +- name: Run test script that removes a file in check mode + script: test_script_removes_file.ps1 {{ test_win_script_filename }} + args: + removes: "{{ test_win_script_filename }}" + check_mode: yes + register: test_script_removes_file_check_mode + +- name: Get state of file removed by script + win_stat: + path: "{{ test_win_script_filename }}" + register: remove_file_stat + +- name: Assert that a change was reported but the script did not make changes + assert: + that: + - test_script_removes_file_check_mode is changed + - remove_file_stat.stat.exists + +- name: run test script with become that outputs 2 lines + script: test_script_whoami.ps1 + register: test_script_result_become + become: yes + become_user: SYSTEM + become_method: runas + +- name: check that the script ran and we get both outputs on new lines + assert: + that: + - test_script_result_become.stdout_lines[0]|lower == 'nt authority\\system' + - test_script_result_become.stdout_lines[1] == 'finished' diff --git a/test/integration/targets/windows-minimal/aliases b/test/integration/targets/windows-minimal/aliases new file mode 100644 index 00000000..479948a1 --- /dev/null +++ b/test/integration/targets/windows-minimal/aliases @@ -0,0 +1,4 @@ +shippable/windows/group1 +shippable/windows/minimal +shippable/windows/smoketest +windows 
diff --git a/test/integration/targets/windows-minimal/library/win_ping.ps1 b/test/integration/targets/windows-minimal/library/win_ping.ps1
new file mode 100644
index 00000000..c848b912
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping.ps1
@@ -0,0 +1,21 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+    options = @{
+        data = @{ type = "str"; default = "pong" }
+    }
+    supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+    throw "boom"
+}
+
+$module.Result.ping = $data
+$module.ExitJson()
diff --git a/test/integration/targets/windows-minimal/library/win_ping.py b/test/integration/targets/windows-minimal/library/win_ping.py
new file mode 100644
index 00000000..6d35f379
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_ping
+version_added: "1.7"
+short_description: A windows version of the classic ping module
+description:
+- Checks management connectivity of a windows host.
+- This is NOT ICMP ping, this is just a trivial test module.
+- For non-Windows targets, use the M(ping) module instead.
+- For Network targets, use the M(net_ping) module instead.
+options:
+  data:
+    description:
+    - Alternate data to return instead of 'pong'.
+    - If this parameter is set to C(crash), the module will cause an exception.
+    type: str
+    default: pong
+seealso:
+- module: ping
+author:
+- Chris Church (@cchurch)
+'''

+EXAMPLES = r'''
+# Test connectivity to a windows host
+# ansible winserver -m win_ping
+
+- name: Example from an Ansible Playbook
+  win_ping:
+
+- name: Induce an exception to see what happens
+  win_ping:
+    data: crash
+'''
+
+RETURN = r'''
+ping:
+  description: Value provided with the data parameter.
+  returned: success
+  type: str
+  sample: pong
+'''
diff --git a/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1 b/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1
new file mode 100644
index 00000000..f1704964
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1
@@ -0,0 +1,31 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+    changed = $false
+    ping = "pong"
+};
+
+# Test that Set-Attr will replace an existing attribute.
+Set-Attr $result "ping" $data
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1 b/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1
new file mode 100644
index 00000000..508174af
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$params.thisPropertyDoesNotExist
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+    changed = $false
+    ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1 b/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1
new file mode 100644
index 00000000..d4c9f07a
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$blah = 'I can't quote my strings correctly.'
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+    changed = $false
+    ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_throw.ps1 b/test/integration/targets/windows-minimal/library/win_ping_throw.ps1
new file mode 100644
index 00000000..7306f4d2
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_throw.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+    changed = $false
+    ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1 b/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1
new file mode 100644
index 00000000..09e3b7cb
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw "no ping for you"
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+    changed = $false
+    ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/tasks/main.yml b/test/integration/targets/windows-minimal/tasks/main.yml
new file mode 100644
index 00000000..a7e6ba7f
--- /dev/null
+++ b/test/integration/targets/windows-minimal/tasks/main.yml
@@ -0,0 +1,67 @@
+# test code for the win_ping module
+# (c) 2014, Chris Church
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+- name: test win_ping
+  action: win_ping
+  register: win_ping_result
+
+- name: check win_ping result
+  assert:
+    that:
+    - win_ping_result is not failed
+    - win_ping_result is not changed
+    - win_ping_result.ping == 'pong'
+
+- name: test win_ping with data
+  win_ping:
+    data: ☠
+  register: win_ping_with_data_result
+
+- name: check win_ping result with data
+  assert:
+    that:
+    - win_ping_with_data_result is not failed
+    - win_ping_with_data_result is not changed
+    - win_ping_with_data_result.ping == '☠'
+
+- name: test win_ping.ps1 with data as complex args
+  # win_ping.ps1: # TODO: do we want to actually support this? no other tests that I can see...
+ win_ping: + data: bleep + register: win_ping_ps1_result + +- name: check win_ping.ps1 result with data + assert: + that: + - win_ping_ps1_result is not failed + - win_ping_ps1_result is not changed + - win_ping_ps1_result.ping == 'bleep' + +- name: test win_ping using data=crash so that it throws an exception + win_ping: + data: crash + register: win_ping_crash_result + ignore_errors: yes + +- name: check win_ping_crash result + assert: + that: + - win_ping_crash_result is failed + - win_ping_crash_result is not changed + - 'win_ping_crash_result.msg == "Unhandled exception while executing module: boom"' + - '"throw \"boom\"" in win_ping_crash_result.exception' diff --git a/test/integration/targets/windows-paths/aliases b/test/integration/targets/windows-paths/aliases new file mode 100644 index 00000000..cf714783 --- /dev/null +++ b/test/integration/targets/windows-paths/aliases @@ -0,0 +1,3 @@ +windows +shippable/windows/group1 +shippable/windows/smoketest diff --git a/test/integration/targets/windows-paths/tasks/main.yml b/test/integration/targets/windows-paths/tasks/main.yml new file mode 100644 index 00000000..4d222659 --- /dev/null +++ b/test/integration/targets/windows-paths/tasks/main.yml @@ -0,0 +1,191 @@ +- name: Set variables in YAML syntax + set_fact: + no_quotes_single: C:\Windows\Temp + single_quotes_single: 'C:\Windows\Temp' +# double_quotes_single: "C:\Windows\Temp" + no_quotes_double: C:\\Windows\\Temp + single_quotes_double: 'C:\\Windows\\Temp' + double_quotes_double: "C:\\Windows\\Temp" + no_quotes_slash: C:/Windows/Temp + no_quotes_trailing: C:\Windows\Temp\ + single_quotes_trailing: 'C:\Windows\Temp\' +# double_quotes_trailing: "C:\Windows\Temp\" + good: C:\Windows\Temp + works1: C:\\Windows\\Temp + works2: C:/Windows/Temp +# fail: "C:\Windows\Temp" + trailing: C:\Windows\Temp\ + register: yaml_syntax + +- assert: + that: + - no_quotes_single == good + - single_quotes_single == good +# - double_quotes_single == fail + - no_quotes_double == works1 + - single_quotes_double == works1 + - double_quotes_double == good + - no_quotes_slash == works2 + - no_quotes_trailing == trailing + - single_quotes_trailing == trailing +# - double_quotes_trailing == fail + - good != works1 + - good != works2 + - good != trailing + - works1 != works2 + - works1 != trailing + - works2 != trailing + +- name: Test good path {{ good }} + win_stat: + path: '{{ good }}' + register: good_result + +- assert: + that: + - good_result is successful + - good_result.stat.attributes == 'Directory' + - good_result.stat.exists == true + - good_result.stat.path == good + +- name: Test works1 path {{ works1 }} + win_stat: + path: '{{ works1 }}' + register: works1_result + +- assert: + that: + - works1_result is successful + - works1_result.stat.attributes == 'Directory' + - works1_result.stat.exists == true + - works1_result.stat.path == good + +- name: Test works2 path {{ works2 }} + win_stat: + path: '{{ works2 }}' + register: works2_result + +- assert: + that: + - works2_result is successful + - works2_result.stat.attributes == 'Directory' + - works2_result.stat.exists == true + - works2_result.stat.path == good + +- name: Test trailing path {{ trailing }} + win_stat: + path: '{{ trailing }}' + register: trailing_result + +- assert: + that: + - trailing_result is successful + - trailing_result.stat.attributes == 'Directory' + - trailing_result.stat.exists == true + - trailing_result.stat.path == trailing + +- name: Set variables in key=value syntax + set_fact: + no_quotes_single=C:\Windows\Temp + 
single_quotes_single='C:\Windows\Temp' + double_quotes_single="C:\Windows\Temp" + no_quotes_single_tab=C:\Windows\temp + single_quotes_single_tab='C:\Windows\temp' + double_quotes_single_tab="C:\Windows\temp" + no_quotes_double=C:\\Windows\\Temp + single_quotes_double='C:\\Windows\\Temp' + double_quotes_double="C:\\Windows\\Temp" + no_quotes_slash=C:/Windows/Temp + no_quotes_trailing=C:\Windows\Temp\ + good=C:\Windows\Temp + works1=C:\\Windows\\Temp + works2=C:/Windows/Temp + fail="C:\Windows\Temp" + trailing=C:\Windows\Temp\ + tab=C:\Windows\x09emp + eof=foobar +# single_quotes_trailing='C:\Windows\Temp\' +# double_quotes_trailing="C:\Windows\Temp\" + register: legacy_syntax + +- assert: + that: + - no_quotes_single == good + - single_quotes_single == good + - double_quotes_single == good + - no_quotes_double == works1 + - single_quotes_double == works1 + - double_quotes_double == works1 + - no_quotes_slash == works2 + - no_quotes_single_tab == tab + - single_quotes_single_tab == tab + - double_quotes_single_tab == tab + - no_quotes_trailing == trailing + - good == works1 + - good != works2 + - good != tab + - good != trailing + - works1 != works2 + - works1 != tab + - works1 != trailing + - works2 != tab + - works2 != trailing + - tab != trailing + +- name: Test good path {{ good }} + win_stat: + path: '{{ good }}' + register: good_result + +- assert: + that: + - good_result is successful + - good_result.stat.attributes == 'Directory' + - good_result.stat.exists == true + - good_result.stat.path == good + +- name: Test works1 path {{ works1 }} + win_stat: + path: '{{ works1 }}' + register: works1_result + +- assert: + that: + - works1_result is successful + - works1_result.stat.attributes == 'Directory' + - works1_result.stat.exists == true + - works1_result.stat.path == good + +- name: Test works2 path {{ works2 }} + win_stat: + path: '{{ works2 }}' + register: works2_result + +- assert: + that: + - works2_result is successful + - works2_result.stat.attributes == 'Directory' + - works2_result.stat.exists == true + - works2_result.stat.path == good + +- name: Test trailing path {{ trailing }} + win_stat: + path: '{{ trailing }}' + register: trailing_result + +- assert: + that: + - trailing_result is successful + - trailing_result.stat.attributes == 'Directory' + - trailing_result.stat.exists == true + - trailing_result.stat.path == trailing + +- name: Test tab path {{ tab }} + win_stat: + path: '{{ tab }}' + register: tab_result + ignore_errors: yes + +- assert: + that: + - tab_result is failed diff --git a/test/integration/targets/yum/aliases b/test/integration/targets/yum/aliases new file mode 100644 index 00000000..5aba303d --- /dev/null +++ b/test/integration/targets/yum/aliases @@ -0,0 +1,6 @@ +destructive +shippable/posix/group4 +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/test/integration/targets/yum/files/yum.conf b/test/integration/targets/yum/files/yum.conf new file mode 100644 index 00000000..5a5fca60 --- /dev/null +++ b/test/integration/targets/yum/files/yum.conf @@ -0,0 +1,5 @@ +[main] +gpgcheck=1 +installonly_limit=3 +clean_requirements_on_remove=True +tsflags=nodocs diff --git a/test/integration/targets/yum/meta/main.yml b/test/integration/targets/yum/meta/main.yml new file mode 100644 index 00000000..34d81261 --- /dev/null +++ b/test/integration/targets/yum/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - prepare_tests + - setup_rpm_repo + - setup_remote_tmp_dir diff --git a/test/integration/targets/yum/tasks/check_mode_consistency.yml 
b/test/integration/targets/yum/tasks/check_mode_consistency.yml new file mode 100644 index 00000000..e2a99d95 --- /dev/null +++ b/test/integration/targets/yum/tasks/check_mode_consistency.yml @@ -0,0 +1,61 @@ +- name: install htop in check mode to verify changes dict returned + yum: + name: htop + state: present + check_mode: yes + register: yum_changes_check_mode_result + +- name: install verify changes dict returned in check mode + assert: + that: + - "yum_changes_check_mode_result is success" + - "yum_changes_check_mode_result is changed" + - "'changes' in yum_changes_check_mode_result" + - "'installed' in yum_changes_check_mode_result['changes']" + - "'htop' in yum_changes_check_mode_result['changes']['installed']" + +- name: install htop to verify changes dict returned + yum: + name: htop + state: present + register: yum_changes_result + +- name: install verify changes dict returned + assert: + that: + - "yum_changes_result is success" + - "yum_changes_result is changed" + - "'changes' in yum_changes_result" + - "'installed' in yum_changes_result['changes']" + - "'htop' in yum_changes_result['changes']['installed']" + +- name: remove htop in check mode to verify changes dict returned + yum: + name: htop + state: absent + check_mode: yes + register: yum_changes_check_mode_result + +- name: remove verify changes dict returned in check mode + assert: + that: + - "yum_changes_check_mode_result is success" + - "yum_changes_check_mode_result is changed" + - "'changes' in yum_changes_check_mode_result" + - "'removed' in yum_changes_check_mode_result['changes']" + - "'htop' in yum_changes_check_mode_result['changes']['removed']" + +- name: remove htop to verify changes dict returned + yum: + name: htop + state: absent + register: yum_changes_result + +- name: remove verify changes dict returned + assert: + that: + - "yum_changes_result is success" + - "yum_changes_result is changed" + - "'changes' in yum_changes_result" + - "'removed' in yum_changes_result['changes']" + - "'htop' in yum_changes_result['changes']['removed']" diff --git a/test/integration/targets/yum/tasks/lock.yml b/test/integration/targets/yum/tasks/lock.yml new file mode 100644 index 00000000..3f585c1d --- /dev/null +++ b/test/integration/targets/yum/tasks/lock.yml @@ -0,0 +1,28 @@ +- block: + - name: Make sure testing package is not installed + yum: + name: sos + state: absent + + - name: Create bogus lock file + copy: + content: bogus content for this lock file + dest: /var/run/yum.pid + + - name: Install a package, lock file should be deleted by the module + yum: + name: sos + state: present + register: yum_result + + - assert: + that: + - yum_result is success + + always: + - name: Clean up + yum: + name: sos + state: absent + + when: ansible_pkg_mgr == 'yum' diff --git a/test/integration/targets/yum/tasks/main.yml b/test/integration/targets/yum/tasks/main.yml new file mode 100644 index 00000000..3a7f4cf5 --- /dev/null +++ b/test/integration/targets/yum/tasks/main.yml @@ -0,0 +1,71 @@ +# (c) 2014, James Tanner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Note: We install the yum package onto Fedora so that this will work on dnf systems +# We want to test that for people who don't want to upgrade their systems. 
+ +- block: + - name: ensure test packages are removed before starting + yum: + name: + - sos + state: absent + + - import_tasks: yum.yml + always: + - name: remove installed packages + yum: + name: + - sos + state: absent + + - name: remove installed group + yum: + name: "@Custom Group" + state: absent + + - name: On Fedora 28 the above won't remove the group which results in a failure in repo.yml below + yum: + name: dinginessentail + state: absent + when: + - ansible_distribution in ['Fedora'] + + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] + + +- block: + - import_tasks: repo.yml + - import_tasks: yum_group_remove.yml + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] + always: + - yum_repository: + name: "{{ item }}" + state: absent + loop: "{{ repos }}" + + - command: yum clean metadata + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] + + +- import_tasks: yuminstallroot.yml + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] + + +- import_tasks: proxy.yml + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] + + +- import_tasks: check_mode_consistency.yml + when: + - (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version|int == 7) + + +- import_tasks: lock.yml + when: + - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] diff --git a/test/integration/targets/yum/tasks/proxy.yml b/test/integration/targets/yum/tasks/proxy.yml new file mode 100644 index 00000000..f42eb179 --- /dev/null +++ b/test/integration/targets/yum/tasks/proxy.yml @@ -0,0 +1,186 @@ +- name: test yum proxy settings + block: + - name: install tinyproxy + yum: + name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/tinyproxy-1.10.0-3.el7.x86_64.rpm' + state: installed + + # systemd doesn't play nice with this in a container for some reason + - name: start tinyproxy (systemd with tiny proxy does not work in container) + shell: tinyproxy + changed_when: false + + # test proxy without auth + - name: set unauthenticated proxy in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy=http://127.0.0.1:8888" + state: present + + - name: clear proxy logs + shell: ': > /var/log/tinyproxy/tinyproxy.log' + changed_when: false + args: + executable: /usr/bin/bash + + - name: install ninvaders with unauthenticated proxy + yum: + name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm' + state: installed + register: yum_proxy_result + + - assert: + that: + - "yum_proxy_result.changed" + - "'msg' in yum_proxy_result" + - "'rc' in yum_proxy_result" + + - name: check that it install via unauthenticated proxy + command: grep -q Request /var/log/tinyproxy/tinyproxy.log + + - name: uninstall ninvaders with unauthenticated proxy + yum: + name: ninvaders + state: absent + register: yum_proxy_result + + - assert: + that: + - "yum_proxy_result.changed" + - "'msg' in yum_proxy_result" + - "'rc' in yum_proxy_result" + + - name: unset unauthenticated proxy in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy=http://127.0.0.1:8888" + state: absent + + # test proxy with auth + - name: set authenticated proxy config in tinyproxy.conf + lineinfile: + path: /etc/tinyproxy/tinyproxy.conf + line: "BasicAuth 1testuser 1testpassword" + state: present + + # systemd doesn't play nice with this in a container for some reason + - name: 
SIGHUP tinyproxy to reload config (workaround because of systemd+tinyproxy in container) + shell: kill -HUP $(ps -ef | grep tinyproxy | grep -v grep | awk '{print $2}') + changed_when: false + args: + executable: /usr/bin/bash + + - name: set authenticated proxy config in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888" + state: present + + - name: clear proxy logs + shell: ': > /var/log/tinyproxy/tinyproxy.log' + changed_when: false + args: + executable: /usr/bin/bash + + - name: install ninvaders with authenticated proxy + yum: + name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm' + state: installed + register: yum_proxy_result + + - assert: + that: + - "yum_proxy_result.changed" + - "'msg' in yum_proxy_result" + - "'rc' in yum_proxy_result" + + - name: check that it install via authenticated proxy + command: grep -q Request /var/log/tinyproxy/tinyproxy.log + + - name: uninstall ninvaders with authenticated proxy + yum: + name: ninvaders + state: absent + + - name: unset authenticated proxy config in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888" + state: absent + + - name: set proxy config in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy=http://127.0.0.1:8888" + state: present + + - name: set proxy_username config in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy_username=1testuser" + state: present + + - name: set proxy_password config in yum.conf + lineinfile: + path: /etc/yum.conf + line: "proxy_password=1testpassword" + state: present + + - name: clear proxy logs + shell: ': > /var/log/tinyproxy/tinyproxy.log' + changed_when: false + args: + executable: /usr/bin/bash + + - name: install ninvaders with proxy, proxy_username, and proxy_password config in yum.conf + yum: + name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm' + state: installed + register: yum_proxy_result + + - assert: + that: + - "yum_proxy_result.changed" + - "'msg' in yum_proxy_result" + - "'rc' in yum_proxy_result" + + - name: check that it install via proxy with proxy_username, proxy_password config in yum.conf + command: grep -q Request /var/log/tinyproxy/tinyproxy.log + + always: + #cleanup + - name: uninstall tinyproxy + yum: + name: tinyproxy + state: absent + + - name: uninstall ninvaders + yum: + name: ninvaders + state: absent + + - name: ensure unset authenticated proxy + lineinfile: + path: /etc/yum.conf + line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888" + state: absent + + - name: ensure unset proxy + lineinfile: + path: /etc/yum.conf + line: "proxy=http://127.0.0.1:8888" + state: absent + + - name: ensure unset proxy_username + lineinfile: + path: /etc/yum.conf + line: "proxy_username=1testuser" + state: absent + + - name: ensure unset proxy_password + lineinfile: + path: /etc/yum.conf + line: "proxy_password=1testpassword" + state: absent + when: + - (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version|int == 7 and ansible_architecture in ['x86_64']) diff --git a/test/integration/targets/yum/tasks/repo.yml b/test/integration/targets/yum/tasks/repo.yml new file mode 100644 index 00000000..c1a7a016 --- /dev/null +++ b/test/integration/targets/yum/tasks/repo.yml @@ -0,0 +1,705 @@ +- block: + - name: Install dinginessentail-1.0-1 + yum: + name: dinginessentail-1.0-1 + state: present + 
register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 again + yum: + name: dinginessentail-1.0-1 + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1:1.0-2 + yum: + name: "dinginessentail-1:1.0-2.{{ ansible_architecture }}" + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + + - name: Remove dinginessentail + yum: + name: dinginessentail + state: absent + # ============================================================================ + - name: Downgrade dinginessentail + yum: + name: dinginessentail-1.0-1 + state: present + allow_downgrade: yes + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Update to the latest dinginessentail + yum: + name: dinginessentail + state: latest + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.1-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file (higher version is already installed) + yum: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.1-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + + - name: Remove dinginessentail + yum: + name: dinginessentail + state: 
absent + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file + yum: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1.0-1 from a file again + yum: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1.0-2 from a file + yum: + name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Install dinginessentail-1.0-2 from a file again + yum: + name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Try to downgrade dinginessentail without allow_downgrade being set + yum: + name: dinginessentail-1.0-1 + state: present + allow_downgrade: no + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Update dinginessentail with update_only set + yum: + name: dinginessentail + state: latest + update_only: yes + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q 
dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.1-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + + - name: Remove dinginessentail + yum: + name: dinginessentail + state: absent + # ============================================================================ + - name: Try to update dinginessentail which is not installed, update_only is set + yum: + name: dinginessentail + state: latest + update_only: yes + register: yum_result + ignore_errors: yes + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + ignore_errors: yes + + - name: Verify installation + assert: + that: + - "rpm_result.rc == 1" + - "yum_result.rc == 0" + - "not yum_result.changed" + - "not yum_result is failed" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Try to install incompatible arch + yum: + name: "{{ repodir_ppc64 }}/dinginessentail-1.0-1.ppc64.rpm" + state: present + register: yum_result + ignore_errors: yes + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + ignore_errors: yes + + - name: Verify installation + assert: + that: + - "rpm_result.rc == 1" + - "yum_result.rc == 1" + - "not yum_result.changed" + - "yum_result is failed" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - name: Make sure latest dinginessentail is installed + yum: + name: dinginessentail + state: latest + + - name: Downgrade dinginessentail using rpm file + yum: + name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm" + state: present + allow_downgrade: yes + disable_gpg_check: yes + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + # ============================================================================ + - block: + - name: make sure dinginessentail is not installed + yum: + name: dinginessentail + state: absent + + - name: install dinginessentail both archs + yum: + name: "{{ pkgs }}" + state: present + disable_gpg_check: true + vars: + pkgs: + - "{{ repodir }}/dinginessentail-1.1-1.x86_64.rpm" + - "{{ repodir_i686 }}/dinginessentail-1.1-1.i686.rpm" + + - name: try to install lower version of dinginessentail from rpm file, without allow_downgrade, just one arch + yum: + name: "{{ repodir_i686 }}/dinginessentail-1.0-1.i686.rpm" + state: present + register: yum_result + + - name: check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout_lines[0].startswith('dinginessentail-1.1-1')" + - "rpm_result.stdout_lines[1].startswith('dinginessentail-1.1-1')" + + - name: verify yum module outputs + assert: + that: 
+ - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + when: ansible_architecture == "x86_64" + # ============================================================================ + - block: + - name: make sure dinginessentail is not installed + yum: + name: dinginessentail + state: absent + + - name: install dinginessentail both archs + yum: + name: "{{ pkgs }}" + state: present + disable_gpg_check: true + vars: + pkgs: + - "{{ repodir }}/dinginessentail-1.0-1.x86_64.rpm" + - "{{ repodir_i686 }}/dinginessentail-1.0-1.i686.rpm" + + - name: Update both arch in one task using rpm files + yum: + name: "{{ repodir }}/dinginessentail-1.1-1.x86_64.rpm,{{ repodir_i686 }}/dinginessentail-1.1-1.i686.rpm" + state: present + disable_gpg_check: yes + register: yum_result + + - name: check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout_lines[0].startswith('dinginessentail-1.1-1')" + - "rpm_result.stdout_lines[1].startswith('dinginessentail-1.1-1')" + + - name: verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + when: ansible_architecture == "x86_64" + # ============================================================================ + always: + - name: Clean up + yum: + name: dinginessentail + state: absent + +# FIXME: dnf currently doesn't support epoch as part of it's pkg_spec for +# finding install candidates +# https://bugzilla.redhat.com/show_bug.cgi?id=1619687 +- block: + - name: Install 1:dinginessentail-1.0-2 + yum: + name: "1:dinginessentail-1.0-2.{{ ansible_architecture }}" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + always: + - name: Clean up + yum: + name: dinginessentail + state: absent + + when: ansible_pkg_mgr == 'yum' + +# DNF1 (Fedora < 26) had some issues: +# - did not accept architecture tag as valid component of a package spec unless +# installing a file (i.e. 
can't search the repo) +# - doesn't handle downgrade transactions via the API properly, marks it as a +# conflict +# +# NOTE: Both DNF1 and Fedora < 26 have long been EOL'd by their respective +# upstreams +- block: + # ============================================================================ + - name: Install dinginessentail-1.0-2 + yum: + name: "dinginessentail-1.0-2.{{ ansible_architecture }}" + state: present + disable_gpg_check: true + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + + - name: Install dinginessentail-1.0-2 again + yum: + name: dinginessentail-1.0-2 + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "not yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-2')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + always: + - name: Clean up + yum: + name: dinginessentail + state: absent + when: not (ansible_distribution == "Fedora" and ansible_distribution_major_version|int < 26) + +# https://github.com/ansible/ansible/issues/47689 +- block: + - name: Install dinginessentail == 1.0 + yum: + name: "dinginessentail == 1.0" + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + always: + - name: Clean up + yum: + name: dinginessentail + state: absent + + when: ansible_pkg_mgr == 'yum' + + +# https://github.com/ansible/ansible/pull/54603 +- block: + - name: Install dinginessentail < 1.1 + yum: + name: "dinginessentail < 1.1" + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.0')" + + - name: Install dinginessentail >= 1.1 + yum: + name: "dinginessentail >= 1.1" + state: present + register: yum_result + + - name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify installation + assert: + that: + - "yum_result.changed" + - "rpm_result.stdout.startswith('dinginessentail-1.1')" + + - name: Verify yum module outputs + assert: + that: + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + + always: + - name: Clean up + yum: + name: dinginessentail + state: absent + + when: ansible_pkg_mgr == 'yum' + +# https://github.com/ansible/ansible/issues/45250 +- block: + - name: Install dinginessentail-1.0, dinginessentail-olive-1.0, landsidescalping-1.0 + yum: + name: "dinginessentail-1.0,dinginessentail-olive-1.0,landsidescalping-1.0" + state: present + + - name: Upgrade dinginessentail* + yum: + name: dinginessentail* + state: latest + register: yum_result + + 
- name: Check dinginessentail with rpm + shell: rpm -q dinginessentail + register: rpm_result + + - name: Verify update of dinginessentail + assert: + that: + - "rpm_result.stdout.startswith('dinginessentail-1.1-1')" + + - name: Check dinginessentail-olive with rpm + shell: rpm -q dinginessentail-olive + register: rpm_result + + - name: Verify update of dinginessentail-olive + assert: + that: + - "rpm_result.stdout.startswith('dinginessentail-olive-1.1-1')" + + - name: Check landsidescalping with rpm + shell: rpm -q landsidescalping + register: rpm_result + + - name: Verify landsidescalping did NOT get updated + assert: + that: + - "rpm_result.stdout.startswith('landsidescalping-1.0-1')" + + - name: Verify yum module outputs + assert: + that: + - "yum_result is changed" + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + always: + - name: Clean up + yum: + name: dinginessentail,dinginessentail-olive,landsidescalping + state: absent diff --git a/test/integration/targets/yum/tasks/yum.yml b/test/integration/targets/yum/tasks/yum.yml new file mode 100644 index 00000000..9ed00af8 --- /dev/null +++ b/test/integration/targets/yum/tasks/yum.yml @@ -0,0 +1,873 @@ +# Setup by setup_rpm_repo +- set_fact: + package1: dinginessentail + package2: dinginessentail-olive + +# UNINSTALL +- name: uninstall {{ package1 }} + yum: name={{ package1 }} state=removed + register: yum_result + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + ignore_errors: True + register: rpm_result + +- name: verify uninstallation of {{ package1 }} + assert: + that: + - "yum_result is success" + - "rpm_result is failed" + +# UNINSTALL AGAIN +- name: uninstall {{ package1 }} again in check mode + yum: name={{ package1 }} state=removed + check_mode: true + register: yum_result + +- name: verify no change on re-uninstall in check mode + assert: + that: + - "not yum_result is changed" + +- name: uninstall {{ package1 }} again + yum: name={{ package1 }} state=removed + register: yum_result + +- name: verify no change on re-uninstall + assert: + that: + - "not yum_result is changed" + +# INSTALL +- name: install {{ package1 }} in check mode + yum: name={{ package1 }} state=present + check_mode: true + register: yum_result + +- name: verify installation of {{ package1 }} in check mode + assert: + that: + - "yum_result is changed" + +- name: install {{ package1 }} + yum: name={{ package1 }} state=present + register: yum_result + +- name: verify installation of {{ package1 }} + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + +# INSTALL AGAIN +- name: install {{ package1 }} again in check mode + yum: name={{ package1 }} state=present + check_mode: true + register: yum_result +- name: verify no change on second install in check mode + assert: + that: + - "not yum_result is changed" + +- name: install {{ package1 }} again + yum: name={{ package1 }} state=present + register: yum_result +- name: verify no change on second install + assert: + that: + - "not yum_result is changed" + +- name: install {{ package1 }} again with empty string enablerepo + yum: name={{ package1 }} state=present enablerepo="" + register: yum_result +- name: verify no change on third install with empty string enablerepo + assert: + that: + - "yum_result is success" + - "not 
yum_result is changed" + +# This test case is unfortunately distro specific because we have to specify +# repo names which are not the same across Fedora/RHEL/CentOS for base/updates +- name: install {{ package1 }} again with missing repo enablerepo + yum: + name: '{{ package1 }}' + state: present + enablerepo: '{{ repos + ["thisrepodoesnotexist"] }}' + disablerepo: "*" + register: yum_result + when: ansible_distribution == 'CentOS' +- name: verify no change on fourth install with missing repo enablerepo (yum) + assert: + that: + - "yum_result is success" + - "yum_result is not changed" + when: ansible_distribution == 'CentOS' + +# This test case is unfortunately distro specific because we have to specify +# repo names which are not the same across Fedora/RHEL/CentOS for base/updates +- name: install repos again with disable all and enable select repo(s) + yum: + name: '{{ package1 }}' + state: present + enablerepo: '{{ repos }}' + disablerepo: "*" + register: yum_result + when: ansible_distribution == 'CentOS' +- name: verify no change on fourth install with missing repo enablerepo (yum) + assert: + that: + - "yum_result is success" + - "yum_result is not changed" + when: ansible_distribution == 'CentOS' + +- name: install {{ package1 }} again with only missing repo enablerepo + yum: + name: '{{ package1 }}' + state: present + enablerepo: "thisrepodoesnotexist" + ignore_errors: true + register: yum_result +- name: verify no change on fifth install with only missing repo enablerepo (yum) + assert: + that: + - "yum_result is not success" + when: ansible_pkg_mgr == 'yum' +- name: verify no change on fifth install with only missing repo enablerepo (dnf) + assert: + that: + - "yum_result is success" + when: ansible_pkg_mgr == 'dnf' + +# INSTALL AGAIN WITH LATEST +- name: install {{ package1 }} again with state latest in check mode + yum: name={{ package1 }} state=latest + check_mode: true + register: yum_result +- name: verify install {{ package1 }} again with state latest in check mode + assert: + that: + - "not yum_result is changed" + +- name: install {{ package1 }} again with state latest idempotence + yum: name={{ package1 }} state=latest + register: yum_result +- name: verify install {{ package1 }} again with state latest idempotence + assert: + that: + - "not yum_result is changed" + +# INSTALL WITH LATEST +- name: uninstall {{ package1 }} + yum: name={{ package1 }} state=removed + register: yum_result +- name: verify uninstall {{ package1 }} + assert: + that: + - "yum_result is successful" + +- name: copy yum.conf file in case it is missing + copy: + src: yum.conf + dest: /etc/yum.conf + force: False + register: yum_conf_copy + +- block: + - name: install {{ package1 }} with state latest in check mode with config file param + yum: name={{ package1 }} state=latest conf_file=/etc/yum.conf + check_mode: true + register: yum_result + - name: verify install {{ package1 }} with state latest in check mode with config file param + assert: + that: + - "yum_result is changed" + + always: + - name: remove tmp yum.conf file if we created it + file: + path: /etc/yum.conf + state: absent + when: yum_conf_copy is changed + +- name: install {{ package1 }} with state latest in check mode + yum: name={{ package1 }} state=latest + check_mode: true + register: yum_result +- name: verify install {{ package1 }} with state latest in check mode + assert: + that: + - "yum_result is changed" + +- name: install {{ package1 }} with state latest + yum: name={{ package1 }} state=latest + register: yum_result +- 
name: verify install {{ package1 }} with state latest + assert: + that: + - "yum_result is changed" + +- name: install {{ package1 }} with state latest idempotence + yum: name={{ package1 }} state=latest + register: yum_result +- name: verify install {{ package1 }} with state latest idempotence + assert: + that: + - "not yum_result is changed" + +- name: install {{ package1 }} with state latest idempotence with config file param + yum: name={{ package1 }} state=latest + register: yum_result +- name: verify install {{ package1 }} with state latest idempotence with config file param + assert: + that: + - "not yum_result is changed" + + +# Multiple packages +- name: uninstall {{ package1 }} and {{ package2 }} + yum: name={{ package1 }},{{ package2 }} state=removed + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + ignore_errors: True + register: rpm_package1_result + +- name: check {{ package2 }} with rpm + shell: rpm -q {{ package2 }} + ignore_errors: True + register: rpm_package2_result + +- name: verify packages installed + assert: + that: + - "rpm_package1_result is failed" + - "rpm_package2_result is failed" + +- name: install {{ package1 }} and {{ package2 }} as comma separated + yum: name={{ package1 }},{{ package2 }} state=present + register: yum_result + +- name: verify packages installed + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + +- name: check {{ package2 }} with rpm + shell: rpm -q {{ package2 }} + +- name: uninstall {{ package1 }} and {{ package2 }} + yum: name={{ package1 }},{{ package2 }} state=removed + register: yum_result + +- name: install {{ package1 }} and {{ package2 }} as list + yum: + name: + - '{{ package1 }}' + - '{{ package2 }}' + state: present + register: yum_result + +- name: verify packages installed + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + +- name: check {{ package2 }} with rpm + shell: rpm -q {{ package2 }} + +- name: uninstall {{ package1 }} and {{ package2 }} + yum: name={{ package1 }},{{ package2 }} state=removed + register: yum_result + +- name: install {{ package1 }} and {{ package2 }} as comma separated with spaces + yum: + name: "{{ package1 }}, {{ package2 }}" + state: present + register: yum_result + +- name: verify packages installed + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} + +- name: check {{ package2 }} with rpm + shell: rpm -q {{ package2 }} + +- name: uninstall {{ package1 }} and {{ package2 }} + yum: name={{ package1 }},{{ package2 }} state=removed + +- name: install non-existent rpm + yum: + name: does-not-exist + register: non_existent_rpm + ignore_errors: True + +- name: check non-existent rpm install failed + assert: + that: + - non_existent_rpm is failed + +# Install in installroot='/' +- name: install {{ package1 }} + yum: name={{ package1 }} state=present installroot='/' + register: yum_result + +- name: verify installation of {{ package1 }} + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: check {{ package1 }} with rpm + shell: rpm -q {{ package1 }} --root=/ + +- name: uninstall {{ package1 }} + yum: + name: '{{ package1 }}' + 
installroot: '/' + state: removed + register: yum_result + +# Seems like some yum versions won't download a package from local file repository, continue to use sos for this test. +# https://stackoverflow.com/questions/58295660/yum-downloadonly-ignores-packages-in-local-repo +- name: Test download_only + yum: + name: sos + state: latest + download_only: true + register: yum_result + +- name: verify download of sos (part 1 -- yum "install" succeeded) + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: uninstall sos (noop) + yum: + name: sos + state: removed + register: yum_result + +- name: verify download of sos (part 2 -- nothing removed during uninstall) + assert: + that: + - "yum_result is success" + - "not yum_result is changed" + +- name: uninstall sos for downloadonly/downloaddir test + yum: + name: sos + state: absent + +- name: Test download_only/download_dir + yum: + name: sos + state: latest + download_only: true + download_dir: "/var/tmp/packages" + register: yum_result + +- name: verify yum output + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- command: "ls /var/tmp/packages" + register: ls_out + +- name: Verify specified download_dir was used + assert: + that: + - "'sos' in ls_out.stdout" + +- name: install group + yum: + name: "@Custom Group" + state: present + register: yum_result + +- name: verify installation of the group + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: install the group again + yum: + name: "@Custom Group" + state: present + register: yum_result + +- name: verify nothing changed + assert: + that: + - "yum_result is success" + - "not yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: install the group again but also with a package that is not yet installed + yum: + name: + - "@Custom Group" + - '{{ package2 }}' + state: present + register: yum_result + +- name: verify {{ package3 }} is installed + assert: + that: + - "yum_result is success" + - "yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: try to install the group again, with --check to check 'changed' + yum: + name: "@Custom Group" + state: present + check_mode: yes + register: yum_result + +- name: verify nothing changed + assert: + that: + - "not yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: try to install non existing group + yum: + name: "@non-existing-group" + state: present + register: yum_result + ignore_errors: True + +- name: verify installation of the non existing group failed + assert: + that: + - "yum_result is failed" + - "not yum_result is changed" + - "yum_result is failed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: try to install non existing file + yum: + name: /tmp/non-existing-1.0.0.fc26.noarch.rpm + state: present + register: yum_result + ignore_errors: yes + +- name: verify installation failed + assert: + that: + - "yum_result is failed" + - "not yum_result is 
changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + +- name: try to install from non existing url + yum: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/non-existing-1.0.0.fc26.noarch.rpm + state: present + register: yum_result + ignore_errors: yes + +- name: verify installation failed + assert: + that: + - "yum_result is failed" + - "not yum_result is changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + +- name: use latest to install httpd + yum: + name: httpd + state: latest + register: yum_result + +- name: verify httpd was installed + assert: + that: + - "'changed' in yum_result" + +- name: uninstall httpd + yum: + name: httpd + state: removed + +- name: update httpd only if it exists + yum: + name: httpd + state: latest + update_only: yes + register: yum_result + +- name: verify httpd not installed + assert: + that: + - "not yum_result is changed" + - "'Packages providing httpd not installed due to update_only specified' in yum_result.results" + +- name: try to install uncompatible arch rpm on non-ppc64le, should fail + yum: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/banner-1.3.4-3.el7.ppc64le.rpm + state: present + register: yum_result + ignore_errors: True + when: + - ansible_architecture not in ['ppc64le'] + +- name: verify that yum failed on non-ppc64le + assert: + that: + - "not yum_result is changed" + - "yum_result is failed" + when: + - ansible_architecture not in ['ppc64le'] + +- name: try to install uncompatible arch rpm on ppc64le, should fail + yum: + name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/tinyproxy-1.10.0-3.el7.x86_64.rpm + state: present + register: yum_result + ignore_errors: True + when: + - ansible_architecture in ['ppc64le'] + +- name: verify that yum failed on ppc64le + assert: + that: + - "not yum_result is changed" + - "yum_result is failed" + when: + - ansible_architecture in ['ppc64le'] + +# setup for testing installing an RPM from url + +- set_fact: + pkg_name: fpaste + +- name: cleanup + yum: + name: "{{ pkg_name }}" + state: absent + +- set_fact: + pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/fpaste-0.3.7.4.1-2.el7.noarch.rpm + when: ansible_python.version.major == 2 + +- set_fact: + pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/fpaste-0.3.9.2-1.fc28.noarch.rpm + when: ansible_python.version.major == 3 +# setup end + +- name: download an rpm + get_url: + url: "{{ pkg_url }}" + dest: "/tmp/{{ pkg_name }}.rpm" + +- name: install the downloaded rpm + yum: + name: "/tmp/{{ pkg_name }}.rpm" + state: present + disable_gpg_check: true + register: yum_result + +- name: verify installation + assert: + that: + - "yum_result is success" + - "yum_result is changed" + - "yum_result is not failed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: install the downloaded rpm again + yum: + name: "/tmp/{{ pkg_name }}.rpm" + state: present + register: yum_result + +- name: verify installation + assert: + that: + - "yum_result is success" + - "not yum_result is changed" + - "yum_result is not failed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: clean up + yum: + 
name: "{{ pkg_name }}" + state: absent + +- name: install from url + yum: + name: "{{ pkg_url }}" + state: present + disable_gpg_check: true + register: yum_result + +- name: verify installation + assert: + that: + - "yum_result is success" + - "yum_result is changed" + - "yum_result is not failed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: Create a temp RPM file which does not contain nevra information + file: + name: "/tmp/non_existent_pkg.rpm" + state: touch + +- name: Try installing RPM file which does not contain nevra information + yum: + name: "/tmp/non_existent_pkg.rpm" + state: present + register: no_nevra_info_result + ignore_errors: yes + +- name: Verify RPM failed to install + assert: + that: + - "'changed' in no_nevra_info_result" + - "'msg' in no_nevra_info_result" + +- name: Delete a temp RPM file + file: + name: "/tmp/non_existent_pkg.rpm" + state: absent + +- name: get yum version + yum: + list: yum + register: yum_version + +- name: set yum_version of installed version + set_fact: + yum_version: "{%- if item.yumstate == 'installed' -%}{{ item.version }}{%- else -%}{{ yum_version }}{%- endif -%}" + with_items: "{{ yum_version.results }}" + +- name: Ensure double uninstall of wildcard globs works + block: + - name: "Install lohit-*-fonts" + yum: + name: "lohit-*-fonts" + state: present + + - name: "Remove lohit-*-fonts (1st time)" + yum: + name: "lohit-*-fonts" + state: absent + register: remove_lohit_fonts_1 + + - name: "Verify lohit-*-fonts (1st time)" + assert: + that: + - "remove_lohit_fonts_1 is changed" + - "'msg' in remove_lohit_fonts_1" + - "'results' in remove_lohit_fonts_1" + + - name: "Remove lohit-*-fonts (2nd time)" + yum: + name: "lohit-*-fonts" + state: absent + register: remove_lohit_fonts_2 + + - name: "Verify lohit-*-fonts (2nd time)" + assert: + that: + - "remove_lohit_fonts_2 is not changed" + - "'msg' in remove_lohit_fonts_2" + - "'results' in remove_lohit_fonts_2" + - "'lohit-*-fonts is not installed' in remove_lohit_fonts_2['results']" + +- block: + - name: uninstall {{ package2 }} + yum: name={{ package2 }} state=removed + + - name: check {{ package2 }} with rpm + shell: rpm -q {{ package2 }} + ignore_errors: True + register: rpm_package2_result + + - name: verify {{ package2 }} is uninstalled + assert: + that: + - "rpm_package2_result is failed" + + - name: exclude {{ package2 }} (yum backend) + lineinfile: + dest: /etc/yum.conf + regexp: (^exclude=)(.)* + line: "exclude={{ package2 }}*" + state: present + when: ansible_pkg_mgr == 'yum' + + - name: exclude {{ package2 }} (dnf backend) + lineinfile: + dest: /etc/dnf/dnf.conf + regexp: (^excludepkgs=)(.)* + line: "excludepkgs={{ package2 }}*" + state: present + when: ansible_pkg_mgr == 'dnf' + + # begin test case where disable_excludes is supported + - name: Try install {{ package2 }} without disable_excludes + yum: name={{ package2 }} state=latest + register: yum_package2_result + ignore_errors: True + + - name: verify {{ package2 }} did not install because it is in exclude list + assert: + that: + - "yum_package2_result is failed" + + - name: install {{ package2 }} with disable_excludes + yum: name={{ package2 }} state=latest disable_excludes=all + register: yum_package2_result_using_excludes + + - name: verify {{ package2 }} did install using disable_excludes=all + assert: + that: + - "yum_package2_result_using_excludes is success" + - "yum_package2_result_using_excludes is changed" + - 
"yum_package2_result_using_excludes is not failed" + + - name: remove exclude {{ package2 }} (cleanup yum.conf) + lineinfile: + dest: /etc/yum.conf + regexp: (^exclude={{ package2 }}*) + line: "exclude=" + state: present + when: ansible_pkg_mgr == 'yum' + + - name: remove exclude {{ package2 }} (cleanup dnf.conf) + lineinfile: + dest: /etc/dnf/dnf.conf + regexp: (^excludepkgs={{ package2 }}*) + line: "excludepkgs=" + state: present + when: ansible_pkg_mgr == 'dnf' + + # Fedora < 26 has a bug in dnf where package excludes in dnf.conf aren't + # actually honored and those releases are EOL'd so we have no expectation they + # will ever be fixed + when: not ((ansible_distribution == "Fedora") and (ansible_distribution_major_version|int < 26)) + +- name: Check that packages with Provides are handled correctly in state=absent + block: + - name: Install test packages + yum: + name: + - https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yum/test-package-that-provides-toaster-1.3.3.7-1.el7.noarch.rpm + - https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yum/toaster-1.2.3.4-1.el7.noarch.rpm + disable_gpg_check: true + register: install + + - name: Remove toaster + yum: + name: toaster + state: absent + register: remove + + - name: rpm -qa + command: rpm -qa + register: rpmqa + + - assert: + that: + - install is successful + - install is changed + - remove is successful + - remove is changed + - "'toaster-1.2.3.4' not in rpmqa.stdout" + - "'test-package-that-provides-toaster' in rpmqa.stdout" diff --git a/test/integration/targets/yum/tasks/yum_group_remove.yml b/test/integration/targets/yum/tasks/yum_group_remove.yml new file mode 100644 index 00000000..22c6dcb1 --- /dev/null +++ b/test/integration/targets/yum/tasks/yum_group_remove.yml @@ -0,0 +1,152 @@ +- name: install a group to test and yum-utils + yum: + name: "{{ pkgs }}" + state: present + vars: + pkgs: + - "@Custom Group" + - yum-utils + when: ansible_pkg_mgr == "yum" + +- name: install a group to test and dnf-utils + yum: + name: "{{ pkgs }}" + state: present + vars: + pkgs: + - "@Custom Group" + - dnf-utils + when: ansible_pkg_mgr == "dnf" + +- name: check mode remove the group + yum: + name: "@Custom Group" + state: absent + check_mode: yes + register: yum_result + +- name: verify changed + assert: + that: + - "yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'results' in yum_result" + +- name: remove the group + yum: + name: "@Custom Group" + state: absent + register: yum_result + +- name: verify changed + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: remove the group again + yum: + name: "@Custom Group" + state: absent + register: yum_result + +- name: verify changed + assert: + that: + - "not yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: check mode remove the group again + yum: + name: "@Custom Group" + state: absent + check_mode: yes + register: yum_result + +- name: verify changed + assert: + that: + - "not yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'results' in yum_result" + +- name: install a group and a package to test + yum: + name: "@Custom Group,sos" + state: present + register: 
yum_output + +- name: check mode remove the group along with the package + yum: + name: "@Custom Group,sos" + state: absent + register: yum_result + check_mode: yes + +- name: verify changed + assert: + that: + - "yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'results' in yum_result" + +- name: remove the group along with the package + yum: + name: "@Custom Group,sos" + state: absent + register: yum_result + +- name: verify changed + assert: + that: + - "yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'results' in yum_result" + +- name: check mode remove the group along with the package + yum: + name: "@Custom Group,sos" + state: absent + register: yum_result + check_mode: yes + +- name: verify not changed + assert: + that: + - "not yum_result.changed" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'results' in yum_result" diff --git a/test/integration/targets/yum/tasks/yuminstallroot.yml b/test/integration/targets/yum/tasks/yuminstallroot.yml new file mode 100644 index 00000000..f9bee6f9 --- /dev/null +++ b/test/integration/targets/yum/tasks/yuminstallroot.yml @@ -0,0 +1,122 @@ +# make an installroot +- name: Create installroot + command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX" + register: yumroot + +#- name: Populate directory +# file: +# path: "/{{ yumroot.stdout }}/etc/" +# state: directory +# mode: 0755 +# +#- name: Populate directory2 +# copy: +# content: "[main]\ndistropkgver={{ ansible_distribution_version }}\n" +# dest: "/{{ yumroot.stdout }}/etc/yum.conf" + +- name: Make a necessary directory + file: + path: "{{ yumroot.stdout }}/etc/yum/vars/" + state: directory + mode: 0755 + +- name: get yum releasever + command: "{{ ansible_python_interpreter }} -c 'import yum; yb = yum.YumBase(); print(yb.conf.yumvar[\"releasever\"])'" + register: releasever + ignore_errors: yes + +- name: Populate directory + copy: + content: "{{ releasever.stdout_lines[-1] }}\n" + dest: "/{{ yumroot.stdout }}/etc/yum/vars/releasever" + when: releasever is successful + +# This will drag in > 200 MB. +- name: attempt installroot + yum: name=zlib installroot="{{ yumroot.stdout }}/" disable_gpg_check=yes + register: yum_result + +- name: check zlib with rpm in installroot + shell: rpm -q zlib --root="{{ yumroot.stdout }}/" + failed_when: False + register: rpm_result + +- name: verify installation of zlib + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + - "rpm_result.rc == 0" + +- name: verify yum module outputs + assert: + that: + - "'changed' in yum_result" + - "'msg' in yum_result" + - "'rc' in yum_result" + - "'results' in yum_result" + +- name: cleanup installroot + file: + path: "{{ yumroot.stdout }}/" + state: absent + +# Test for releasever working correctly +# +# Bugfix: https://github.com/ansible/ansible/issues/67050 +# +# This test case is based on a reproducer originally reported on Reddit: +# https://www.reddit.com/r/ansible/comments/g2ps32/ansible_yum_module_throws_up_an_error_when/ +# +# NOTE: For the Ansible upstream CI we can only run this for RHEL7 because the +# containerized runtimes in shippable don't allow the nested mounting of +# buildah container volumes.
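+# As an illustration only (the repo URL here is hypothetical, not part of this test): +# an empty installroot has no rpmdb to derive $releasever from, so a baseurl such as +# https://example.com/repo/$releasever/x86_64/ cannot be resolved and yum fails; +# passing releasever= to the yum module substitutes a usable value explicitly.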
+- name: perform yuminstallroot in a buildah mount with releasever + when: + - ansible_facts["distribution_major_version"] == "7" + - ansible_facts["distribution"] == "RedHat" + block: + # Need to enable this RHUI repo for RHEL7 testing in AWS; CentOS has Extras + # enabled by default and this is not needed there. + - name: enable rhel-7-server-rhui-extras-rpms repo for RHEL7 + command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms + - name: update cache to pull repodata + yum: + update_cache: yes + - name: install required packages for buildah test + yum: + state: present + name: + - buildah + - name: create buildah container from scratch + command: "buildah --name yum_installroot_releasever_test from scratch" + - name: mount the buildah container + command: "buildah mount yum_installroot_releasever_test" + register: buildah_mount + - name: figure out yum value of $releasever + shell: python -c 'import yum; yb = yum.YumBase(); print(yb.conf.yumvar["releasever"])' | tail -1 + register: buildah_host_releasever + - name: test yum install of python using releasever + yum: + name: 'python' + state: present + installroot: "{{ buildah_mount.stdout }}" + releasever: "{{ buildah_host_releasever.stdout }}" + register: yum_result + - name: verify installation of python + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + always: + - name: remove buildah container + command: "buildah rm yum_installroot_releasever_test" + ignore_errors: yes + - name: remove buildah from CI system + yum: + state: absent + name: + - buildah + - name: disable rhel-7-server-rhui-extras-rpms repo for RHEL7 + command: yum-config-manager --disable rhel-7-server-rhui-extras-rpms diff --git a/test/integration/targets/yum_repository/aliases b/test/integration/targets/yum_repository/aliases new file mode 100644 index 00000000..0b484bba --- /dev/null +++ b/test/integration/targets/yum_repository/aliases @@ -0,0 +1,3 @@ +shippable/posix/group1 +destructive +skip/aix diff --git a/test/integration/targets/yum_repository/defaults/main.yml b/test/integration/targets/yum_repository/defaults/main.yml new file mode 100644 index 00000000..4c1fbc65 --- /dev/null +++ b/test/integration/targets/yum_repository/defaults/main.yml @@ -0,0 +1,5 @@ +yum_repository_test_package: dinginessentail +yum_repository_test_repo: + name: fakerepo + description: Fake Repo + baseurl: "file://{{ repodir }}" diff --git a/test/integration/targets/yum_repository/handlers/main.yml b/test/integration/targets/yum_repository/handlers/main.yml new file mode 100644 index 00000000..f96c2391 --- /dev/null +++ b/test/integration/targets/yum_repository/handlers/main.yml @@ -0,0 +1,4 @@ +- name: remove listtest repo + yum_repository: + name: listtest + state: absent diff --git a/test/integration/targets/yum_repository/meta/main.yml b/test/integration/targets/yum_repository/meta/main.yml new file mode 100644 index 00000000..56539a4f --- /dev/null +++ b/test/integration/targets/yum_repository/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: setup_rpm_repo + vars: + install_repos: no diff --git a/test/integration/targets/yum_repository/tasks/main.yml b/test/integration/targets/yum_repository/tasks/main.yml new file mode 100644 index 00000000..0ff0bcff --- /dev/null +++ b/test/integration/targets/yum_repository/tasks/main.yml @@ -0,0 +1,187 @@ +- name: Run tests + when: ansible_facts.distribution in ['CentOS', 'Fedora'] + block: + - name: ensure {{ yum_repository_test_package }} is uninstalled to begin with + action:
"{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ yum_repository_test_package }}" + state: absent + + - name: disable {{ yum_repository_test_repo.name }} + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + state: absent + + - name: disable {{ yum_repository_test_repo.name }} (Idempotant) + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + state: absent + register: test_repo_remove + + - name: check return values + assert: + that: + - "test_repo_remove.repo == yum_repository_test_repo.name" + - "test_repo_remove.state == 'absent'" + + - name: check Idempotant + assert: + that: not test_repo_remove.changed + + - name: install {{ yum_repository_test_package }}, which should fail + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ yum_repository_test_package }}" + state: present + ignore_errors: yes + register: test_package_result + + - name: check that install failed + assert: + that: + - test_package_result.failed + - test_package_result.msg in expected_messages + vars: + expected_messages: + - No package matching '{{ yum_repository_test_package }}' found available, installed or updated + - Failed to install some of the specified packages + + - name: re-add {{ yum_repository_test_repo.name }} + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + description: "{{ yum_repository_test_repo.description }}" + baseurl: "{{ yum_repository_test_repo.baseurl }}" + gpgcheck: no + state: present + register: test_repo_add + + - name: check return values + assert: + that: + - test_repo_add.repo == yum_repository_test_repo.name + - test_repo_add.state == 'present' + + - name: get repolist + shell: yum repolist + register: repolist + until: repolist.rc == 0 + retries: 5 + args: + warn: no + + - name: ensure {{ yum_repository_test_repo.name }} was added + assert: + that: + - yum_repository_test_repo.name in repolist.stdout + - test_repo_add.changed + + - name: install {{ yum_repository_test_package }} + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ yum_repository_test_package }}" + state: present + register: test_package_result + + - name: check that {{ yum_repository_test_package }} was successfully installed + assert: + that: + - test_package_result.changed + + - name: remove {{ yum_repository_test_package }} + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ yum_repository_test_package }}" + state: absent + + - name: change configuration of {{ yum_repository_test_repo.name }} repo + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + baseurl: "{{ yum_repository_test_repo.baseurl }}" + description: New description + async: no + enablegroups: no + file: "{{ yum_repository_test_repo.name ~ 2 }}" + ip_resolve: 4 + keepalive: no + register: test_repo_add1 + + - name: check that options are correctly getting written to the repo file + assert: + that: + - "'async = 0' in repo_file_contents" + - "'name = New description' in repo_file_contents" + - "'enablegroups = 0' in repo_file_contents" + - "'ip_resolve = 4' in repo_file_contents" + - "'keepalive = 0' in repo_file_contents" + vars: + repo_file: "{{ '/etc/yum.repos.d/' ~ yum_repository_test_repo.name ~ '2.repo' }}" + repo_file_contents: "{{ lookup('file', repo_file) }}" + + - name: check new config doesn't change (Idempotant) + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + baseurl: "{{ yum_repository_test_repo.baseurl }}" + description: New description + async: no + enablegroups: no + file: "{{ yum_repository_test_repo.name ~ 2 }}" + ip_resolve: 4 + keepalive: no 
+ register: test_repo_add2 + + - name: check idempotency + assert: + that: + - test_repo_add1 is changed + - test_repo_add2 is not changed + + - name: re-enable the {{ yum_repository_test_repo.name }} repo + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + description: "{{ yum_repository_test_repo.description }}" + baseurl: "{{ yum_repository_test_repo.baseurl }}" + state: present + + - name: re-enable the {{ yum_repository_test_repo.name }} repo (idempotent) + yum_repository: + name: "{{ yum_repository_test_repo.name }}" + description: "{{ yum_repository_test_repo.description }}" + baseurl: "{{ yum_repository_test_repo.baseurl }}" + state: present + register: test_repo_add + + - name: check idempotency + assert: + that: test_repo_add is not changed + + - name: Test list options + yum_repository: + name: listtest + description: Testing list feature + baseurl: + - "{{ yum_repository_test_repo.baseurl }}" + - "{{ yum_repository_test_repo.baseurl | replace('download[0-9]?\\.', 'download2\\.', 1) }}" + gpgkey: + - gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }} + - gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG2-KEY-EPEL-{{ ansible_facts.distribution_major_version }} + exclude: + - aaa + - bbb + includepkgs: + - ccc + - ddd + notify: remove listtest repo + + - name: Assert that lists were properly inserted + assert: + that: + - url_hostname in repofile + - url_hostname2 in repofile + - "'RPM-GPG-KEY-EPEL' in repofile" + - "'RPM-GPG2-KEY-EPEL' in repofile" + - "'aaa bbb' in repofile" + - "'ccc ddd' in repofile" + vars: + repofile: "{{ lookup('file', '/etc/yum.repos.d/listtest.repo') }}" + url_hostname: "{{ yum_repository_test_repo.baseurl | urlsplit('hostname') }}" + url_hostname2: "{{ url_hostname | replace('download[0-9]?\\.', 'download2\\.', 1) }}" diff --git a/test/lib/ansible_test/__init__.py b/test/lib/ansible_test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/lib/ansible_test/_data/ansible.cfg b/test/lib/ansible_test/_data/ansible.cfg new file mode 100644 index 00000000..e69de29b diff --git a/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py new file mode 100755 index 00000000..d12b6334 --- /dev/null +++ b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# PYTHON_ARGCOMPLETE_OK +"""Command line entry point for ansible-test.""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main program entry point.""" + ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + source_root = os.path.join(ansible_root, 'test', 'lib') + + if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', 'cli.py')): + # running from source, use that version of ansible-test instead of any version that may already be installed + sys.path.insert(0, source_root) + + # noinspection PyProtectedMember + from ansible_test._internal.cli import main as cli_main + + cli_main() + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/collection_detail.py b/test/lib/ansible_test/_data/collection_detail.py new file mode 100644 index 00000000..e7c883ca --- /dev/null +++ b/test/lib/ansible_test/_data/collection_detail.py @@ -0,0 +1,95 @@ +"""Retrieve collection detail.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os
+import re +import sys + +import yaml + + +# See semantic versioning specification (https://semver.org/) +NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)' +ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)' + +PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')' +BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')' + +VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER +PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?' +BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?' + +SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$' + + +def validate_version(version): + """Raise exception if the provided version is not None or a valid semantic version.""" + if version is None: + return + if not re.match(SEMVER_REGULAR_EXPRESSION, version): + raise Exception('Invalid version number "{0}". Collection version numbers must ' + 'follow semantic versioning (https://semver.org/).'.format(version)) + + +def read_manifest_json(collection_path): + """Return collection information from the MANIFEST.json file.""" + manifest_path = os.path.join(collection_path, 'MANIFEST.json') + + if not os.path.exists(manifest_path): + return None + + try: + with open(manifest_path) as manifest_file: + manifest = json.load(manifest_file) + + collection_info = manifest.get('collection_info') or dict() + + result = dict( + version=collection_info.get('version'), + ) + validate_version(result['version']) + except Exception as ex: # pylint: disable=broad-except + raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex)) + + return result + + +def read_galaxy_yml(collection_path): + """Return collection information from the galaxy.yml file.""" + galaxy_path = os.path.join(collection_path, 'galaxy.yml') + + if not os.path.exists(galaxy_path): + return None + + try: + with open(galaxy_path) as galaxy_file: + galaxy = yaml.safe_load(galaxy_file) + + result = dict( + version=galaxy.get('version'), + ) + validate_version(result['version']) + except Exception as ex: # pylint: disable=broad-except + raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex)) + + return result + + +def main(): + """Retrieve collection detail.""" + collection_path = sys.argv[1] + + try: + result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or dict() + except Exception as ex: # pylint: disable=broad-except + result = dict( + error='{0}'.format(ex), + ) + + print(json.dumps(result)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt new file mode 100644 index 00000000..3e4566dc --- /dev/null +++ b/test/lib/ansible_test/_data/completion/docker.txt @@ -0,0 +1,13 @@ +default name=quay.io/ansible/default-test-container:2.9.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=collection +default name=quay.io/ansible/ansible-base-test-container:1.7.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=ansible-base +centos6 name=quay.io/ansible/centos6-test-container:1.26.0 python=2.6 seccomp=unconfined +centos7 name=quay.io/ansible/centos7-test-container:1.17.0 python=2.7 seccomp=unconfined +centos8 name=quay.io/ansible/centos8-test-container:1.21.0 python=3.6 seccomp=unconfined +fedora30 name=quay.io/ansible/fedora30-test-container:1.17.0 python=3.7 +fedora31 
name=quay.io/ansible/fedora31-test-container:1.17.0 python=3.7 +fedora32 name=quay.io/ansible/fedora32-test-container:1.17.0 python=3.8 +opensuse15py2 name=quay.io/ansible/opensuse15py2-test-container:1.21.0 python=2.7 +opensuse15 name=quay.io/ansible/opensuse15-test-container:1.21.0 python=3.6 +ubuntu1604 name=quay.io/ansible/ubuntu1604-test-container:1.21.0 python=2.7 seccomp=unconfined +ubuntu1804 name=quay.io/ansible/ubuntu1804-test-container:1.21.0 python=3.6 seccomp=unconfined +ubuntu2004 name=quay.io/ansible/ubuntu2004-test-container:1.21.0 python=3.8 seccomp=unconfined diff --git a/test/lib/ansible_test/_data/completion/network.txt b/test/lib/ansible_test/_data/completion/network.txt new file mode 100644 index 00000000..dca911f8 --- /dev/null +++ b/test/lib/ansible_test/_data/completion/network.txt @@ -0,0 +1,2 @@ +ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli +vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli diff --git a/test/lib/ansible_test/_data/completion/remote.txt b/test/lib/ansible_test/_data/completion/remote.txt new file mode 100644 index 00000000..dea4367b --- /dev/null +++ b/test/lib/ansible_test/_data/completion/remote.txt @@ -0,0 +1,12 @@ +freebsd/11.1 python=2.7,3.6 python_dir=/usr/local/bin +freebsd/12.1 python=3.6,2.7 python_dir=/usr/local/bin +osx/10.11 python=2.7 python_dir=/usr/local/bin +macos/10.15 python=3.8 python_dir=/usr/local/bin +macos/11.1 python=3.9 python_dir=/usr/local/bin +rhel/7.6 python=2.7 +rhel/7.8 python=2.7 +rhel/7.9 python=2.7 +rhel/8.1 python=3.6 +rhel/8.2 python=3.6 +aix/7.2 python=2.7 httptester=disabled temp-unicode=disabled pip-check=disabled +power/centos/7 python=2.7 diff --git a/test/lib/ansible_test/_data/completion/windows.txt b/test/lib/ansible_test/_data/completion/windows.txt new file mode 100644 index 00000000..a4f3bf58 --- /dev/null +++ b/test/lib/ansible_test/_data/completion/windows.txt @@ -0,0 +1,6 @@ +2008 +2008-R2 +2012 +2012-R2 +2016 +2019 \ No newline at end of file diff --git a/test/lib/ansible_test/_data/coveragerc b/test/lib/ansible_test/_data/coveragerc new file mode 100644 index 00000000..e69de29b diff --git a/test/lib/ansible_test/_data/cryptography-constraints.txt b/test/lib/ansible_test/_data/cryptography-constraints.txt new file mode 100644 index 00000000..8e3e99b4 --- /dev/null +++ b/test/lib/ansible_test/_data/cryptography-constraints.txt @@ -0,0 +1,3 @@ +# do not add a cryptography constraint here, see the get_cryptography_requirement function in executor.py for details +idna < 2.8 ; python_version < '2.7' # idna 2.8+ requires python 2.7+ +cffi != 1.14.4 # Fails on systems with older gcc. Should be fixed in the next release. 
https://foss.heptapod.net/pypy/cffi/-/issues/480 diff --git a/test/lib/ansible_test/_data/injector/ansible b/test/lib/ansible_test/_data/injector/ansible new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-config b/test/lib/ansible_test/_data/injector/ansible-config new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-config @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-connection b/test/lib/ansible_test/_data/injector/ansible-connection new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-connection @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-console b/test/lib/ansible_test/_data/injector/ansible-console new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-console @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-doc b/test/lib/ansible_test/_data/injector/ansible-doc new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-doc @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-galaxy b/test/lib/ansible_test/_data/injector/ansible-galaxy new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-galaxy @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-inventory b/test/lib/ansible_test/_data/injector/ansible-inventory new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-inventory @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-playbook b/test/lib/ansible_test/_data/injector/ansible-playbook new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-playbook @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-pull b/test/lib/ansible_test/_data/injector/ansible-pull new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-pull @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-test b/test/lib/ansible_test/_data/injector/ansible-test new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-test @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/ansible-vault b/test/lib/ansible_test/_data/injector/ansible-vault new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/ansible-vault @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/importer.py b/test/lib/ansible_test/_data/injector/importer.py new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/importer.py @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git 
a/test/lib/ansible_test/_data/injector/pytest b/test/lib/ansible_test/_data/injector/pytest new file mode 120000 index 00000000..6bbbfe4d --- /dev/null +++ b/test/lib/ansible_test/_data/injector/pytest @@ -0,0 +1 @@ +python.py \ No newline at end of file diff --git a/test/lib/ansible_test/_data/injector/python.py b/test/lib/ansible_test/_data/injector/python.py new file mode 100755 index 00000000..290b995c --- /dev/null +++ b/test/lib/ansible_test/_data/injector/python.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +"""Provides an entry point for python scripts and python modules on the controller with the current python interpreter and optional code coverage collection.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main entry point.""" + name = os.path.basename(__file__) + args = [sys.executable] + + coverage_config = os.environ.get('COVERAGE_CONF') + coverage_output = os.environ.get('COVERAGE_FILE') + + if coverage_config: + if coverage_output: + args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config] + else: + if sys.version_info >= (3, 4): + # noinspection PyUnresolvedReferences + import importlib.util + + # noinspection PyUnresolvedReferences + found = bool(importlib.util.find_spec('coverage')) + else: + # noinspection PyDeprecation + import imp + + try: + # noinspection PyDeprecation + imp.find_module('coverage') + found = True + except ImportError: + found = False + + if not found: + sys.exit('ERROR: Could not find `coverage` module. ' + 'Did you use a virtualenv created without --system-site-packages or with the wrong interpreter?') + + if name == 'python.py': + if sys.argv[1] == '-c': + # prevent simple misuse of python.py with -c which does not work with coverage + sys.exit('ERROR: Use `python -c` instead of `python.py -c` to avoid errors when code coverage is collected.') + elif name == 'pytest': + args += ['-m', 'pytest'] + else: + args += [find_executable(name)] + + args += sys.argv[1:] + + os.execv(args[0], args) + + +def find_executable(name): + """ + :type name: str + :rtype: str + """ + path = os.environ.get('PATH', os.path.defpath) + seen = set([os.path.abspath(__file__)]) + + for base in path.split(os.path.pathsep): + candidate = os.path.abspath(os.path.join(base, name)) + + if candidate in seen: + continue + + seen.add(candidate) + + if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK): + return candidate + + raise Exception('Executable "%s" not found in path: %s' % (name, path)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh new file mode 100644 index 00000000..af92a056 --- /dev/null +++ b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Create and activate a fresh virtual environment with `source virtualenv-isolated.sh`. + +rm -rf "${OUTPUT_DIR}/venv" + +# Try to use 'venv' if it is available, then fall back to 'virtualenv' since some systems provide 'venv' although it is non-functional. +if [ -z "${ANSIBLE_TEST_PREFER_VENV:-}" ] || [[ "${ANSIBLE_TEST_PYTHON_VERSION}" =~ ^2\. ]] || !
"${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m venv "${OUTPUT_DIR}/venv" > /dev/null 2>&1; then + rm -rf "${OUTPUT_DIR}/venv" + "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv" +fi + +set +ux +source "${OUTPUT_DIR}/venv/bin/activate" +set -ux + +if [[ "${ANSIBLE_TEST_COVERAGE}" ]]; then + pip install coverage -c ../../../runner/requirements/constraints.txt --disable-pip-version-check +fi diff --git a/test/lib/ansible_test/_data/injector/virtualenv.sh b/test/lib/ansible_test/_data/injector/virtualenv.sh new file mode 100644 index 00000000..282e6074 --- /dev/null +++ b/test/lib/ansible_test/_data/injector/virtualenv.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# Create and activate a fresh virtual environment with `source virtualenv.sh`. + +rm -rf "${OUTPUT_DIR}/venv" + +# Try to use 'venv' if it is available, then fallback to 'virtualenv' since some systems provide 'venv' although it is non-functional. +if [ -z "${ANSIBLE_TEST_PREFER_VENV:-}" ] || [[ "${ANSIBLE_TEST_PYTHON_VERSION}" =~ ^2\. ]] || ! "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m venv --system-site-packages "${OUTPUT_DIR}/venv" > /dev/null 2>&1; then + rm -rf "${OUTPUT_DIR}/venv" + "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --system-site-packages --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv" +fi + +set +ux +source "${OUTPUT_DIR}/venv/bin/activate" +set -ux diff --git a/test/lib/ansible_test/_data/inventory b/test/lib/ansible_test/_data/inventory new file mode 100644 index 00000000..1b77a7ea --- /dev/null +++ b/test/lib/ansible_test/_data/inventory @@ -0,0 +1,6 @@ +# Do not put test specific entries in this inventory file. +# For script based test targets (using runme.sh) put the inventory file in the test's directory instead. 
+ +[testgroup] +# ansible_python_interpreter must be set to avoid interpreter discovery +testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml new file mode 100644 index 00000000..2e5ff9c6 --- /dev/null +++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml @@ -0,0 +1,19 @@ +--- +- name: setup global coverage directory for Windows test targets + hosts: windows + gather_facts: no + tasks: + - name: create temp directory + ansible.windows.win_file: + path: '{{ remote_temp_path }}' + state: directory + + - name: allow everyone to write to coverage test dir + ansible.windows.win_acl: + path: '{{ remote_temp_path }}' + user: Everyone + rights: Modify + inherit: ContainerInherit, ObjectInherit + propagation: 'None' + type: allow + state: present \ No newline at end of file diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml new file mode 100644 index 00000000..ab34dc27 --- /dev/null +++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml @@ -0,0 +1,77 @@ +--- +- name: collect the coverage files from the Windows host + hosts: windows + gather_facts: no + tasks: + - name: make sure all vars have been set + assert: + that: + - local_temp_path is defined + - remote_temp_path is defined + + - name: zip up all coverage files in the remote temp path + ansible.windows.win_shell: | + $coverage_dir = '{{ remote_temp_path }}' + $zip_file = Join-Path -Path $coverage_dir -ChildPath 'coverage.zip' + if (Test-Path -LiteralPath $zip_file) { + Remove-Item -LiteralPath $zip_file -Force + } + + $coverage_files = Get-ChildItem -LiteralPath $coverage_dir -Include '*=coverage*' -File + + $legacy = $false + try { + # Requires .NET 4.5+ which isn't present on older Windows versions. Remove once 2008/R2 is EOL. + # We also can't use the Shell.Application as it will fail on GUI-less servers (Server Core).
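+ # If the Add-Type call below throws, the compression API is unavailable, $legacy + # is flipped to true, and the Shell.Application COM fallback further down builds + # the zip by copying the files in one at a time.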
+ Add-Type -AssemblyName System.IO.Compression -ErrorAction Stop > $null + } catch { + $legacy = $true + } + + if ($legacy) { + New-Item -Path $zip_file -ItemType File > $null + $shell = New-Object -ComObject Shell.Application + $zip = $shell.Namespace($zip_file) + foreach ($file in $coverage_files) { + $zip.CopyHere($file.FullName) + } + } else { + $fs = New-Object -TypeName System.IO.FileStream -ArgumentList $zip_file, 'CreateNew' + try { + $archive = New-Object -TypeName System.IO.Compression.ZipArchive -ArgumentList @( + $fs, + [System.IO.Compression.ZipArchiveMode]::Create + ) + try { + foreach ($file in $coverage_files) { + $archive_entry = $archive.CreateEntry($file.Name, 'Optimal') + $entry_fs = $archive_entry.Open() + try { + $file_fs = [System.IO.File]::OpenRead($file.FullName) + try { + $file_fs.CopyTo($entry_fs) + } finally { + $file_fs.Dispose() + } + } finally { + $entry_fs.Dispose() + } + } + } finally { + $archive.Dispose() + } + } finally { + $fs.Dispose() + } + } + + - name: fetch coverage zip file to localhost + fetch: + src: '{{ remote_temp_path }}\coverage.zip' + dest: '{{ local_temp_path }}/coverage-{{ inventory_hostname }}.zip' + flat: yes + + - name: remove the temporary coverage directory + ansible.windows.win_file: + path: '{{ remote_temp_path }}' + state: absent \ No newline at end of file diff --git a/test/lib/ansible_test/_data/pytest.ini b/test/lib/ansible_test/_data/pytest.ini new file mode 100644 index 00000000..2ac56423 --- /dev/null +++ b/test/lib/ansible_test/_data/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +xfail_strict = true +mock_use_standalone_module = true +# It was decided to stick with "legacy" (aka "xunit1") for now. +# Currently used pytest versions all support xunit2 format too. +# Except the one used under Python 2.6 — it doesn't process this option +# at all. Ref: +# https://github.com/ansible/ansible/pull/66445#discussion_r372530176 +junit_family = xunit1 diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py new file mode 100644 index 00000000..67c69f15 --- /dev/null +++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py @@ -0,0 +1,67 @@ +"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself +ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections') + + +# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0 +# NB: this code should never run under py2 +def collection_resolve_package_path(path): + """Configure the Python package path so that pytest can find our collections.""" + for parent in path.parents: + if str(parent) == ANSIBLE_COLLECTIONS_PATH: + return parent + + raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH)) + + +# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0 +def collection_pypkgpath(self): + """Configure the Python package path so that pytest can find our collections.""" + for parent in self.parts(reverse=True): + if str(parent) == ANSIBLE_COLLECTIONS_PATH: + return parent + + raise Exception('File "%s" not found in collection path "%s".' 
% (self.strpath, ANSIBLE_COLLECTIONS_PATH)) + + +def pytest_configure(): + """Configure this pytest plugin.""" + try: + if pytest_configure.executed: + return + except AttributeError: + pytest_configure.executed = True + + # noinspection PyProtectedMember + from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder + + # allow unit tests to import code from collections + + # noinspection PyProtectedMember + _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install() # pylint: disable=protected-access + + try: + # noinspection PyProtectedMember + from _pytest import pathlib as _pytest_pathlib + except ImportError: + _pytest_pathlib = None + + if hasattr(_pytest_pathlib, 'resolve_package_path'): + _pytest_pathlib.resolve_package_path = collection_resolve_package_path + else: + # looks like pytest < 6.0.0, use the old hack against py.path + # noinspection PyProtectedMember + import py._path.local + + # force collections unit tests to be loaded with the ansible_collections namespace + # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552 + # noinspection PyProtectedMember + py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access + + +pytest_configure() diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py new file mode 100644 index 00000000..b05298ab --- /dev/null +++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py @@ -0,0 +1,68 @@ +"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`.
PYTEST_DONT_REWRITE""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def pytest_configure(): + """Configure this pytest plugin.""" + try: + if pytest_configure.executed: + return + except AttributeError: + pytest_configure.executed = True + + try: + import coverage + except ImportError: + coverage = None + + try: + coverage.Coverage + except AttributeError: + coverage = None + + if not coverage: + return + + import gc + import os + + coverage_instances = [] + + for obj in gc.get_objects(): + if isinstance(obj, coverage.Coverage): + coverage_instances.append(obj) + + if not coverage_instances: + coverage_config = os.environ.get('COVERAGE_CONF') + + if not coverage_config: + return + + coverage_output = os.environ.get('COVERAGE_FILE') + + if not coverage_output: + return + + cov = coverage.Coverage(config_file=coverage_config) + coverage_instances.append(cov) + else: + cov = None + + # noinspection PyProtectedMember + os_exit = os._exit # pylint: disable=protected-access + + def coverage_exit(*args, **kwargs): + for instance in coverage_instances: + instance.stop() + instance.save() + + os_exit(*args, **kwargs) + + os._exit = coverage_exit # pylint: disable=protected-access + + if cov: + cov.start() + + +pytest_configure() diff --git a/test/lib/ansible_test/_data/quiet_pip.py b/test/lib/ansible_test/_data/quiet_pip.py new file mode 100644 index 00000000..7d2a6d16 --- /dev/null +++ b/test/lib/ansible_test/_data/quiet_pip.py @@ -0,0 +1,70 @@ +"""Custom entry-point for pip that filters out unwanted logging and warnings.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import logging +import re +import runpy +import warnings + +BUILTIN_FILTERER_FILTER = logging.Filterer.filter + +LOGGING_MESSAGE_FILTER = re.compile("^(" + ".*Running pip install with root privileges is generally not a good idea.*|" # custom Fedora patch [1] + "DEPRECATION: Python 2.7 will reach the end of its life .*|" # pip 19.2.3 + "Ignoring .*: markers .* don't match your environment|" + "Requirement already satisfied.*" + ")$") + +# [1] https://src.fedoraproject.org/rpms/python-pip/blob/master/f/emit-a-warning-when-running-with-root-privileges.patch + +WARNING_MESSAGE_FILTERS = ( + # DEPRECATION: Python 2.6 is no longer supported by the Python core team, please upgrade your Python. + # A future version of pip will drop support for Python 2.6 + 'Python 2.6 is no longer supported by the Python core team, ', + + # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:137: InsecurePlatformWarning: + # A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. + # You can upgrade to a newer version of Python to solve this. + # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings + 'A true SSLContext object is not available. ', + + # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:339: SNIMissingWarning: + # An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. + # This may cause the server to present an incorrect TLS certificate, which can cause validation failures. + # You can upgrade to a newer version of Python to solve this. 
+ # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings + 'An HTTPS request has been made, but the SNI ', + + # DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. + # pip 21.0 will drop support for Python 2.7 in January 2021. + # More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support + 'DEPRECATION: Python 2.7 reached the end of its life ', +) + + +def custom_filterer_filter(self, record): + """Globally omit logging of unwanted messages.""" + if LOGGING_MESSAGE_FILTER.search(record.getMessage()): + return 0 + + return BUILTIN_FILTERER_FILTER(self, record) + + +def main(): + """Main program entry point.""" + # Filtering logging output globally avoids having to intercept stdout/stderr. + # It also avoids problems with loss of color output and mixing up the order of stdout/stderr messages. + logging.Filterer.filter = custom_filterer_filter + + for message_filter in WARNING_MESSAGE_FILTERS: + # Setting filterwarnings in code is necessary because of the following: + # Python 2.6 does not support the PYTHONWARNINGS environment variable. It does support the -W option. + # Python 2.7 cannot use the -W option to match warning text after a colon. This makes it impossible to match specific warning messages. + warnings.filterwarnings('ignore', message_filter) + + runpy.run_module('pip.__main__', run_name='__main__', alter_sys=True) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/requirements/ansible-test.txt b/test/lib/ansible_test/_data/requirements/ansible-test.txt new file mode 100644 index 00000000..7b596e1b --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/ansible-test.txt @@ -0,0 +1,6 @@ +argparse ; python_version < '2.7' + +# pip 7.1 added support for constraints, which are required by ansible-test to install most python requirements +# see https://github.com/pypa/pip/blame/e648e00dc0226ade30ade99591b245b0c98e86c9/NEWS.rst#L1258 +pip >= 7.1, < 10 ; python_version < '2.7' # pip 10+ drops support for python 2.6 (sanity_ok) +pip >= 7.1 ; python_version >= '2.7' # sanity_ok diff --git a/test/lib/ansible_test/_data/requirements/constraints.txt b/test/lib/ansible_test/_data/requirements/constraints.txt new file mode 100644 index 00000000..81ee480c --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/constraints.txt @@ -0,0 +1,64 @@ +coverage >= 4.5.1, < 5.0.0 ; python_version < '3.7' # coverage 4.4 required for "disable_warnings" support but 4.5.1 needed for bug fixes, coverage 5.0+ incompatible +coverage >= 4.5.2, < 5.0.0 ; python_version == '3.7' # coverage 4.5.2 fixes bugs in support for python 3.7, coverage 5.0+ incompatible +coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible +cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6 +# do not add a cryptography constraint here unless it is for python version incompatibility, see the get_cryptography_requirement function in executor.py for details +deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3 +jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later +urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later +pywinrm >= 0.3.0 # 
message encryption support +sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later +sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3 +pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers +wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later +yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+ +pycrypto >= 2.6 # Need features found in 2.6 and greater +ncclient >= 0.5.2 # Need features added in 0.5.2 and greater +idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead +paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6 +pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6 +pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7 +pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later +pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+) +ntlm-auth >= 1.3.0 # message encryption support using cryptography +requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6 +requests-ntlm >= 1.1.0 # message encryption support +requests-credssp >= 0.1.0 # message encryption support +voluptuous >= 0.11.0 # Schema recursion via Self +openshift >= 0.6.2, < 0.9.0 # merge_type support +virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later +pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later +pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later +pyparsing < 3.0.0 ; python_version < '3.5' # pyparsing 3 and later require python 3.5 or later +pyfmg == 0.6.1 # newer versions do not pass current unit tests +pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later +pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later +mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...) 
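+# Illustrative usage (not a constraint entry): pip applies a file like this one with +# `pip install -r requirements.txt -c constraints.txt`, which caps the versions of +# anything the requirements pull in without forcing those packages to be installed.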
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option +xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later +lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later +pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later +pyone == 1.1.9 # newer versions do not pass current integration tests +boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support +botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support +botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca +setuptools < 37 ; python_version == '2.6' # setuptools 37 and later require python 2.7 or later +setuptools < 45 ; python_version == '2.7' # setuptools 45 and later require python 3.5 or later + +# freeze antsibull-changelog for consistent test results +antsibull-changelog == 0.9.0 + +# Make sure we have a new enough antsibull for the CLI args we use +antsibull >= 0.21.0 + +# freeze pylint and its requirements for consistent test results +astroid == 2.3.3 +isort == 4.3.15 +lazy-object-proxy == 1.4.3 +mccabe == 0.6.1 +pylint == 2.3.1 +typed-ast == 1.4.1 +wrapt == 1.11.1 + +# freeze pycodestyle for consistent test results +pycodestyle == 2.6.0 diff --git a/test/lib/ansible_test/_data/requirements/coverage.txt b/test/lib/ansible_test/_data/requirements/coverage.txt new file mode 100644 index 00000000..4ebc8aea --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/coverage.txt @@ -0,0 +1 @@ +coverage diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt new file mode 100644 index 00000000..aa2f71cc --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt @@ -0,0 +1,3 @@ +boto +boto3 +botocore diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt new file mode 100644 index 00000000..6df1a4e8 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt @@ -0,0 +1,39 @@ +packaging +requests[security] +xmltodict +azure-cli-core==2.0.35 +azure-cli-nspkg==3.0.2 +azure-common==1.1.11 +azure-mgmt-authorization==0.51.1 +azure-mgmt-batch==5.0.1 +azure-mgmt-cdn==3.0.0 +azure-mgmt-compute==10.0.0 +azure-mgmt-containerinstance==1.4.0 +azure-mgmt-containerregistry==2.0.0 +azure-mgmt-containerservice==4.4.0 +azure-mgmt-dns==2.1.0 +azure-mgmt-keyvault==1.1.0 +azure-mgmt-marketplaceordering==0.1.0 +azure-mgmt-monitor==0.5.2 +azure-mgmt-network==4.0.0 +azure-mgmt-nspkg==2.0.0 +azure-mgmt-redis==5.0.0 +azure-mgmt-resource==2.1.0 +azure-mgmt-rdbms==1.4.1 +azure-mgmt-servicebus==0.5.3 +azure-mgmt-sql==0.10.0 +azure-mgmt-storage==3.1.0 +azure-mgmt-trafficmanager==0.50.0 +azure-mgmt-web==0.41.0 +azure-nspkg==2.0.0 +azure-storage==0.35.1 +msrest==0.6.10 +msrestazure==0.6.2 +azure-keyvault==1.0.0a1 +azure-graphrbac==0.40.0 +azure-mgmt-cosmosdb==0.5.2 +azure-mgmt-hdinsight==0.1.0 +azure-mgmt-devtestlabs==3.0.0 +azure-mgmt-loganalytics==0.2.0 +azure-mgmt-automation==0.1.1 +azure-mgmt-iothub==0.7.0 diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt new file mode 100644 index 
00000000..f0a89b91 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt @@ -0,0 +1,2 @@ +cs +sshpubkeys diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt new file mode 100644 index 00000000..a6580e69 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt @@ -0,0 +1 @@ +hcloud>=1.6.0 ; python_version >= '2.7' and python_version < '3.9' # Python 2.6 is not supported (sanity_ok); Only hcloud >= 1.6.0 supports Floating IPs with names; Python 3.9 and later are not supported diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt new file mode 100644 index 00000000..be611454 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt @@ -0,0 +1 @@ +infoblox-client diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt new file mode 100644 index 00000000..acd34668 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt @@ -0,0 +1 @@ +pyone \ No newline at end of file diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt new file mode 100644 index 00000000..269bf090 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt @@ -0,0 +1 @@ +openshift diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt new file mode 100644 index 00000000..fd8f1398 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt @@ -0,0 +1,2 @@ +pyvmomi +git+https://github.com/vmware/vsphere-automation-sdk-python.git ; python_version >= '2.7' # Python 2.6 is not supported diff --git a/test/lib/ansible_test/_data/requirements/integration.txt b/test/lib/ansible_test/_data/requirements/integration.txt new file mode 100644 index 00000000..2c562615 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/integration.txt @@ -0,0 +1,6 @@ +cryptography +jinja2 +junit-xml +ordereddict ; python_version < '2.7' +packaging +pyyaml diff --git a/test/lib/ansible_test/_data/requirements/network-integration.txt b/test/lib/ansible_test/_data/requirements/network-integration.txt new file mode 100644 index 00000000..726d2943 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/network-integration.txt @@ -0,0 +1,7 @@ +cryptography +jinja2 +junit-xml +ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement +packaging +paramiko +pyyaml diff --git a/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt new file mode 100644 index 00000000..abd6c5fd --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt @@ -0,0 +1,2 @@ +jinja2 # ansible-base requirement +pyyaml # ansible-base requirement diff --git a/test/lib/ansible_test/_data/requirements/sanity.changelog.txt b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt new file mode 100644 index 00000000..8a98acc9 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt @@ -0,0 +1,2 @@ +# changelog build requires 
python 3.6+ +antsibull-changelog ; python_version >= '3.6' diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.txt b/test/lib/ansible_test/_data/requirements/sanity.import.txt new file mode 100644 index 00000000..17e375ce --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.import.txt @@ -0,0 +1,2 @@ +pyyaml # required for the collection loader to parse yaml for plugin routing +virtualenv ; python_version <= '2.7' # virtualenv required on Python 2.x, but on Python 3.x we can use the built-in venv instead diff --git a/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt new file mode 100644 index 00000000..c3726e8b --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt @@ -0,0 +1 @@ +pyyaml diff --git a/test/lib/ansible_test/_data/requirements/sanity.pep8.txt b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt new file mode 100644 index 00000000..282a93fb --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt @@ -0,0 +1 @@ +pycodestyle diff --git a/test/lib/ansible_test/_data/requirements/sanity.ps1 b/test/lib/ansible_test/_data/requirements/sanity.ps1 new file mode 100755 index 00000000..1ea1f8e5 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.ps1 @@ -0,0 +1,45 @@ +#!/usr/bin/env pwsh +param ( + [Switch] + $IsContainer +) + +#Requires -Version 6 + +Set-StrictMode -Version 2.0 +$ErrorActionPreference = "Stop" +$ProgressPreference = 'SilentlyContinue' + +Function Install-PSModule { + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [String] + $Name, + + [Parameter(Mandatory=$true)] + [Version] + $RequiredVersion + ) + + # In case PSGallery is down we check if the module is already installed. + $installedModule = Get-Module -Name $Name -ListAvailable | Where-Object Version -eq $RequiredVersion + if (-not $installedModule) { + Install-Module -Name $Name -RequiredVersion $RequiredVersion -Scope CurrentUser + } +} + +Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +Install-PSModule -Name PSScriptAnalyzer -RequiredVersion 1.18.0 + +if ($IsContainer) { + # PSScriptAnalyzer contains lots of json files for the UseCompatibleCommands check. We don't use this rule, so by + # removing the contents we can save 200MB in the docker image (or more in the future).
+ # https://github.com/PowerShell/PSScriptAnalyzer/blob/master/RuleDocumentation/UseCompatibleCommands.md + $pssaPath = (Get-Module -ListAvailable -Name PSScriptAnalyzer).ModuleBase + $compatPath = Join-Path -Path $pssaPath -ChildPath compatibility_profiles -AdditionalChildPath '*' + Remove-Item -Path $compatPath -Recurse -Force +} + +# Install the PSCustomUseLiteralPath rule +Install-PSModule -Name PSSA-PSCustomUseLiteralPath -RequiredVersion 0.1.1 diff --git a/test/lib/ansible_test/_data/requirements/sanity.pylint.txt b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt new file mode 100644 index 00000000..438ca51d --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt @@ -0,0 +1,3 @@ +pylint +pyyaml # needed for collection_detail.py +mccabe # pylint complexity testing diff --git a/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt new file mode 100644 index 00000000..3a5eeed1 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt @@ -0,0 +1 @@ +rstcheck diff --git a/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt new file mode 100644 index 00000000..edd96991 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt @@ -0,0 +1,2 @@ +pyyaml +voluptuous diff --git a/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt new file mode 100644 index 00000000..5c0fca78 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt @@ -0,0 +1,3 @@ +jinja2 # ansible-base requirement +pyyaml # needed for collection_detail.py +voluptuous diff --git a/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt new file mode 100644 index 00000000..b2c729ca --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt @@ -0,0 +1 @@ +yamllint diff --git a/test/lib/ansible_test/_data/requirements/units.txt b/test/lib/ansible_test/_data/requirements/units.txt new file mode 100644 index 00000000..307d7c35 --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/units.txt @@ -0,0 +1,7 @@ +cryptography +jinja2 +mock +pytest +pytest-mock +pytest-xdist +pyyaml diff --git a/test/lib/ansible_test/_data/requirements/windows-integration.txt b/test/lib/ansible_test/_data/requirements/windows-integration.txt new file mode 100644 index 00000000..86de35ee --- /dev/null +++ b/test/lib/ansible_test/_data/requirements/windows-integration.txt @@ -0,0 +1,11 @@ +cryptography +jinja2 +junit-xml +ntlm-auth +ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement +requests-ntlm +requests-credssp +packaging +pypsrp +pywinrm[credssp] +pyyaml diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json new file mode 100644 index 00000000..12bbe0d1 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json @@ -0,0 +1,13 @@ +{ + "all_targets": true, + "prefixes": [ + "lib/ansible/modules/", + "lib/ansible/plugins/action/", + "plugins/modules/", + "plugins/action/" + ], + "extensions": [ + ".py" + ], + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py new file mode 100755 index 00000000..65142e00 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +"""Test to verify action plugins have an associated module to provide documentation.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + module_names = set() + + module_prefixes = { + 'lib/ansible/modules/': True, + 'plugins/modules/': False, + } + + action_prefixes = { + 'lib/ansible/plugins/action/': True, + 'plugins/action/': False, + } + + for path in paths: + full_name = get_full_name(path, module_prefixes) + + if full_name: + module_names.add(full_name) + + for path in paths: + full_name = get_full_name(path, action_prefixes) + + if full_name and full_name not in module_names: + print('%s: action plugin has no matching module to provide documentation' % path) + + +def get_full_name(path, prefixes): + """Return the full name of the plugin at the given path by matching against the given path prefixes, or None if no match is found.""" + for prefix, flat in prefixes.items(): + if path.startswith(prefix): + relative_path = os.path.relpath(path, prefix) + + if flat: + full_name = os.path.basename(relative_path) + else: + full_name = relative_path + + full_name = os.path.splitext(full_name)[0] + + name = os.path.basename(full_name) + + if name == '__init__': + return None + + if name.startswith('_'): + name = name[1:] + + full_name = os.path.join(os.path.dirname(full_name), name).replace(os.path.sep, '.') + + return full_name + + return None + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.json b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json new file mode 100644 index 00000000..87f223b1 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json @@ -0,0 +1,9 @@ +{ + "intercept": true, + "minimum_python_version": "3.6", + "prefixes": [ + "changelogs/config.yaml", + "changelogs/fragments/" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.py b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py new file mode 100755 index 00000000..710b10f6 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import subprocess + + +def main(): + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + allowed_extensions = ('.yml', '.yaml') + config_path = 'changelogs/config.yaml' + + # config must be detected independent of the file list since the file list only contains files under test (changed) + has_config = os.path.exists(config_path) + paths_to_check = [] + for path in paths: + if path == config_path: + continue + + if path.startswith('changelogs/fragments/.'): + if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'): + continue + + print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0)) + continue + + ext = os.path.splitext(path)[1] + + if ext not in allowed_extensions: + print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions))) + + paths_to_check.append(path) + + if not has_config: + 
print('changelogs/config.yaml:0:0: config file does not exist') + return + + if not paths_to_check: + return + + cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check + subprocess.call(cmd) # ignore the return code, rely on the output instead + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json new file mode 100644 index 00000000..9835f9b6 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json @@ -0,0 +1,14 @@ +{ + "prefixes": [ + "lib/ansible/modules/", + "lib/ansible/module_utils/", + "plugins/modules/", + "plugins/module_utils/", + "test/units/", + "tests/unit/" + ], + "files": [ + "__init__.py" + ], + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py new file mode 100755 index 00000000..8bcd7f9e --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + if os.path.getsize(path) > 0: + print('%s: empty __init__.py required' % path) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json new file mode 100644 index 00000000..6f1edb78 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json @@ -0,0 +1,6 @@ +{ + "extensions": [ + ".py" + ], + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py new file mode 100755 index 00000000..81081eed --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ast +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as path_fd: + lines = path_fd.read().splitlines() + + missing = True + if not lines: + # Files are allowed to be empty of everything including boilerplate + missing = False + + for text in lines: + if text in (b'from __future__ import (absolute_import, division, print_function)', + b'from __future__ import absolute_import, division, print_function'): + missing = False + break + + if missing: + with open(path) as file: + contents = file.read() + + # noinspection PyBroadException + try: + node = ast.parse(contents) + + # files consisting of only assignments have no need for future import boilerplate + # the only exception would be division during assignment, but we'll overlook that for simplicity + # the most likely case is that of a documentation only python file + if all(isinstance(statement, ast.Assign) for statement in node.body): + missing = False + except Exception: # pylint: disable=broad-except + pass # the compile sanity test will report this error + + if missing: + print('%s: missing: from __future__ import (absolute_import, division, print_function)' % path) + + +if __name__ == '__main__': + main() diff --git 
a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json new file mode 100644 index 00000000..db5c3c98 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json @@ -0,0 +1,4 @@ +{ + "text": true, + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py new file mode 100755 index 00000000..1e4212d1 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as path_fd: + contents = path_fd.read() + + if b'\r' in contents: + print('%s: use "\\n" for line endings instead of "\\r\\n"' % path) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json new file mode 100644 index 00000000..6f1edb78 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json @@ -0,0 +1,6 @@ +{ + "extensions": [ + ".py" + ], + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py new file mode 100755 index 00000000..28d06f36 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ast +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as path_fd: + lines = path_fd.read().splitlines() + + missing = True + if not lines: + # Files are allowed to be empty of everything including boilerplate + missing = False + + for text in lines: + if text == b'__metaclass__ = type': + missing = False + break + + if missing: + with open(path) as file: + contents = file.read() + + # noinspection PyBroadException + try: + node = ast.parse(contents) + + # files consisting of only assignments have no need for metaclass boilerplate + # the most likely case is that of a documentation only python file + if all(isinstance(statement, ast.Assign) for statement in node.body): + missing = False + except Exception: # pylint: disable=broad-except + pass # the compile sanity test will report this error + + if missing: + print('%s: missing: __metaclass__ = type' % path) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json new file mode 100644 index 00000000..ccee80a2 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json @@ -0,0 +1,10 @@ +{ + "extensions": [ + ".py" + ], + "prefixes": [ + "lib/ansible/", + "plugins/" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py new file mode 100755 index 00000000..78561d96 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, 
division, print_function) +__metaclass__ = type + +import re +import sys + +ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]') + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as f: + for i, line in enumerate(f.readlines()): + matches = ASSERT_RE.findall(line) + + if matches: + lineno = i + 1 + colno = line.index('assert') + 1 + print('%s:%d:%d: raise AssertionError instead of: %s' % (path, lineno, colno, matches[0][colno - 1:])) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json new file mode 100644 index 00000000..88858aeb --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json @@ -0,0 +1,7 @@ +{ + "extensions": [ + ".py" + ], + "ignore_self": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py new file mode 100755 index 00000000..a35650ef --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'(isinstance.*basestring)', text) + + if match: + print('%s:%d:%d: do not use `isinstance(s, basestring)`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json new file mode 100644 index 00000000..88858aeb --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json @@ -0,0 +1,7 @@ +{ + "extensions": [ + ".py" + ], + "ignore_self": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py new file mode 100755 index 00000000..e28b24f4 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'(?<! six)\.(iteritems)', text) + + if match: + print('%s:%d:%d: use `dict.items` or `ansible.module_utils.six.iteritems` instead of `dict.iteritems`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py new file mode 100755 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py +#!/usr/bin/env python +# a script to check for illegal filenames on various operating systems; the +# main rules are derived from restrictions on Windows +# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import struct +import sys + +from ansible.module_utils.basic import to_bytes + +ILLEGAL_CHARS = [ + b'<', + b'>', + b':', + b'"', + b'/', + b'\\', + b'|', + b'?', + b'*' +] + [struct.pack("b", i) for i in range(32)] + +ILLEGAL_NAMES = [ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +] + +ILLEGAL_END_CHARS = [ + '.', + ' ', +] + + +def check_path(path, is_dir=False): + type_name = 'directory' if is_dir else 'file' + file_name = os.path.basename(path.rstrip(os.path.sep)) + name = os.path.splitext(file_name)[0] + + if name.upper() in ILLEGAL_NAMES: + print("%s: illegal %s name %s" % (path, type_name, name.upper())) + + if file_name[-1] in ILLEGAL_END_CHARS: + print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1])) + + 
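+ # the checks below compare UTF-8 bytes so that the reserved single-byte characters
+ # (and the 0x00-0x1f control range packed into ILLEGAL_CHARS above) match directly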
bfile = to_bytes(file_name, encoding='utf-8') + for char in ILLEGAL_CHARS: + if char in bfile: + bpath = to_bytes(path, encoding='utf-8') + print("%s: illegal char '%s' in %s name" % (bpath, char, type_name)) + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + check_path(path, is_dir=path.endswith(os.path.sep)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json new file mode 100644 index 00000000..ccee80a2 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json @@ -0,0 +1,10 @@ +{ + "extensions": [ + ".py" + ], + "prefixes": [ + "lib/ansible/", + "plugins/" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py new file mode 100755 index 00000000..74a36ecc --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +MAIN_DISPLAY_IMPORT = 'from __main__ import display' + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as f: + for i, line in enumerate(f.readlines()): + if MAIN_DISPLAY_IMPORT in line: + lineno = i + 1 + colno = line.index(MAIN_DISPLAY_IMPORT) + 1 + print('%s:%d:%d: Display is a singleton, just import and instantiate' % (path, lineno, colno)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json new file mode 100644 index 00000000..54d9fff5 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json @@ -0,0 +1,5 @@ +{ + "text": true, + "ignore_self": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py new file mode 100755 index 00000000..e44005a5 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as path_fd: + for line, text in enumerate(path_fd.readlines()): + try: + text = text.decode('utf-8') + except UnicodeDecodeError as ex: + print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex)) + continue + + match = re.search(u'([‘’“”])', text) + + if match: + print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json new file mode 100644 index 00000000..88858aeb --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json @@ -0,0 +1,7 @@ +{ + "extensions": [ + ".py" + ], + "ignore_self": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py 
b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py new file mode 100755 index 00000000..e2201ab1 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'(unicode_literals)', text) + + if match: + print('%s:%d:%d: do not use `unicode_literals`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json new file mode 100644 index 00000000..88858aeb --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json @@ -0,0 +1,7 @@ +{ + "extensions": [ + ".py" + ], + "ignore_self": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py new file mode 100755 index 00000000..b2de1ba8 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'^(?:[^#]*?)(urlopen)', text) + + if match: + print('%s:%d:%d: use `ansible.module_utils.urls.open_url` instead of `urlopen`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json new file mode 100644 index 00000000..44003ec0 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json @@ -0,0 +1,11 @@ +{ + "prefixes": [ + "lib/ansible/config/ansible_builtin_runtime.yml", + "meta/routing.yml", + "meta/runtime.yml" + ], + "extensions": [ + ".yml" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py new file mode 100755 index 00000000..b986db2b --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +"""Schema validation of ansible-base's ansible_builtin_runtime.yml and collection's meta/runtime.yml""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import os +import re +import sys +import yaml + +from voluptuous import Any, MultipleInvalid, PREVENT_EXTRA +from voluptuous import Required, Schema, Invalid +from voluptuous.humanize import humanize_error + +from ansible.module_utils.six import string_types + + +def isodate(value): + """Validate a datetime.date or ISO 8601 date string.""" + # datetime.date objects come from YAML dates, these are ok + if isinstance(value, datetime.date): + return value + # make sure we have a string + msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date' + if not 
isinstance(value, string_types): + raise Invalid(msg) + try: + datetime.datetime.strptime(value, '%Y-%m-%d').date() + except ValueError: + raise Invalid(msg) + return value + + +def validate_metadata_file(path): + """Validate explicit runtime metadata file""" + try: + with open(path, 'r') as f_path: + routing = yaml.safe_load(f_path) + except yaml.error.MarkedYAMLError as ex: + print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + return + except Exception as ex: # pylint: disable=broad-except + print('%s:%d:%d: YAML load failed: %s' % + (path, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + return + + # Updates to schema MUST also be reflected in the documentation + # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html + + # plugin_routing schema + + deprecation_tombstoning_schema = Any(Schema( + { + Required('removal_date'): Any(isodate), + 'warning_text': Any(*string_types), + }, + extra=PREVENT_EXTRA + ), Schema( + { + Required('removal_version'): Any(*string_types), + 'warning_text': Any(*string_types), + }, + extra=PREVENT_EXTRA + )) + + plugin_routing_schema = Any( + Schema({ + ('deprecation'): Any(deprecation_tombstoning_schema), + ('tombstone'): Any(deprecation_tombstoning_schema), + ('redirect'): Any(*string_types), + }, extra=PREVENT_EXTRA), + ) + + list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} + for str_type in string_types] + + plugin_schema = Schema({ + ('action'): Any(None, *list_dict_plugin_routing_schema), + ('become'): Any(None, *list_dict_plugin_routing_schema), + ('cache'): Any(None, *list_dict_plugin_routing_schema), + ('callback'): Any(None, *list_dict_plugin_routing_schema), + ('cliconf'): Any(None, *list_dict_plugin_routing_schema), + ('connection'): Any(None, *list_dict_plugin_routing_schema), + ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), + ('filter'): Any(None, *list_dict_plugin_routing_schema), + ('httpapi'): Any(None, *list_dict_plugin_routing_schema), + ('inventory'): Any(None, *list_dict_plugin_routing_schema), + ('lookup'): Any(None, *list_dict_plugin_routing_schema), + ('module_utils'): Any(None, *list_dict_plugin_routing_schema), + ('modules'): Any(None, *list_dict_plugin_routing_schema), + ('netconf'): Any(None, *list_dict_plugin_routing_schema), + ('shell'): Any(None, *list_dict_plugin_routing_schema), + ('strategy'): Any(None, *list_dict_plugin_routing_schema), + ('terminal'): Any(None, *list_dict_plugin_routing_schema), + ('test'): Any(None, *list_dict_plugin_routing_schema), + ('vars'): Any(None, *list_dict_plugin_routing_schema), + }, extra=PREVENT_EXTRA) + + # import_redirection schema + + import_redirection_schema = Any( + Schema({ + ('redirect'): Any(*string_types), + # import_redirect doesn't currently support deprecation + }, extra=PREVENT_EXTRA) + ) + + list_dict_import_redirection_schema = [{str_type: import_redirection_schema} + for str_type in string_types] + + # top level schema + + schema = Schema({ + # All of these are optional + ('plugin_routing'): Any(plugin_schema), + ('import_redirection'): Any(None, *list_dict_import_redirection_schema), + # requires_ansible: In the future we should validate this with SpecifierSet + ('requires_ansible'): Any(*string_types), + ('action_groups'): dict, + }, extra=PREVENT_EXTRA) + + # Ensure schema is valid + + try: + schema(routing) + except MultipleInvalid as ex: + for error in ex.errors: + # No way to get line/column numbers + print('%s:%d:%d: %s' % (path, 0, 
0, humanize_error(routing, error))) + + +def main(): + """Validate runtime metadata""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + collection_legacy_file = 'meta/routing.yml' + collection_runtime_file = 'meta/runtime.yml' + + for path in paths: + if path == collection_legacy_file: + print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file))) + continue + + validate_metadata_file(path) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.json b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json new file mode 100644 index 00000000..5648429e --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json @@ -0,0 +1,4 @@ +{ + "text": true, + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.py b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py new file mode 100755 index 00000000..7cf3cf72 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import stat +import sys + + +def main(): + standard_shebangs = set([ + b'#!/bin/bash -eu', + b'#!/bin/bash -eux', + b'#!/bin/sh', + b'#!/usr/bin/env bash', + b'#!/usr/bin/env fish', + b'#!/usr/bin/env pwsh', + b'#!/usr/bin/env python', + b'#!/usr/bin/make -f', + ]) + + integration_shebangs = set([ + b'#!/bin/sh', + b'#!/usr/bin/env bash', + b'#!/usr/bin/env python', + ]) + + module_shebangs = { + '': b'#!/usr/bin/python', + '.py': b'#!/usr/bin/python', + '.ps1': b'#!powershell', + } + + # see https://unicode.org/faq/utf_bom.html#bom1 + byte_order_marks = ( + (b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'), + (b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'), + (b'\xFE\xFF', 'UTF-16 (BE)'), + (b'\xFF\xFE', 'UTF-16 (LE)'), + (b'\xEF\xBB\xBF', 'UTF-8'), + ) + + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as path_fd: + shebang = path_fd.readline().strip() + mode = os.stat(path).st_mode + executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode + + if not shebang or not shebang.startswith(b'#!'): + if executable: + print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0)) + + for mark, name in byte_order_marks: + if shebang.startswith(mark): + print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name)) + break + + continue + + is_module = False + is_integration = False + + dirname = os.path.dirname(path) + + if path.startswith('lib/ansible/modules/'): + is_module = True + elif re.search('^test/support/[^/]+/plugins/modules/', path): + is_module = True + elif re.search('^test/support/[^/]+/collections/ansible_collections/[^/]+/[^/]+/plugins/modules/', path): + is_module = True + elif path.startswith('test/lib/ansible_test/_data/'): + pass + elif path.startswith('lib/') or path.startswith('test/lib/'): + if executable: + print('%s:%d:%d: should not be executable' % (path, 0, 0)) + + if shebang: + print('%s:%d:%d: should not have a shebang' % (path, 0, 0)) + + continue + elif path.startswith('test/integration/targets/') or path.startswith('tests/integration/targets/'): + is_integration = True + + if dirname.endswith('/library') or '/plugins/modules' in dirname or dirname in ( + # non-standard module library directories + 'test/integration/targets/module_precedence/lib_no_extension', + 
'test/integration/targets/module_precedence/lib_with_extension', + ): + is_module = True + elif path.startswith('plugins/modules/'): + is_module = True + + if is_module: + if executable: + print('%s:%d:%d: module should not be executable' % (path, 0, 0)) + + ext = os.path.splitext(path)[1] + expected_shebang = module_shebangs.get(ext) + expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs]) + + if expected_shebang: + if shebang == expected_shebang: + continue + + print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang)) + else: + print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext)) + else: + if is_integration: + allowed = integration_shebangs + else: + allowed = standard_shebangs + + if shebang not in allowed: + print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json new file mode 100644 index 00000000..6f13c86b --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json @@ -0,0 +1,5 @@ +{ + "include_directories": true, + "include_symlinks": true, + "output": "path-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py new file mode 100755 index 00000000..0585c6b1 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + root_dir = os.getcwd() + os.path.sep + + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + if not os.path.islink(path.rstrip(os.path.sep)): + continue + + if not os.path.exists(path): + print('%s: broken symlinks are not allowed' % path) + continue + + if path.endswith(os.path.sep): + print('%s: symlinks to directories are not allowed' % path) + continue + + real_path = os.path.realpath(path) + + if not real_path.startswith(root_dir): + print('%s: symlinks outside content tree are not allowed: %s' % (path, os.path.relpath(real_path, os.path.dirname(path)))) + continue + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json new file mode 100644 index 00000000..36103051 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json @@ -0,0 +1,10 @@ +{ + "prefixes": [ + "lib/ansible/modules/", + "plugins/modules/" + ], + "extensions": [ + ".py" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py new file mode 100755 index 00000000..687136dc --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'(expanduser)', text) + + if match: + print('%s:%d:%d: use argspec type="path" instead of 
type="str" to avoid use of `expanduser`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json new file mode 100644 index 00000000..776590b7 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json @@ -0,0 +1,6 @@ +{ + "extensions": [ + ".py" + ], + "output": "path-line-column-message" +} diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py new file mode 100755 index 00000000..49cb76c5 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))', text) + + if match: + print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/compile/compile.py b/test/lib/ansible_test/_data/sanity/compile/compile.py new file mode 100755 index 00000000..61910eee --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/compile/compile.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +"""Python syntax checker with lint friendly output.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import warnings + +with warnings.catch_warnings(): + # The parser module is deprecated as of Python 3.9. + # This implementation will need to be updated to use another solution. + # Until then, disable the deprecation warnings to prevent test failures. + warnings.simplefilter('ignore', DeprecationWarning) + import parser + +import sys + + +def main(): + status = 0 + + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as source_fd: + if sys.version_info[0] == 3: + source = source_fd.read().decode('utf-8') + else: + source = source_fd.read() + + try: + parser.suite(source) + except SyntaxError: + ex = sys.exc_info()[1] + status = 1 + message = ex.text.splitlines()[0].strip() + sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message)) + sys.stdout.flush() + + sys.exit(status) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/import/importer.py b/test/lib/ansible_test/_data/sanity/import/importer.py new file mode 100755 index 00000000..ef8db71b --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/import/importer.py @@ -0,0 +1,467 @@ +#!/usr/bin/env python +"""Import the given python module(s) and report error(s) encountered.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def main(): + """ + Main program function used to isolate globals from imported code. + Changes to globals in imported modules on Python 2.x will overwrite our own globals. 
+ """ + import ansible + import contextlib + import datetime + import json + import os + import re + import runpy + import subprocess + import sys + import traceback + import types + import warnings + + ansible_path = os.path.dirname(os.path.dirname(ansible.__file__)) + temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep + external_python = os.environ.get('SANITY_EXTERNAL_PYTHON') or sys.executable + collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME') + collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH') + + try: + # noinspection PyCompatibility + from importlib import import_module + except ImportError: + def import_module(name): + __import__(name) + return sys.modules[name] + + try: + # noinspection PyCompatibility + from StringIO import StringIO + except ImportError: + from io import StringIO + + if collection_full_name: + # allow importing code from collections when testing a collection + from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type + from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder + from ansible.utils.collection_loader import _collection_finder + + yaml_to_json_path = os.path.join(os.path.dirname(__file__), 'yaml_to_json.py') + yaml_to_dict_cache = {} + + # unique ISO date marker matching the one present in yaml_to_json.py + iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:' + iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker) + + def parse_value(value): + """Custom value parser for JSON deserialization that recognizes our internal ISO date format.""" + if isinstance(value, text_type): + match = iso_date_re.search(value) + + if match: + value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3))) + + return value + + def object_hook(data): + """Object hook for custom ISO date deserialization from JSON.""" + return dict((key, parse_value(value)) for key, value in data.items()) + + def yaml_to_dict(yaml, content_id): + """ + Return a Python dict version of the provided YAML. + Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML. 
+ """ + if content_id in yaml_to_dict_cache: + return yaml_to_dict_cache[content_id] + + try: + cmd = [external_python, yaml_to_json_path] + proc = subprocess.Popen([to_bytes(c) for c in cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml)) + + if proc.returncode != 0: + raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes))) + + data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook) + + return data + except Exception as ex: + raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex)) + + _collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access + + collection_loader = _AnsibleCollectionFinder(paths=[collection_root]) + collection_loader._install() # pylint: disable=protected-access + else: + # do not support collection loading when not testing a collection + collection_loader = None + + # remove all modules under the ansible package + list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__])) + + # pre-load an empty ansible package to prevent unwanted code in __init__.py from loading + # this more accurately reflects the environment that AnsiballZ runs modules under + # it also avoids issues with imports in the ansible package that are not allowed + ansible_module = types.ModuleType(ansible.__name__) + ansible_module.__file__ = ansible.__file__ + ansible_module.__path__ = ansible.__path__ + ansible_module.__package__ = ansible.__package__ + + sys.modules[ansible.__name__] = ansible_module + + class ImporterAnsibleModuleException(Exception): + """Exception thrown during initialization of ImporterAnsibleModule.""" + + class ImporterAnsibleModule: + """Replacement for AnsibleModule to support import testing.""" + def __init__(self, *args, **kwargs): + raise ImporterAnsibleModuleException() + + class ImportBlacklist: + """Blacklist inappropriate imports.""" + def __init__(self, path, name): + self.path = path + self.name = name + self.loaded_modules = set() + + def find_module(self, fullname, path=None): + """Return self if the given fullname is blacklisted, otherwise return None. 
+ :param fullname: str + :param path: str + :return: ImportBlacklist | None + """ + if fullname in self.loaded_modules: + return None # ignore modules that are already being loaded + + if is_name_in_namepace(fullname, ['ansible']): + if fullname in ('ansible.module_utils.basic', 'ansible.module_utils.common.removed'): + return self # intercept loading so we can modify the result + + if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]): + return None # module_utils and module under test are always allowed + + if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)): + return self # blacklist ansible files that exist + + return None # ansible file does not exist, do not blacklist + + if is_name_in_namepace(fullname, ['ansible_collections']): + if not collection_loader: + return self # blacklist collections when we are not testing a collection + + if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]): + return None # module_utils and module under test are always allowed + + if collection_loader.find_module(fullname, path): + return self # blacklist collection files that exist + + return None # collection file does not exist, do not blacklist + + # not a namespace we care about + return None + + def load_module(self, fullname): + """Raise an ImportError. + :type fullname: str + """ + if fullname == 'ansible.module_utils.basic': + module = self.__load_module(fullname) + + # stop Ansible module execution during AnsibleModule instantiation + module.AnsibleModule = ImporterAnsibleModule + # no-op for _load_params since it may be called before instantiating AnsibleModule + module._load_params = lambda *args, **kwargs: {} # pylint: disable=protected-access + + return module + + if fullname == 'ansible.module_utils.common.removed': + module = self.__load_module(fullname) + + # no-op for removed_module since it is called in place of AnsibleModule instantiation + module.removed_module = lambda *args, **kwargs: None + + return module + + raise ImportError('import of "%s" is not allowed in this context' % fullname) + + def __load_module(self, fullname): + """Load the requested module while avoiding infinite recursion. + :type fullname: str + :rtype: module + """ + self.loaded_modules.add(fullname) + return import_module(fullname) + + def run(): + """Main program function.""" + base_dir = os.getcwd() + messages = set() + + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + name = convert_relative_path_to_name(path) + test_python_module(path, name, base_dir, messages) + + if messages: + sys.exit(10) + + def test_python_module(path, name, base_dir, messages): + """Test the given python module by importing it. 
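+ Ansible modules are additionally executed via runpy with run_name='__main__' so that their main() entry point is exercised.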
+ :type path: str + :type name: str + :type base_dir: str + :type messages: set[str] + """ + if name in sys.modules: + return # cannot be tested because it has already been loaded + + is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py' + run_main = is_ansible_module + + if path == 'lib/ansible/modules/async_wrapper.py': + # async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function + run_main = False + + capture_normal = Capture() + capture_main = Capture() + + try: + with monitor_sys_modules(path, messages): + with blacklist_imports(path, name, messages): + with capture_output(capture_normal): + import_module(name) + + if run_main: + with monitor_sys_modules(path, messages): + with blacklist_imports(path, name, messages): + with capture_output(capture_main): + runpy.run_module(name, run_name='__main__', alter_sys=True) + except ImporterAnsibleModuleException: + # module instantiated AnsibleModule without raising an exception + pass + except BaseException as ex: # pylint: disable=locally-disabled, broad-except + # intentionally catch all exceptions, including calls to sys.exit + exc_type, _exc, exc_tb = sys.exc_info() + message = str(ex) + results = list(reversed(traceback.extract_tb(exc_tb))) + line = 0 + offset = 0 + full_path = os.path.join(base_dir, path) + base_path = base_dir + os.path.sep + source = None + + # avoid line wraps in messages + message = re.sub(r'\n *', ': ', message) + + for result in results: + if result[0] == full_path: + # save the line number for the file under test + line = result[1] or 0 + + if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path): + # save the first path and line number in the traceback which is in our source tree + source = (os.path.relpath(result[0], base_path), result[1] or 0, 0) + + if isinstance(ex, SyntaxError): + # SyntaxError has better information than the traceback + if ex.filename == full_path: # pylint: disable=locally-disabled, no-member + # syntax error was reported in the file under test + line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member + offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member + elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member + # syntax error was reported in our source tree + source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member + + # remove the filename and line number from the message + # either it was extracted above, or it's not really useful information + message = re.sub(r' \(.*?, line [0-9]+\)$', '', message) + + if source and source[0] != path: + message += ' (at %s:%d:%d)' % (source[0], source[1], source[2]) + + report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages) + finally: + capture_report(path, capture_normal, messages) + capture_report(path, capture_main, messages) + + def is_name_in_namepace(name, namespaces): + """Returns True if the given name is one of the given namespaces, otherwise returns False.""" + name_parts = name.split('.') + + for namespace in namespaces: + namespace_parts = namespace.split('.') + length = min(len(name_parts), len(namespace_parts)) + + truncated_name = name_parts[0:length] + truncated_namespace = namespace_parts[0:length] + + # empty parts in the namespace are 
treated as wildcards + # to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well + for idx, part in enumerate(truncated_namespace): + if not part: + truncated_name[idx] = part + + # example: name=ansible, allowed_name=ansible.module_utils + # example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils + if truncated_name == truncated_namespace: + return True + + return False + + def check_sys_modules(path, before, messages): + """Check for unwanted changes to sys.modules. + :type path: str + :type before: dict[str, module] + :type messages: set[str] + """ + after = sys.modules + removed = set(before.keys()) - set(after.keys()) + changed = set(key for key, value in before.items() if key in after and value != after[key]) + + # additions are checked by our custom PEP 302 loader, so we don't need to check them again here + + for module in sorted(removed): + report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages) + + for module in sorted(changed): + report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages) + + def convert_ansible_name_to_absolute_paths(name): + """Calculate the module path from the given name. + :type name: str + :rtype: list[str] + """ + return [ + os.path.join(ansible_path, name.replace('.', os.path.sep)), + os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py', + ] + + def convert_relative_path_to_name(path): + """Calculate the module name from the given path. + :type path: str + :rtype: str + """ + if path.endswith('/__init__.py'): + clean_path = os.path.dirname(path) + else: + clean_path = path + + clean_path = os.path.splitext(clean_path)[0] + + name = clean_path.replace(os.path.sep, '.') + + if collection_loader: + # when testing collections the relative paths (and names) being tested are within the collection under test + name = 'ansible_collections.%s.%s' % (collection_full_name, name) + else: + # when testing ansible all files being imported reside under the lib directory + name = name[len('lib/'):] + + return name + + class Capture: + """Captured output and/or exception.""" + def __init__(self): + self.stdout = StringIO() + self.stderr = StringIO() + + def capture_report(path, capture, messages): + """Report on captured output. + :type path: str + :type capture: Capture + :type messages: set[str] + """ + if capture.stdout.getvalue(): + first = capture.stdout.getvalue().strip().splitlines()[0].strip() + report_message(path, 0, 0, 'stdout', first, messages) + + if capture.stderr.getvalue(): + first = capture.stderr.getvalue().strip().splitlines()[0].strip() + report_message(path, 0, 0, 'stderr', first, messages) + + def report_message(path, line, column, code, message, messages): + """Report message if not already reported. + :type path: str + :type line: int + :type column: int + :type code: str + :type message: str + :type messages: set[str] + """ + message = '%s:%d:%d: %s: %s' % (path, line, column, code, message) + + if message not in messages: + messages.add(message) + print(message) + + @contextlib.contextmanager + def blacklist_imports(path, name, messages): + """Blacklist imports. 
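+ A PEP 302 meta-path finder is installed at sys.meta_path[0] for the duration of the context and removed afterwards.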
+ :type path: str + :type name: str + :type messages: set[str] + """ + blacklist = ImportBlacklist(path, name) + + sys.meta_path.insert(0, blacklist) + sys.path_importer_cache.clear() + + try: + yield + finally: + if sys.meta_path[0] != blacklist: + report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages) + + while blacklist in sys.meta_path: + sys.meta_path.remove(blacklist) + + sys.path_importer_cache.clear() + + @contextlib.contextmanager + def monitor_sys_modules(path, messages): + """Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces.""" + snapshot = sys.modules.copy() + + try: + yield + finally: + check_sys_modules(path, snapshot, messages) + + for key in set(sys.modules.keys()) - set(snapshot.keys()): + if is_name_in_namepace(key, ('ansible', 'ansible_collections')): + del sys.modules[key] # only unload our own code since we know it's native Python + + @contextlib.contextmanager + def capture_output(capture): + """Capture sys.stdout and sys.stderr. + :type capture: Capture + """ + old_stdout = sys.stdout + old_stderr = sys.stderr + + sys.stdout = capture.stdout + sys.stderr = capture.stderr + + # clear all warnings registries to make all warnings available + for module in sys.modules.values(): + try: + module.__warningregistry__.clear() + except AttributeError: + pass + + with warnings.catch_warnings(): + warnings.simplefilter('error') + + try: + yield + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + run() + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py new file mode 100644 index 00000000..09be9576 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py @@ -0,0 +1,27 @@ +"""Read YAML from stdin and write JSON to stdout.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import json +import sys + +from yaml import load + +try: + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader + +# unique ISO date marker matching the one present in importer.py +ISO_DATE_MARKER = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:' + + +def default(value): + if isinstance(value, datetime.date): + return '%s%s' % (ISO_DATE_MARKER, value.isoformat()) + + raise TypeError('cannot serialize type: %s' % type(value)) + + +json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout, default=default) diff --git a/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py new file mode 100644 index 00000000..74a45f00 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py @@ -0,0 +1,15 @@ +"""Read YAML from stdin and write JSON to stdout.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +from yaml import load + +try: + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader + +json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout) diff --git a/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt new file mode 100644 index 00000000..659c7f59 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt @@ -0,0 +1,4 @@ +E402 +W503 +W504 +E741 diff --git 
a/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 new file mode 100755 index 00000000..1ef2743a --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 @@ -0,0 +1,43 @@ +#!/usr/bin/env pwsh +#Requires -Version 6 +#Requires -Modules PSScriptAnalyzer, PSSA-PSCustomUseLiteralPath + +Set-StrictMode -Version 2.0 +$ErrorActionPreference = "Stop" +$WarningPreference = "Stop" + +# Until https://github.com/PowerShell/PSScriptAnalyzer/issues/1217 is fixed we need to import Pester if it's +# available. +if (Get-Module -Name Pester -ListAvailable -ErrorAction SilentlyContinue) { + Import-Module -Name Pester +} + +$LiteralPathRule = Import-Module -Name PSSA-PSCustomUseLiteralPath -PassThru +$LiteralPathRulePath = Join-Path -Path $LiteralPathRule.ModuleBase -ChildPath $LiteralPathRule.RootModule + +$PSSAParams = @{ + CustomRulePath = @($LiteralPathRulePath) + IncludeDefaultRules = $true + Setting = (Join-Path -Path $PSScriptRoot -ChildPath "settings.psd1") +} + +$Results = @() + +ForEach ($Path in $Args) { + $Retries = 3 + + Do { + Try { + $Results += Invoke-ScriptAnalyzer -Path $Path @PSSAParams 3> $null + $Retries = 0 + } + Catch { + If (--$Retries -le 0) { + Throw + } + } + } + Until ($Retries -le 0) +} + +ConvertTo-Json -InputObject $Results diff --git a/test/lib/ansible_test/_data/sanity/pslint/settings.psd1 b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1 new file mode 100644 index 00000000..7646ec35 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1 @@ -0,0 +1,13 @@ +@{ + ExcludeRules=@( + 'PSUseOutputTypeCorrectly', + 'PSUseShouldProcessForStateChangingFunctions', + # We send strings as plaintext so will always come across the 3 issues + 'PSAvoidUsingPlainTextForPassword', + 'PSAvoidUsingConvertToSecureStringWithPlainText', + 'PSAvoidUsingUserNameAndPassWordParams', + # We send the module as a base64 encoded string and a BOM will cause + # issues here + 'PSUseBOMForUnicodeEncodedFile' + ) +} diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg new file mode 100644 index 00000000..d3643162 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg @@ -0,0 +1,39 @@ +[MESSAGES CONTROL] + +disable= + cyclic-import, # consistent results require running with --jobs 1 and testing all files + duplicate-code, # consistent results require running with --jobs 1 and testing all files + too-few-public-methods, + too-many-arguments, + too-many-branches, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-nested-blocks, + too-many-return-statements, + too-many-statements, + no-self-use, + unused-import, # pylint does not understand PEP 484 type hints + consider-using-dict-comprehension, # requires Python 2.6, which we still support + consider-using-set-comprehension, # requires Python 2.6, which we still support + +[BASIC] + +bad-names=foo, + bar, + baz, + toto, + tutu, + tata, + _, + +good-names=i, + j, + k, + ex, + Run, + C, + __metaclass__, + +method-rgx=[a-z_][a-z0-9_]{2,40}$ +function-rgx=[a-z_][a-z0-9_]{2,40}$ diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg new file mode 100644 index 00000000..c2d75b1c --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg @@ -0,0 +1,135 @@ +[MESSAGES CONTROL] + +disable= + abstract-method, + 
access-member-before-definition, + arguments-differ, + assignment-from-no-return, + assignment-from-none, + attribute-defined-outside-init, + bad-continuation, + bad-indentation, + bad-mcs-classmethod-argument, + broad-except, + c-extension-no-member, + cell-var-from-loop, + chained-comparison, + comparison-with-callable, + consider-iterating-dictionary, + consider-merging-isinstance, + consider-using-dict-comprehension, + consider-using-enumerate, + consider-using-get, + consider-using-in, + consider-using-set-comprehension, + consider-using-ternary, + cyclic-import, # consistent results require running with --jobs 1 and testing all files + deprecated-lambda, + deprecated-method, + deprecated-module, + duplicate-code, # consistent results require running with --jobs 1 and testing all files + eval-used, + exec-used, + expression-not-assigned, + fixme, + function-redefined, + global-statement, + global-variable-undefined, + import-error, + import-self, + inconsistent-return-statements, + invalid-envvar-default, + invalid-name, + invalid-sequence-index, + keyword-arg-before-vararg, + len-as-condition, + line-too-long, + literal-comparison, + locally-disabled, + method-hidden, + misplaced-comparison-constant, + missing-docstring, + no-else-raise, + no-else-return, + no-init, + no-member, + no-name-in-module, + no-self-use, + no-value-for-parameter, + non-iterator-returned, + not-a-mapping, + not-an-iterable, + not-callable, + old-style-class, + pointless-statement, + pointless-string-statement, + possibly-unused-variable, + protected-access, + redefined-argument-from-local, + redefined-builtin, + redefined-outer-name, + redefined-variable-type, + reimported, + relative-beyond-top-level, # https://github.com/PyCQA/pylint/issues/2967 + signature-differs, + simplifiable-if-expression, + simplifiable-if-statement, + subprocess-popen-preexec-fn, + super-init-not-called, + superfluous-parens, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-boolean-expressions, + too-many-branches, + too-many-function-args, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-nested-blocks, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + trailing-comma-tuple, + trailing-comma-tuple, + try-except-raise, + unbalanced-tuple-unpacking, + undefined-loop-variable, + unexpected-keyword-arg, + ungrouped-imports, + unidiomatic-typecheck, + unnecessary-pass, + unsubscriptable-object, + unsupported-assignment-operation, + unsupported-delete-operation, + unsupported-membership-test, + unused-argument, + unused-import, + unused-variable, + used-before-assignment, + useless-object-inheritance, + useless-return, + useless-super-delegation, + wrong-import-order, + wrong-import-position, + +[BASIC] + +bad-names=foo, + bar, + baz, + toto, + tutu, + tata, + _, + +good-names=i, + j, + k, + ex, + Run, + +[TYPECHECK] + +ignored-modules= + _MovedItems, diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg new file mode 100644 index 00000000..45199078 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg @@ -0,0 +1,135 @@ +[MESSAGES CONTROL] + +disable= + abstract-method, + access-member-before-definition, + arguments-differ, + assignment-from-no-return, + assignment-from-none, + attribute-defined-outside-init, + bad-continuation, + bad-indentation, + bad-mcs-classmethod-argument, + broad-except, + c-extension-no-member, + 
cell-var-from-loop, + chained-comparison, + comparison-with-callable, + consider-iterating-dictionary, + consider-merging-isinstance, + consider-using-dict-comprehension, + consider-using-enumerate, + consider-using-get, + consider-using-in, + consider-using-set-comprehension, + consider-using-ternary, + cyclic-import, # consistent results require running with --jobs 1 and testing all files + deprecated-lambda, + deprecated-method, + deprecated-module, + duplicate-code, # consistent results require running with --jobs 1 and testing all files + eval-used, + exec-used, + expression-not-assigned, + fixme, + function-redefined, + global-statement, + global-variable-undefined, + import-error, + import-self, + inconsistent-return-statements, + invalid-envvar-default, + invalid-name, + invalid-sequence-index, + keyword-arg-before-vararg, + len-as-condition, + line-too-long, + literal-comparison, + locally-disabled, + method-hidden, + misplaced-comparison-constant, + missing-docstring, + no-else-raise, + no-else-return, + no-init, + no-member, + no-name-in-module, + no-self-use, + no-value-for-parameter, + non-iterator-returned, + not-a-mapping, + not-an-iterable, + not-callable, + old-style-class, + pointless-statement, + pointless-string-statement, + possibly-unused-variable, + protected-access, + redefined-argument-from-local, + redefined-builtin, + redefined-outer-name, + redefined-variable-type, + reimported, + relative-import, + signature-differs, + simplifiable-if-expression, + simplifiable-if-statement, + subprocess-popen-preexec-fn, + super-init-not-called, + superfluous-parens, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-boolean-expressions, + too-many-branches, + too-many-function-args, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-nested-blocks, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + trailing-comma-tuple, + trailing-comma-tuple, + try-except-raise, + unbalanced-tuple-unpacking, + undefined-loop-variable, + unexpected-keyword-arg, + ungrouped-imports, + unidiomatic-typecheck, + unnecessary-pass, + unsubscriptable-object, + unsupported-assignment-operation, + unsupported-delete-operation, + unsupported-membership-test, + unused-argument, + unused-import, + unused-variable, + used-before-assignment, + useless-object-inheritance, + useless-return, + useless-super-delegation, + wrong-import-order, + wrong-import-position, + +[BASIC] + +bad-names=foo, + bar, + baz, + toto, + tutu, + tata, + _, + +good-names=i, + j, + k, + ex, + Run, + +[TYPECHECK] + +ignored-modules= + _MovedItems, diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg new file mode 100644 index 00000000..f601ab57 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg @@ -0,0 +1,42 @@ +[MESSAGES CONTROL] + +disable= + cyclic-import, # consistent results require running with --jobs 1 and testing all files + duplicate-code, # consistent results require running with --jobs 1 and testing all files + too-few-public-methods, + too-many-arguments, + too-many-branches, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-nested-blocks, + too-many-return-statements, + too-many-statements, + missing-docstring, + unused-import, # pylint does not understand PEP 484 type hints + consider-using-dict-comprehension, # requires Python 2.6, which we still support + consider-using-set-comprehension, # 
requires Python 2.6, which we still support + +[BASIC] + +bad-names=foo, + bar, + baz, + toto, + tutu, + tata, + _, + +good-names=i, + j, + k, + f, + e, + ex, + Run, + C, + __metaclass__, + +module-rgx=[a-z_][a-z0-9_-]{2,40}$ +method-rgx=[a-z_][a-z0-9_]{2,40}$ +function-rgx=[a-z_][a-z0-9_]{2,40}$ diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py new file mode 100644 index 00000000..c06059c4 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py @@ -0,0 +1,250 @@ +# (c) 2018, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import re + +from distutils.version import LooseVersion + +import astroid + +from pylint.interfaces import IAstroidChecker +from pylint.checkers import BaseChecker +from pylint.checkers.utils import check_messages + +from ansible.module_utils.six import string_types +from ansible.release import __version__ as ansible_version_raw +from ansible.utils.version import SemanticVersion + +MSGS = { + 'E9501': ("Deprecated version (%r) found in call to Display.deprecated " + "or AnsibleModule.deprecate", + "ansible-deprecated-version", + "Used when a call to Display.deprecated specifies a version " + "less than or equal to the current version of Ansible", + {'minversion': (2, 6)}), + 'E9502': ("Display.deprecated call without a version or date", + "ansible-deprecated-no-version", + "Used when a call to Display.deprecated does not specify a " + "version or date", + {'minversion': (2, 6)}), + 'E9503': ("Invalid deprecated version (%r) found in call to " + "Display.deprecated or AnsibleModule.deprecate", + "ansible-invalid-deprecated-version", + "Used when a call to Display.deprecated specifies an invalid " + "Ansible version number", + {'minversion': (2, 6)}), + 'E9504': ("Deprecated version (%r) found in call to Display.deprecated " + "or AnsibleModule.deprecate", + "collection-deprecated-version", + "Used when a call to Display.deprecated specifies a collection " + "version less than or equal to the current version of this " + "collection", + {'minversion': (2, 6)}), + 'E9505': ("Invalid deprecated version (%r) found in call to " + "Display.deprecated or AnsibleModule.deprecate", + "collection-invalid-deprecated-version", + "Used when a call to Display.deprecated specifies an invalid " + "collection version number", + {'minversion': (2, 6)}), + 'E9506': ("No collection name found in call to Display.deprecated or " + "AnsibleModule.deprecate", + "ansible-deprecated-no-collection-name", + "The current collection name in format `namespace.name` must " + "be provided as collection_name when calling Display.deprecated " + "or AnsibleModule.deprecate (`ansible.builtin` for ansible-base)", + {'minversion': (2, 6)}), + 'E9507': ("Wrong collection name (%r) found in call to " + "Display.deprecated or AnsibleModule.deprecate", + "wrong-collection-deprecated", + "The name of the current collection must be passed to the " + "Display.deprecated resp. 
AnsibleModule.deprecate calls "
+              "(`ansible.builtin` for ansible-base)",
+              {'minversion': (2, 6)}),
+    'E9508': ("Expired date (%r) found in call to Display.deprecated "
+              "or AnsibleModule.deprecate",
+              "ansible-deprecated-date",
+              "Used when a call to Display.deprecated specifies a date "
+              "before today",
+              {'minversion': (2, 6)}),
+    'E9509': ("Invalid deprecated date (%r) found in call to "
+              "Display.deprecated or AnsibleModule.deprecate",
+              "ansible-invalid-deprecated-date",
+              "Used when a call to Display.deprecated specifies an invalid "
+              "date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
+              {'minversion': (2, 6)}),
+    'E9510': ("Both version and date found in call to "
+              "Display.deprecated or AnsibleModule.deprecate",
+              "ansible-deprecated-both-version-and-date",
+              "Only one of version and date must be specified",
+              {'minversion': (2, 6)}),
+}
+
+
+ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
+
+
+def _get_expr_name(node):
+    """Function to get either ``attrname`` or ``name`` from ``node.func.expr``
+
+    Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
+    """
+    try:
+        return node.func.expr.attrname
+    except AttributeError:
+        # If this fails too, we'll let it raise, the caller should catch it
+        return node.func.expr.name
+
+
+def parse_isodate(value):
+    msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+    if not isinstance(value, string_types):
+        raise ValueError(msg)
+    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+    # we have to do things manually.
+    if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
+        raise ValueError(msg)
+    try:
+        return datetime.datetime.strptime(value, '%Y-%m-%d').date()
+    except ValueError:
+        raise ValueError(msg)
+
+
+class AnsibleDeprecatedChecker(BaseChecker):
+    """Checks for Display.deprecated calls to ensure that the ``version``
+    has not passed or met the time for removal
+    """
+
+    __implements__ = (IAstroidChecker,)
+    name = 'deprecated'
+    msgs = MSGS
+
+    options = (
+        ('collection-name', {
+            'default': None,
+            'type': 'string',
+            'metavar': '',
+            'help': 'The collection\'s name used to check collection names in deprecations.',
+        }),
+        ('collection-version', {
+            'default': None,
+            'type': 'string',
+            'metavar': '',
+            'help': 'The collection\'s version number used to check deprecations.',
+        }),
+    )
+
+    def __init__(self, *args, **kwargs):
+        self.collection_version = None
+        self.collection_name = None
+        super(AnsibleDeprecatedChecker, self).__init__(*args, **kwargs)
+
+    def set_option(self, optname, value, action=None, optdict=None):
+        super(AnsibleDeprecatedChecker, self).set_option(optname, value, action, optdict)
+        if optname == 'collection-version' and value is not None:
+            self.collection_version = SemanticVersion(self.config.collection_version)
+        if optname == 'collection-name' and value is not None:
+            self.collection_name = self.config.collection_name
+
+    def _check_date(self, node, date):
+        if not isinstance(date, str):
+            self.add_message('invalid-date', node=node, args=(date,))
+            return
+
+        try:
+            date_parsed = parse_isodate(date)
+        except ValueError:
+            self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+            return
+
+        if date_parsed < datetime.date.today():
+            self.add_message('ansible-deprecated-date', node=node, args=(date,))
+
+    def _check_version(self, node, version, collection_name):
+        if not isinstance(version, (str, float)):
+            self.add_message('invalid-version', node=node, args=(version,))
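+            # a version that is neither str nor float cannot be parsed, so stop here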
return + + version_no = str(version) + + if collection_name == 'ansible.builtin': + # Ansible-base + try: + if not version_no: + raise ValueError('Version string should not be empty') + loose_version = LooseVersion(str(version_no)) + if ANSIBLE_VERSION >= loose_version: + self.add_message('ansible-deprecated-version', node=node, args=(version,)) + except ValueError: + self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,)) + elif collection_name: + # Collections + try: + if not version_no: + raise ValueError('Version string should not be empty') + semantic_version = SemanticVersion(version_no) + if collection_name == self.collection_name and self.collection_version is not None: + if self.collection_version >= semantic_version: + self.add_message('collection-deprecated-version', node=node, args=(version,)) + except ValueError: + self.add_message('collection-invalid-deprecated-version', node=node, args=(version,)) + + @check_messages(*(MSGS.keys())) + def visit_call(self, node): + version = None + date = None + collection_name = None + try: + if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or + node.func.attrname == 'deprecate' and _get_expr_name(node)): + if node.keywords: + for keyword in node.keywords: + if len(node.keywords) == 1 and keyword.arg is None: + # This is likely a **kwargs splat + return + if keyword.arg == 'version': + if isinstance(keyword.value.value, astroid.Name): + # This is likely a variable + return + version = keyword.value.value + if keyword.arg == 'date': + if isinstance(keyword.value.value, astroid.Name): + # This is likely a variable + return + date = keyword.value.value + if keyword.arg == 'collection_name': + if isinstance(keyword.value.value, astroid.Name): + # This is likely a variable + return + collection_name = keyword.value.value + if not version and not date: + try: + version = node.args[1].value + except IndexError: + self.add_message('ansible-deprecated-no-version', node=node) + return + if version and date: + self.add_message('ansible-deprecated-both-version-and-date', node=node) + + if collection_name: + this_collection = collection_name == (self.collection_name or 'ansible.builtin') + if not this_collection: + self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,)) + elif self.collection_name is not None: + self.add_message('ansible-deprecated-no-collection-name', node=node) + + if date: + self._check_date(node, date) + elif version: + self._check_version(node, version, collection_name) + except AttributeError: + # Not the type of node we are interested in + pass + + +def register(linter): + """required method to auto register this checker """ + linter.register_checker(AnsibleDeprecatedChecker(linter)) diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py new file mode 100644 index 00000000..eafde73b --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py @@ -0,0 +1,90 @@ +# (c) 2018, Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +import six + +import astroid +from pylint.interfaces import IAstroidChecker +from pylint.checkers import BaseChecker +from pylint.checkers import utils +from pylint.checkers.utils import check_messages +try: + from 
pylint.checkers.utils import parse_format_method_string +except ImportError: + # noinspection PyUnresolvedReferences + from pylint.checkers.strings import parse_format_method_string + +_PY3K = sys.version_info[:2] >= (3, 0) + +MSGS = { + 'E9305': ("Format string contains automatic field numbering " + "specification", + "ansible-format-automatic-specification", + "Used when a PEP 3101 format string contains automatic " + "field numbering (e.g. '{}').", + {'minversion': (2, 6)}), + 'E9390': ("bytes object has no .format attribute", + "ansible-no-format-on-bytestring", + "Used when a bytestring was used as a PEP 3101 format string " + "as Python3 bytestrings do not have a .format attribute", + {'minversion': (3, 0)}), +} + + +class AnsibleStringFormatChecker(BaseChecker): + """Checks string formatting operations to ensure that the format string + is valid and the arguments match the format string. + """ + + __implements__ = (IAstroidChecker,) + name = 'string' + msgs = MSGS + + @check_messages(*(MSGS.keys())) + def visit_call(self, node): + func = utils.safe_infer(node.func) + if (isinstance(func, astroid.BoundMethod) + and isinstance(func.bound, astroid.Instance) + and func.bound.name in ('str', 'unicode', 'bytes')): + if func.name == 'format': + self._check_new_format(node, func) + + def _check_new_format(self, node, func): + """ Check the new string formatting """ + if (isinstance(node.func, astroid.Attribute) + and not isinstance(node.func.expr, astroid.Const)): + return + try: + strnode = next(func.bound.infer()) + except astroid.InferenceError: + return + if not isinstance(strnode, astroid.Const): + return + + if _PY3K and isinstance(strnode.value, six.binary_type): + self.add_message('ansible-no-format-on-bytestring', node=node) + return + if not isinstance(strnode.value, six.string_types): + return + + if node.starargs or node.kwargs: + return + try: + num_args = parse_format_method_string(strnode.value)[1] + except utils.IncompleteFormatString: + return + + if num_args: + self.add_message('ansible-format-automatic-specification', + node=node) + return + + +def register(linter): + """required method to auto register this checker """ + linter.register_checker(AnsibleStringFormatChecker(linter)) diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py new file mode 100644 index 00000000..7012feaa --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py @@ -0,0 +1,242 @@ +"""A plugin for pylint to identify imports and functions which should not be used.""" +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import os + +import astroid + +from pylint.checkers import BaseChecker +from pylint.interfaces import IAstroidChecker + +ANSIBLE_TEST_MODULES_PATH = os.environ['ANSIBLE_TEST_MODULES_PATH'] +ANSIBLE_TEST_MODULE_UTILS_PATH = os.environ['ANSIBLE_TEST_MODULE_UTILS_PATH'] + + +class UnwantedEntry: + """Defines an unwanted import.""" + def __init__(self, alternative, modules_only=False, names=None, ignore_paths=None): + """ + :type alternative: str + :type modules_only: bool + :type names: tuple[str] | None + :type ignore_paths: tuple[str] | None + """ + self.alternative = alternative + self.modules_only = modules_only + self.names = set(names) if names else set() + self.ignore_paths = ignore_paths + + def applies_to(self, path, name=None): + """ + :type path: str + :type name: str | None + :rtype: bool + """ + if self.names: + if not name: + 
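+                # entries that restrict specific names never match a plain
+                # 'import module' statement, where no name is available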
return False + + if name not in self.names: + return False + + if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths): + return False + + if self.modules_only: + return is_module_path(path) + + return True + + +def is_module_path(path): + """ + :type path: str + :rtype: bool + """ + return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH) + + +class AnsibleUnwantedChecker(BaseChecker): + """Checker for unwanted imports and functions.""" + __implements__ = (IAstroidChecker,) + + name = 'unwanted' + + BAD_IMPORT = 'ansible-bad-import' + BAD_IMPORT_FROM = 'ansible-bad-import-from' + BAD_FUNCTION = 'ansible-bad-function' + BAD_MODULE_IMPORT = 'ansible-bad-module-import' + + msgs = dict( + E5101=('Import %s instead of %s', + BAD_IMPORT, + 'Identifies imports which should not be used.'), + E5102=('Import %s from %s instead of %s', + BAD_IMPORT_FROM, + 'Identifies imports which should not be used.'), + E5103=('Call %s instead of %s', + BAD_FUNCTION, + 'Identifies functions which should not be used.'), + E5104=('Import external package or ansible.module_utils not %s', + BAD_MODULE_IMPORT, + 'Identifies imports which should not be used.'), + ) + + unwanted_imports = dict( + # Additional imports that we may want to start checking: + # boto=UnwantedEntry('boto3', modules_only=True), + # requests=UnwantedEntry('ansible.module_utils.urls', modules_only=True), + # urllib=UnwantedEntry('ansible.module_utils.urls', modules_only=True), + + # see https://docs.python.org/2/library/urllib2.html + urllib2=UnwantedEntry('ansible.module_utils.urls', + ignore_paths=( + '/lib/ansible/module_utils/urls.py', + )), + + # see https://docs.python.org/3.7/library/collections.abc.html + collections=UnwantedEntry('ansible.module_utils.common._collections_compat', + ignore_paths=( + '/lib/ansible/module_utils/common/_collections_compat.py', + ), + names=( + 'MappingView', + 'ItemsView', + 'KeysView', + 'ValuesView', + 'Mapping', 'MutableMapping', + 'Sequence', 'MutableSequence', + 'Set', 'MutableSet', + 'Container', + 'Hashable', + 'Sized', + 'Callable', + 'Iterable', + 'Iterator', + )), + ) + + unwanted_functions = { + # see https://docs.python.org/2/library/tempfile.html#tempfile.mktemp + 'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'), + + 'sys.exit': UnwantedEntry('exit_json or fail_json', + ignore_paths=( + '/lib/ansible/module_utils/basic.py', + '/lib/ansible/modules/async_wrapper.py', + '/lib/ansible/module_utils/common/removed.py', + ), + modules_only=True), + + 'builtins.print': UnwantedEntry('module.log or module.debug', + ignore_paths=( + '/lib/ansible/module_utils/basic.py', + '/lib/ansible/module_utils/common/removed.py', + ), + modules_only=True), + } + + def visit_import(self, node): + """ + :type node: astroid.node_classes.Import + """ + for name in node.names: + self._check_import(node, name[0]) + + def visit_importfrom(self, node): + """ + :type node: astroid.node_classes.ImportFrom + """ + self._check_importfrom(node, node.modname, node.names) + + def visit_attribute(self, node): + """ + :type node: astroid.node_classes.Attribute + """ + last_child = node.last_child() + + # this is faster than using type inference and will catch the most common cases + if not isinstance(last_child, astroid.node_classes.Name): + return + + module = last_child.name + + entry = self.unwanted_imports.get(module) + + if entry and entry.names: + if entry.applies_to(self.linter.current_file, node.attrname): + 
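+                # direct attribute access (e.g. collections.Mapping) is reported
+                # the same way as a from-import of that name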
+                self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
+
+    def visit_call(self, node):
+        """
+        :type node: astroid.node_classes.Call
+        """
+        try:
+            for i in node.func.inferred():
+                func = None
+
+                if isinstance(i, astroid.scoped_nodes.FunctionDef) and isinstance(i.parent, astroid.scoped_nodes.Module):
+                    func = '%s.%s' % (i.parent.name, i.name)
+
+                if not func:
+                    continue
+
+                entry = self.unwanted_functions.get(func)
+
+                if entry and entry.applies_to(self.linter.current_file):
+                    self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
+        except astroid.exceptions.InferenceError:
+            pass
+
+    def _check_import(self, node, modname):
+        """
+        :type node: astroid.node_classes.Import
+        :type modname: str
+        """
+        self._check_module_import(node, modname)
+
+        entry = self.unwanted_imports.get(modname)
+
+        if not entry:
+            return
+
+        if entry.applies_to(self.linter.current_file):
+            self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
+
+    def _check_importfrom(self, node, modname, names):
+        """
+        :type node: astroid.node_classes.ImportFrom
+        :type modname: str
+        :type names: list[str]
+        """
+        self._check_module_import(node, modname)
+
+        entry = self.unwanted_imports.get(modname)
+
+        if not entry:
+            return
+
+        for name in names:
+            if entry.applies_to(self.linter.current_file, name[0]):
+                self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
+
+    def _check_module_import(self, node, modname):
+        """
+        :type node: astroid.node_classes.Import | astroid.node_classes.ImportFrom
+        :type modname: str
+        """
+        if not is_module_path(self.linter.current_file):
+            return
+
+        if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
+            return
+
+        if modname == 'ansible' or modname.startswith('ansible.'):
+            self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
+
+
+def register(linter):
+    """required method to auto register this checker """
+    linter.register_checker(AnsibleUnwantedChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
new file mode 100644
index 00000000..961e9bd9
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
@@ -0,0 +1,5 @@
+version
+release
+today
+br
+_
diff --git a/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
new file mode 100644
index 00000000..29588ddd
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
@@ -0,0 +1,3 @@
+SC1090
+SC1091
+SC2164
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
new file mode 100755
index 00000000..c1e2bdaa
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from validate_modules.main import main
+
+if __name__ == '__main__':
+    main()
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
new file mode 120000
index 00000000..11a5d8e1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
@@ -0,0 +1 @@
+main.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
new file mode 100644
index 00000000..d8ff2dc0
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+__version__ = '0.0.1b'
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
new file mode 100644
index 00000000..e7379288
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
@@ -0,0 +1,2444 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
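+# This module implements the bulk of the validate-modules sanity test: it
+# parses each module's source into an AST, validates the DOCUMENTATION,
+# EXAMPLES and RETURN documentation blocks against their schemas, and
+# reports any problems found via a reporter.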
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc +import argparse +import ast +import datetime +import json +import errno +import os +import re +import subprocess +import sys +import tempfile +import traceback + +from collections import OrderedDict +from contextlib import contextmanager +from distutils.version import StrictVersion, LooseVersion +from fnmatch import fnmatch + +import yaml + +from ansible import __version__ as ansible_version +from ansible.executor.module_common import REPLACER_WINDOWS +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils._text import to_native +from ansible.plugins.loader import fragment_loader +from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder +from ansible.utils.plugin_docs import BLACKLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring +from ansible.utils.version import SemanticVersion + +from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec + +from .schema import ansible_module_kwargs_schema, doc_schema, return_schema + +from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate +from voluptuous.humanize import humanize_error + +from ansible.module_utils.six import PY3, with_metaclass, string_types + +if PY3: + # Because there is no ast.TryExcept in Python 3 ast module + TRY_EXCEPT = ast.Try + # REPLACER_WINDOWS from ansible.executor.module_common is byte + # string but we need unicode for Python 3 + REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8') +else: + TRY_EXCEPT = ast.TryExcept + +BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea')) +INDENT_REGEX = re.compile(r'([\t]*)') +TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(? 1: + self.reporter.error( + path=self.object_path, + code='use-short-gplv3-license', + msg='Found old style GPLv3 license header: ' + 'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright' + ) + + def _check_for_subprocess(self): + for child in self.ast.body: + if isinstance(child, ast.Import): + if child.names[0].name == 'subprocess': + for line_no, line in enumerate(self.text.splitlines()): + sp_match = SUBPROCESS_REGEX.search(line) + if sp_match: + self.reporter.error( + path=self.object_path, + code='use-run-command-not-popen', + msg=('subprocess.Popen call found. Should be module.run_command'), + line=(line_no + 1), + column=(sp_match.span()[0] + 1) + ) + + def _check_for_os_call(self): + if 'os.call' in self.text: + for line_no, line in enumerate(self.text.splitlines()): + os_call_match = OS_CALL_REGEX.search(line) + if os_call_match: + self.reporter.error( + path=self.object_path, + code='use-run-command-not-os-call', + msg=('os.call() call found. 
Should be module.run_command'), + line=(line_no + 1), + column=(os_call_match.span()[0] + 1) + ) + + def _find_blacklist_imports(self): + for child in self.ast.body: + names = [] + if isinstance(child, ast.Import): + names.extend(child.names) + elif isinstance(child, TRY_EXCEPT): + bodies = child.body + for handler in child.handlers: + bodies.extend(handler.body) + for grandchild in bodies: + if isinstance(grandchild, ast.Import): + names.extend(grandchild.names) + for name in names: + # TODO: Add line/col + for blacklist_import, options in BLACKLIST_IMPORTS.items(): + if re.search(blacklist_import, name.name): + new_only = options['new_only'] + if self._is_new_module() and new_only: + self.reporter.error( + path=self.object_path, + **options['error'] + ) + elif not new_only: + self.reporter.error( + path=self.object_path, + **options['error'] + ) + + def _find_module_utils(self, main): + linenos = [] + found_basic = False + for child in self.ast.body: + if isinstance(child, (ast.Import, ast.ImportFrom)): + names = [] + try: + names.append(child.module) + if child.module.endswith('.basic'): + found_basic = True + except AttributeError: + pass + names.extend([n.name for n in child.names]) + + if [n for n in names if n.startswith('ansible.module_utils')]: + linenos.append(child.lineno) + + for name in child.names: + if ('module_utils' in getattr(child, 'module', '') and + isinstance(name, ast.alias) and + name.name == '*'): + msg = ( + 'module-utils-specific-import', + ('module_utils imports should import specific ' + 'components, not "*"') + ) + if self._is_new_module(): + self.reporter.error( + path=self.object_path, + code=msg[0], + msg=msg[1], + line=child.lineno + ) + else: + self.reporter.warning( + path=self.object_path, + code=msg[0], + msg=msg[1], + line=child.lineno + ) + + if (isinstance(name, ast.alias) and + name.name == 'basic'): + found_basic = True + + if not found_basic: + self.reporter.warning( + path=self.object_path, + code='missing-module-utils-basic-import', + msg='Did not find "ansible.module_utils.basic" import' + ) + + return linenos + + def _get_first_callable(self): + linenos = [] + for child in self.ast.body: + if isinstance(child, (ast.FunctionDef, ast.ClassDef)): + linenos.append(child.lineno) + + return min(linenos) + + def _find_main_call(self, look_for="main"): + """ Ensure that the module ends with: + if __name__ == '__main__': + main() + OR, in the case of modules that are in the docs-only deprecation phase + if __name__ == '__main__': + removed_module() + """ + lineno = False + if_bodies = [] + for child in self.ast.body: + if isinstance(child, ast.If): + try: + if child.test.left.id == '__name__': + if_bodies.extend(child.body) + except AttributeError: + pass + + bodies = self.ast.body + bodies.extend(if_bodies) + + for child in bodies: + + # validate that the next to last line is 'if __name__ == "__main__"' + if child.lineno == (self.length - 1): + + mainchecked = False + try: + if isinstance(child, ast.If) and \ + child.test.left.id == '__name__' and \ + len(child.test.ops) == 1 and \ + isinstance(child.test.ops[0], ast.Eq) and \ + child.test.comparators[0].s == '__main__': + mainchecked = True + except Exception: + pass + + if not mainchecked: + self.reporter.error( + path=self.object_path, + code='missing-if-name-main', + msg='Next to last line should be: if __name__ == "__main__":', + line=child.lineno + ) + + # validate that the final line is a call to main() + if isinstance(child, ast.Expr): + if isinstance(child.value, ast.Call): + if 
(isinstance(child.value.func, ast.Name) and
+                            child.value.func.id == look_for):
+                        lineno = child.lineno
+                        if lineno < self.length - 1:
+                            self.reporter.error(
+                                path=self.object_path,
+                                code='last-line-main-call',
+                                msg=('Call to %s() not the last line' % look_for),
+                                line=lineno
+                            )
+
+        if not lineno:
+            self.reporter.error(
+                path=self.object_path,
+                code='missing-main-call',
+                msg=('Did not find a call to %s()' % look_for)
+            )
+
+        return lineno or 0
+
+    def _find_has_import(self):
+        for child in self.ast.body:
+            found_try_except_import = False
+            found_has = False
+            if isinstance(child, TRY_EXCEPT):
+                bodies = child.body
+                for handler in child.handlers:
+                    bodies.extend(handler.body)
+                for grandchild in bodies:
+                    if isinstance(grandchild, ast.Import):
+                        found_try_except_import = True
+                    if isinstance(grandchild, ast.Assign):
+                        for target in grandchild.targets:
+                            if not isinstance(target, ast.Name):
+                                continue
+                            if target.id.lower().startswith('has_'):
+                                found_has = True
+            if found_try_except_import and not found_has:
+                # TODO: Add line/col
+                self.reporter.warning(
+                    path=self.object_path,
+                    code='try-except-missing-has',
+                    msg='Found Try/Except block without HAS_ assignment'
+                )
+
+    def _ensure_imports_below_docs(self, doc_info, first_callable):
+        try:
+            min_doc_line = min(
+                [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
+            )
+        except ValueError:
+            # We can't perform this validation, as there are no DOCs provided at all
+            return
+
+        max_doc_line = max(
+            [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
+        )
+
+        import_lines = []
+
+        for child in self.ast.body:
+            if isinstance(child, (ast.Import, ast.ImportFrom)):
+                if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+                    # allowed from __future__ imports
+                    for future_import in child.names:
+                        if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
+                            self.reporter.error(
+                                path=self.object_path,
+                                code='illegal-future-imports',
+                                msg=('Only the following from __future__ imports are allowed: %s'
+                                     % ', '.join(self.WHITELIST_FUTURE_IMPORTS)),
+                                line=child.lineno
+                            )
+                            break
+                    else:  # for-else. If we didn't find a problem and break out of the loop, then this is a legal import
+                        continue
+                import_lines.append(child.lineno)
+                if child.lineno < min_doc_line:
+                    self.reporter.error(
+                        path=self.object_path,
+                        code='import-before-documentation',
+                        msg=('Import found before documentation variables. '
+                             'All imports must appear below '
+                             'DOCUMENTATION/EXAMPLES/RETURN.'),
+                        line=child.lineno
+                    )
+                    break
+            elif isinstance(child, TRY_EXCEPT):
+                bodies = child.body
+                for handler in child.handlers:
+                    bodies.extend(handler.body)
+                for grandchild in bodies:
+                    if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
+                        import_lines.append(grandchild.lineno)
+                        if grandchild.lineno < min_doc_line:
+                            self.reporter.error(
+                                path=self.object_path,
+                                code='import-before-documentation',
+                                msg=('Import found before documentation '
+                                     'variables. 
All imports must appear below ' + 'DOCUMENTATION/EXAMPLES/RETURN.'), + line=child.lineno + ) + break + + for import_line in import_lines: + if not (max_doc_line < import_line < first_callable): + msg = ( + 'import-placement', + ('Imports should be directly below DOCUMENTATION/EXAMPLES/' + 'RETURN.') + ) + if self._is_new_module(): + self.reporter.error( + path=self.object_path, + code=msg[0], + msg=msg[1], + line=import_line + ) + else: + self.reporter.warning( + path=self.object_path, + code=msg[0], + msg=msg[1], + line=import_line + ) + + def _validate_ps_replacers(self): + # loop all (for/else + error) + # get module list for each + # check "shape" of each module name + + module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)' + csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)' + found_requires = False + + for req_stmt in re.finditer(module_requires, self.text): + found_requires = True + # this will bomb on dictionary format - "don't do that" + module_list = [x.strip() for x in req_stmt.group(1).split(',')] + if len(module_list) > 1: + self.reporter.error( + path=self.object_path, + code='multiple-utils-per-requires', + msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0) + ) + continue + + module_name = module_list[0] + + if module_name.lower().endswith('.psm1'): + self.reporter.error( + path=self.object_path, + code='invalid-requires-extension', + msg='Module #Requires should not end in .psm1: "%s"' % module_name + ) + + for req_stmt in re.finditer(csharp_requires, self.text): + found_requires = True + # this will bomb on dictionary format - "don't do that" + module_list = [x.strip() for x in req_stmt.group(1).split(',')] + if len(module_list) > 1: + self.reporter.error( + path=self.object_path, + code='multiple-csharp-utils-per-requires', + msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0) + ) + continue + + module_name = module_list[0] + + if module_name.lower().endswith('.cs'): + self.reporter.error( + path=self.object_path, + code='illegal-extension-cs', + msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name + ) + + # also accept the legacy #POWERSHELL_COMMON replacer signal + if not found_requires and REPLACER_WINDOWS not in self.text: + self.reporter.error( + path=self.object_path, + code='missing-module-utils-import-csharp-requirements', + msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found' + ) + + def _find_ps_docs_py_file(self): + if self.object_name in self.PS_DOC_BLACKLIST: + return + py_path = self.path.replace('.ps1', '.py') + if not os.path.isfile(py_path): + self.reporter.error( + path=self.object_path, + code='missing-python-doc', + msg='Missing python documentation file' + ) + return py_path + + def _get_docs(self): + docs = { + 'DOCUMENTATION': { + 'value': None, + 'lineno': 0, + 'end_lineno': 0, + }, + 'EXAMPLES': { + 'value': None, + 'lineno': 0, + 'end_lineno': 0, + }, + 'RETURN': { + 'value': None, + 'lineno': 0, + 'end_lineno': 0, + }, + } + for child in self.ast.body: + if isinstance(child, ast.Assign): + for grandchild in child.targets: + if not isinstance(grandchild, ast.Name): + continue + + if grandchild.id == 'DOCUMENTATION': + docs['DOCUMENTATION']['value'] = child.value.s + docs['DOCUMENTATION']['lineno'] = child.lineno + docs['DOCUMENTATION']['end_lineno'] = ( + child.lineno + len(child.value.s.splitlines()) + ) + elif grandchild.id 
== 'EXAMPLES': + docs['EXAMPLES']['value'] = child.value.s + docs['EXAMPLES']['lineno'] = child.lineno + docs['EXAMPLES']['end_lineno'] = ( + child.lineno + len(child.value.s.splitlines()) + ) + elif grandchild.id == 'RETURN': + docs['RETURN']['value'] = child.value.s + docs['RETURN']['lineno'] = child.lineno + docs['RETURN']['end_lineno'] = ( + child.lineno + len(child.value.s.splitlines()) + ) + + return docs + + def _validate_docs_schema(self, doc, schema, name, error_code): + # TODO: Add line/col + errors = [] + try: + schema(doc) + except Exception as e: + for error in e.errors: + error.data = doc + errors.extend(e.errors) + + for error in errors: + path = [str(p) for p in error.path] + + local_error_code = getattr(error, 'ansible_error_code', error_code) + + if isinstance(error.data, dict): + error_message = humanize_error(error.data, error) + else: + error_message = error + + if path: + combined_path = '%s.%s' % (name, '.'.join(path)) + else: + combined_path = name + + self.reporter.error( + path=self.object_path, + code=local_error_code, + msg='%s: %s' % (combined_path, error_message) + ) + + def _validate_docs(self): + doc_info = self._get_docs() + doc = None + documentation_exists = False + examples_exist = False + returns_exist = False + # We have three ways of marking deprecated/removed files. Have to check each one + # individually and then make sure they all agree + filename_deprecated_or_removed = False + deprecated = False + removed = False + doc_deprecated = None # doc legally might not exist + routing_says_deprecated = False + + if self.object_name.startswith('_') and not os.path.islink(self.object_path): + filename_deprecated_or_removed = True + + # We are testing a collection + if self.routing: + routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {}) + if routing_deprecation: + # meta/runtime.yml says this is deprecated + routing_says_deprecated = True + deprecated = True + + if not removed: + if not bool(doc_info['DOCUMENTATION']['value']): + self.reporter.error( + path=self.object_path, + code='missing-documentation', + msg='No DOCUMENTATION provided' + ) + else: + documentation_exists = True + doc, errors, traces = parse_yaml( + doc_info['DOCUMENTATION']['value'], + doc_info['DOCUMENTATION']['lineno'], + self.name, 'DOCUMENTATION' + ) + if doc: + add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True) + for error in errors: + self.reporter.error( + path=self.object_path, + code='documentation-syntax-error', + **error + ) + for trace in traces: + self.reporter.trace( + path=self.object_path, + tracebk=trace + ) + if not errors and not traces: + missing_fragment = False + with CaptureStd(): + try: + get_docstring(self.path, fragment_loader, verbose=True, + collection_name=self.collection_name, is_module=True) + except AssertionError: + fragment = doc['extends_documentation_fragment'] + self.reporter.error( + path=self.object_path, + code='missing-doc-fragment', + msg='DOCUMENTATION fragment missing: %s' % fragment + ) + missing_fragment = True + except Exception as e: + self.reporter.trace( + path=self.object_path, + tracebk=traceback.format_exc() + ) + self.reporter.error( + path=self.object_path, + code='documentation-error', + msg='Unknown DOCUMENTATION error, see TRACE: %s' % e + ) + + if not missing_fragment: + add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True) + + if 'options' in doc and doc['options'] is None: + self.reporter.error( + 
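+                            # a bare 'options:' key in DOCUMENTATION parses as None
+                            # rather than as an empty dict, so flag it explicitly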
+                            path=self.object_path,
+                            code='invalid-documentation-options',
+                            msg='DOCUMENTATION.options must be a dictionary/hash when used',
+                        )
+
+                    if 'deprecated' in doc and doc.get('deprecated'):
+                        doc_deprecated = True
+                        doc_deprecation = doc['deprecated']
+                        documentation_collection = doc_deprecation.get('removed_from_collection')
+                        if documentation_collection != self.collection_name:
+                            self.reporter.error(
+                                path=self.object_path,
+                                code='deprecation-wrong-collection',
+                                msg='DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
+                                    documentation_collection, self.collection_name)
+                            )
+                    else:
+                        doc_deprecated = False
+
+                    if os.path.islink(self.object_path):
+                        # This module has an alias, which we can tell as it's a symlink
+                        # Rather than checking for `module: $filename` we need to check against the true filename
+                        self._validate_docs_schema(
+                            doc,
+                            doc_schema(
+                                os.readlink(self.object_path).split('.')[0],
+                                for_collection=bool(self.collection),
+                                deprecated_module=deprecated,
+                            ),
+                            'DOCUMENTATION',
+                            'invalid-documentation',
+                        )
+                    else:
+                        # This is the normal case
+                        self._validate_docs_schema(
+                            doc,
+                            doc_schema(
+                                self.object_name.split('.')[0],
+                                for_collection=bool(self.collection),
+                                deprecated_module=deprecated,
+                            ),
+                            'DOCUMENTATION',
+                            'invalid-documentation',
+                        )
+
+                    if not self.collection:
+                        existing_doc = self._check_for_new_args(doc)
+                        self._check_version_added(doc, existing_doc)
+
+            if not bool(doc_info['EXAMPLES']['value']):
+                self.reporter.error(
+                    path=self.object_path,
+                    code='missing-examples',
+                    msg='No EXAMPLES provided'
+                )
+            else:
+                _doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
+                                                  doc_info['EXAMPLES']['lineno'],
+                                                  self.name, 'EXAMPLES', load_all=True)
+                for error in errors:
+                    self.reporter.error(
+                        path=self.object_path,
+                        code='invalid-examples',
+                        **error
+                    )
+                for trace in traces:
+                    self.reporter.trace(
+                        path=self.object_path,
+                        tracebk=trace
+                    )
+
+            if not bool(doc_info['RETURN']['value']):
+                if self._is_new_module():
+                    self.reporter.error(
+                        path=self.object_path,
+                        code='missing-return',
+                        msg='No RETURN provided'
+                    )
+                else:
+                    self.reporter.warning(
+                        path=self.object_path,
+                        code='missing-return-legacy',
+                        msg='No RETURN provided'
+                    )
+            else:
+                data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
+                                                  doc_info['RETURN']['lineno'],
+                                                  self.name, 'RETURN')
+                if data:
+                    add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
+                    self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
+                                               'RETURN', 'return-syntax-error')
+
+                for error in errors:
+                    self.reporter.error(
+                        path=self.object_path,
+                        code='return-syntax-error',
+                        **error
+                    )
+                for trace in traces:
+                    self.reporter.trace(
+                        path=self.object_path,
+                        tracebk=trace
+                    )
+
+        # Check for mismatched deprecation
+        if not self.collection:
+            mismatched_deprecation = True
+            if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
+                mismatched_deprecation = False
+            else:
+                if (filename_deprecated_or_removed and deprecated and doc_deprecated):
+                    mismatched_deprecation = False
+                if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
+                    mismatched_deprecation = False
+
+            if mismatched_deprecation:
+                self.reporter.error(
+                    path=self.object_path,
+                    code='deprecation-mismatch',
+                    msg='Module deprecation/removed must agree in documentation, by prepending filename with'
+                        ' "_", and setting 
DOCUMENTATION.deprecated for deprecation or by removing all' + ' documentation for removed' + ) + else: + # We are testing a collection + if self.object_name.startswith('_'): + self.reporter.error( + path=self.object_path, + code='collections-no-underscore-on-deprecation', + msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead', + ) + + if not (doc_deprecated == routing_says_deprecated): + # DOCUMENTATION.deprecated and meta/runtime.yml disagree + self.reporter.error( + path=self.object_path, + code='deprecation-mismatch', + msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.' + ) + elif routing_says_deprecated: + # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated. + # Make sure they give the same version or date. + routing_date = routing_deprecation.get('removal_date') + routing_version = routing_deprecation.get('removal_version') + # The versions and dates in the module documentation are auto-tagged, so remove the tag + # to make comparison possible and to avoid confusing the user. + documentation_date = doc_deprecation.get('removed_at_date') + documentation_version = doc_deprecation.get('removed_in') + if not compare_dates(routing_date, documentation_date): + self.reporter.error( + path=self.object_path, + code='deprecation-mismatch', + msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % ( + routing_date, documentation_date) + ) + if routing_version != documentation_version: + self.reporter.error( + path=self.object_path, + code='deprecation-mismatch', + msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % ( + routing_version, documentation_version) + ) + + # In the future we should error if ANSIBLE_METADATA exists in a collection + + return doc_info, doc + + def _check_version_added(self, doc, existing_doc): + version_added_raw = doc.get('version_added') + try: + collection_name = doc.get('version_added_collection') + version_added = self._create_strict_version( + str(version_added_raw or '0.0'), + collection_name=collection_name) + except ValueError as e: + version_added = version_added_raw or '0.0' + if self._is_new_module() or version_added != 'historical': + # already reported during schema validation, except: + if version_added == 'historical': + self.reporter.error( + path=self.object_path, + code='module-invalid-version-added', + msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e) + ) + return + + if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')): + self.reporter.error( + path=self.object_path, + code='module-incorrect-version-added', + msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw) + ) + + if not self._is_new_module(): + return + + should_be = '.'.join(ansible_version.split('.')[:2]) + strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin') + + if (version_added < strict_ansible_version or + strict_ansible_version < version_added): + self.reporter.error( + path=self.object_path, + code='module-incorrect-version-added', + msg='version_added should be %r. 
Currently %r' % (should_be, version_added_raw) + ) + + def _validate_ansible_module_call(self, docs): + try: + spec, args, kwargs = get_argument_spec(self.path, self.collection) + except AnsibleModuleNotInitialized: + self.reporter.error( + path=self.object_path, + code='ansible-module-not-initialized', + msg="Execution of the module did not result in initialization of AnsibleModule", + ) + return + except AnsibleModuleImportError as e: + self.reporter.error( + path=self.object_path, + code='import-error', + msg="Exception attempting to import module for argument_spec introspection, '%s'" % e + ) + self.reporter.trace( + path=self.object_path, + tracebk=traceback.format_exc() + ) + return + + self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)), + 'AnsibleModule', 'invalid-ansiblemodule-schema') + + self._validate_argument_spec(docs, spec, kwargs) + + def _validate_list_of_module_args(self, name, terms, spec, context): + if terms is None: + return + if not isinstance(terms, (list, tuple)): + # This is already reported by schema checking + return + for check in terms: + if not isinstance(check, (list, tuple)): + # This is already reported by schema checking + continue + bad_term = False + for term in check: + if not isinstance(term, string_types): + msg = name + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must contain strings in the lists or tuples; found value %r" % (term, ) + self.reporter.error( + path=self.object_path, + code=name + '-type', + msg=msg, + ) + bad_term = True + if bad_term: + continue + if len(set(check)) != len(check): + msg = name + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has repeated terms" + self.reporter.error( + path=self.object_path, + code=name + '-collision', + msg=msg, + ) + if not set(check) <= set(spec): + msg = name + if context: + msg += " found in %s" % " -> ".join(context) + msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec)))) + self.reporter.error( + path=self.object_path, + code=name + '-unknown', + msg=msg, + ) + + def _validate_required_if(self, terms, spec, context, module): + if terms is None: + return + if not isinstance(terms, (list, tuple)): + # This is already reported by schema checking + return + for check in terms: + if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]: + # This is already reported by schema checking + continue + if len(check) == 4 and not isinstance(check[3], bool): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must have fourth value omitted or of type bool; got %r" % (check[3], ) + self.reporter.error( + path=self.object_path, + code='required_if-is_one_of-type', + msg=msg, + ) + requirements = check[2] + if not isinstance(requirements, (list, tuple)): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, ) + self.reporter.error( + path=self.object_path, + code='required_if-requirements-type', + msg=msg, + ) + continue + bad_term = False + for term in requirements: + if not isinstance(term, string_types): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must have only strings in third value (requirements); got %r" % (term, ) + self.reporter.error( + path=self.object_path, + code='required_if-requirements-type', + msg=msg, +
) + bad_term = True + if bad_term: + continue + if len(set(requirements)) != len(requirements): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has repeated terms in requirements" + self.reporter.error( + path=self.object_path, + code='required_if-requirements-collision', + msg=msg, + ) + if not set(requirements) <= set(spec): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec)))) + self.reporter.error( + path=self.object_path, + code='required_if-requirements-unknown', + msg=msg, + ) + key = check[0] + if key not in spec: + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must have its key %s in argument_spec" % key + self.reporter.error( + path=self.object_path, + code='required_if-unknown-key', + msg=msg, + ) + continue + if key in requirements: + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " contains its key %s in requirements" % key + self.reporter.error( + path=self.object_path, + code='required_if-key-in-requirements', + msg=msg, + ) + value = check[1] + if value is not None: + _type = spec[key].get('type', 'str') + if callable(_type): + _type_checker = _type + else: + _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type) + try: + with CaptureStd(): + dummy = _type_checker(value) + except (Exception, SystemExit): + msg = "required_if" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type) + self.reporter.error( + path=self.object_path, + code='required_if-value-type', + msg=msg, + ) + + def _validate_required_by(self, terms, spec, context): + if terms is None: + return + if not isinstance(terms, Mapping): + # This is already reported by schema checking + return + for key, value in terms.items(): + if isinstance(value, string_types): + value = [value] + if not isinstance(value, (list, tuple)): + # This is already reported by schema checking + continue + for term in value: + if not isinstance(term, string_types): + # This is already reported by schema checking + continue + if len(set(value)) != len(value) or key in value: + msg = "required_by" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has repeated terms" + self.reporter.error( + path=self.object_path, + code='required_by-collision', + msg=msg, + ) + if not set(value) <= set(spec) or key not in spec: + msg = "required_by" + if context: + msg += " found in %s" % " -> ".join(context) + msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec)))) + self.reporter.error( + path=self.object_path, + code='required_by-unknown', + msg=msg, + ) + + def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None): + if not self.analyze_arg_spec: + return + + if docs is None: + docs = {} + + if context is None: + context = [] + + if last_context_spec is None: + last_context_spec = kwargs + + try: + if not context: + add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True) + except Exception: + # Cannot merge fragments + return + + # Use this to access type checkers later + module = NoArgsAnsibleModule({}) + + self._validate_list_of_module_args('mutually_exclusive', 
last_context_spec.get('mutually_exclusive'), spec, context) + self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context) + self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context) + self._validate_required_if(last_context_spec.get('required_if'), spec, context, module) + self._validate_required_by(last_context_spec.get('required_by'), spec, context) + + provider_args = set() + args_from_argspec = set() + deprecated_args_from_argspec = set() + doc_options = docs.get('options', {}) + if doc_options is None: + doc_options = {} + for arg, data in spec.items(): + restricted_argument_names = ('message', 'syslog_facility') + if arg.lower() in restricted_argument_names: + msg = "Argument '%s' in argument_spec " % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += "must not be one of %s as it is used " \ + "internally by Ansible Core Engine" % (",".join(restricted_argument_names)) + self.reporter.error( + path=self.object_path, + code='invalid-argument-name', + msg=msg, + ) + continue + if 'aliases' in data: + for al in data['aliases']: + if al.lower() in restricted_argument_names: + msg = "Argument alias '%s' in argument_spec " % al + if context: + msg += " found in %s" % " -> ".join(context) + msg += "must not be one of %s as it is used " \ + "internally by Ansible Core Engine" % (",".join(restricted_argument_names)) + self.reporter.error( + path=self.object_path, + code='invalid-argument-name', + msg=msg, + ) + continue + + if not isinstance(data, dict): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " must be a dictionary/hash when used" + self.reporter.error( + path=self.object_path, + code='invalid-argument-spec', + msg=msg, + ) + continue + + removed_at_date = data.get('removed_at_date', None) + if removed_at_date is not None: + try: + if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today(): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has a removed_at_date '%s' before today" % removed_at_date + self.reporter.error( + path=self.object_path, + code='deprecated-date', + msg=msg, + ) + except ValueError: + # This should only happen when removed_at_date is not in ISO format. Since schema + # validation already reported this as an error, don't report it a second time. + pass + + deprecated_aliases = data.get('deprecated_aliases', None) + if deprecated_aliases is not None: + for deprecated_alias in deprecated_aliases: + if 'name' in deprecated_alias and 'date' in deprecated_alias: + try: + date = deprecated_alias['date'] + if parse_isodate(date, allow_date=False) < datetime.date.today(): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has deprecated aliases '%s' with removal date '%s' before today" % ( + deprecated_alias['name'], deprecated_alias['date']) + self.reporter.error( + path=self.object_path, + code='deprecated-date', + msg=msg, + ) + except ValueError: + # This should only happen when deprecated_alias['date'] is not in ISO format. Since + # schema validation already reported this as an error, don't report it a second + # time. 
+ pass + + has_version = False + if self.collection and self.collection_version is not None: + compare_version = self.collection_version + version_of_what = "this collection (%s)" % self.collection_version_str + code_prefix = 'collection' + has_version = True + elif not self.collection: + compare_version = LOOSE_ANSIBLE_VERSION + version_of_what = "Ansible (%s)" % ansible_version + code_prefix = 'ansible' + has_version = True + + removed_in_version = data.get('removed_in_version', None) + if removed_in_version is not None: + try: + collection_name = data.get('removed_from_collection') + removed_in = self._create_version(str(removed_in_version), collection_name=collection_name) + if has_version and collection_name == self.collection_name and compare_version >= removed_in: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has a deprecated removed_in_version %r," % removed_in_version + msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what + self.reporter.error( + path=self.object_path, + code=code_prefix + '-deprecated-version', + msg=msg, + ) + except ValueError as e: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e) + self.reporter.error( + path=self.object_path, + code='invalid-deprecated-version', + msg=msg, + ) + except TypeError: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has an invalid removed_in_version number %r: " % (removed_in_version, ) + msg += " error while comparing to version of %s" % version_of_what + self.reporter.error( + path=self.object_path, + code='invalid-deprecated-version', + msg=msg, + ) + + if deprecated_aliases is not None: + for deprecated_alias in deprecated_aliases: + if 'name' in deprecated_alias and 'version' in deprecated_alias: + try: + collection_name = deprecated_alias.get('collection_name') + version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name) + if has_version and collection_name == self.collection_name and compare_version >= version: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has deprecated aliases '%s' with removal in version %r," % ( + deprecated_alias['name'], deprecated_alias['version']) + msg += " i.e. 
the version is less than or equal to the current version of %s" % version_of_what + self.reporter.error( + path=self.object_path, + code=code_prefix + '-deprecated-version', + msg=msg, + ) + except ValueError as e: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % ( + deprecated_alias['name'], deprecated_alias['version'], e) + self.reporter.error( + path=self.object_path, + code='invalid-deprecated-version', + msg=msg, + ) + except TypeError: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has deprecated aliases '%s' with invalid removal version %r:" % ( + deprecated_alias['name'], deprecated_alias['version']) + msg += " error while comparing to version of %s" % version_of_what + self.reporter.error( + path=self.object_path, + code='invalid-deprecated-version', + msg=msg, + ) + + aliases = data.get('aliases', []) + if arg in aliases: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " is specified as its own alias" + self.reporter.error( + path=self.object_path, + code='parameter-alias-self', + msg=msg + ) + if len(aliases) > len(set(aliases)): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has at least one alias specified multiple times in aliases" + self.reporter.error( + path=self.object_path, + code='parameter-alias-repeated', + msg=msg + ) + if not context and arg == 'state': + bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set())) + for bad_state in bad_states: + self.reporter.error( + path=self.object_path, + code='parameter-state-invalid-choice', + msg="Argument 'state' includes the value '%s' as a choice" % bad_state) + if not data.get('removed_in_version', None) and not data.get('removed_at_date', None): + args_from_argspec.add(arg) + args_from_argspec.update(aliases) + else: + deprecated_args_from_argspec.add(arg) + deprecated_args_from_argspec.update(aliases) + if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'): + if data.get('options') is not None and not isinstance(data.get('options'), Mapping): + self.reporter.error( + path=self.object_path, + code='invalid-argument-spec-options', + msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used", + ) + elif data.get('options'): + # Record provider options from network modules, for later comparison + for provider_arg, provider_data in data.get('options', {}).items(): + provider_args.add(provider_arg) + provider_args.update(provider_data.get('aliases', [])) + + if data.get('required') and data.get('default', object) != object: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " is marked as required but specifies a default. 
Arguments with a" \ + " default should not be marked as required" + self.reporter.error( + path=self.object_path, + code='no-default-for-required-parameter', + msg=msg + ) + + if arg in provider_args: + # Provider args are being removed from network module top level + # don't validate docs<->arg_spec checks below + continue + + _type = data.get('type', 'str') + if callable(_type): + _type_checker = _type + else: + _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type) + + _elements = data.get('elements') + if (_type == 'list') and not _elements: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines type as list but elements is not defined" + self.reporter.error( + path=self.object_path, + code='parameter-list-no-elements', + msg=msg + ) + if _elements: + if not callable(_elements): + module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements) + if _type != 'list': + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements + self.reporter.error( + path=self.object_path, + code='parameter-invalid-elements', + msg=msg + ) + + arg_default = None + if 'default' in data and not is_empty(data['default']): + try: + with CaptureStd(): + arg_default = _type_checker(data['default']) + except (Exception, SystemExit): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type) + self.reporter.error( + path=self.object_path, + code='incompatible-default-type', + msg=msg + ) + continue + + doc_options_args = [] + for alias in sorted(set([arg] + list(aliases))): + if alias in doc_options: + doc_options_args.append(alias) + if len(doc_options_args) == 0: + # Undocumented arguments will be handled later (search for undocumented-parameter) + doc_options_arg = {} + else: + doc_options_arg = doc_options[doc_options_args[0]] + if len(doc_options_args) > 1: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " with aliases %s is documented multiple times, namely as %s" % ( + ", ".join([("'%s'" % alias) for alias in aliases]), + ", ".join([("'%s'" % alias) for alias in doc_options_args]) + ) + self.reporter.error( + path=self.object_path, + code='parameter-documented-multiple-times', + msg=msg + ) + + try: + doc_default = None + if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']): + with CaptureStd(): + doc_default = _type_checker(doc_options_arg['default']) + except (Exception, SystemExit): + msg = "Argument '%s' in documentation" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type) + self.reporter.error( + path=self.object_path, + code='doc-default-incompatible-type', + msg=msg + ) + continue + + if arg_default != doc_default: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default) + self.reporter.error( + path=self.object_path, + code='doc-default-does-not-match-spec', + msg=msg + ) + + doc_type = doc_options_arg.get('type') + if 'type' in 
data and data['type'] is not None: + if doc_type is None: + if not arg.startswith('_'): # hidden parameter, for example _raw_params + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines type as %r but documentation doesn't define type" % (data['type']) + self.reporter.error( + path=self.object_path, + code='parameter-type-not-in-doc', + msg=msg + ) + elif data['type'] != doc_type: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type) + self.reporter.error( + path=self.object_path, + code='doc-type-does-not-match-spec', + msg=msg + ) + else: + if doc_type is None: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " uses default type ('str') but documentation doesn't define type" + self.reporter.error( + path=self.object_path, + code='doc-missing-type', + msg=msg + ) + elif doc_type != 'str': + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " implies type as 'str' but documentation defines as %r" % doc_type + self.reporter.error( + path=self.object_path, + code='implied-parameter-type-mismatch', + msg=msg + ) + + doc_choices = [] + try: + for choice in doc_options_arg.get('choices', []): + try: + with CaptureStd(): + doc_choices.append(_type_checker(choice)) + except (Exception, SystemExit): + msg = "Argument '%s' in documentation" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type) + self.reporter.error( + path=self.object_path, + code='doc-choices-incompatible-type', + msg=msg + ) + raise StopIteration() + except StopIteration: + continue + + arg_choices = [] + try: + for choice in data.get('choices', []): + try: + with CaptureStd(): + arg_choices.append(_type_checker(choice)) + except (Exception, SystemExit): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type) + self.reporter.error( + path=self.object_path, + code='incompatible-choices', + msg=msg + ) + raise StopIteration() + except StopIteration: + continue + + if not compare_unordered_lists(arg_choices, doc_choices): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices) + self.reporter.error( + path=self.object_path, + code='doc-choices-do-not-match-spec', + msg=msg + ) + + doc_required = doc_options_arg.get('required', False) + data_required = data.get('required', False) + if (doc_required or data_required) and not (doc_required and data_required): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + if doc_required: + msg += " is not required, but is documented as being required" + else: + msg += " is required, but is not documented as being required" + self.reporter.error( + path=self.object_path, + code='doc-required-mismatch', + msg=msg + ) + + doc_elements = doc_options_arg.get('elements', None) + doc_type = doc_options_arg.get('type', 'str') + data_elements = data.get('elements', None) + if 
(doc_elements and not doc_type == 'list'): + msg = "Argument '%s'" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements + self.reporter.error( + path=self.object_path, + code='doc-elements-invalid', + msg=msg + ) + if (doc_elements or data_elements) and not (doc_elements == data_elements): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + if data_elements: + msg += " specifies elements as %s," % data_elements + else: + msg += " does not specify elements," + if doc_elements: + msg += " but elements is documented as being %s" % doc_elements + else: + msg += " but elements is not documented" + self.reporter.error( + path=self.object_path, + code='doc-elements-mismatch', + msg=msg + ) + + spec_suboptions = data.get('options') + doc_suboptions = doc_options_arg.get('suboptions', {}) + if spec_suboptions: + if not doc_suboptions: + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " has sub-options but documentation does not define them" + self.reporter.error( + path=self.object_path, + code='missing-suboption-docs', + msg=msg + ) + self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs, + context=context + [arg], last_context_spec=data) + + for arg in args_from_argspec: + if not str(arg).isidentifier(): + msg = "Argument '%s' in argument_spec" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " is not a valid python identifier" + self.reporter.error( + path=self.object_path, + code='parameter-invalid', + msg=msg + ) + + if docs: + args_from_docs = set() + for arg, data in doc_options.items(): + args_from_docs.add(arg) + args_from_docs.update(data.get('aliases', [])) + + args_missing_from_docs = args_from_argspec.difference(args_from_docs) + docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec) + for arg in args_missing_from_docs: + if arg in provider_args: + # Provider args are being removed from network module top level + # So they are likely not documented on purpose + continue + msg = "Argument '%s'" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " is listed in the argument_spec, but not documented in the module documentation" + self.reporter.error( + path=self.object_path, + code='undocumented-parameter', + msg=msg + ) + for arg in docs_missing_from_args: + msg = "Argument '%s'" % arg + if context: + msg += " found in %s" % " -> ".join(context) + msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec" + self.reporter.error( + path=self.object_path, + code='nonexistent-parameter-documented', + msg=msg + ) + + def _check_for_new_args(self, doc): + if not self.base_branch or self._is_new_module(): + return + + with CaptureStd(): + try: + existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring( + self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True) + existing_options = existing_doc.get('options', {}) or {} + except AssertionError: + fragment = doc['extends_documentation_fragment'] + self.reporter.warning( + path=self.object_path, + code='missing-existing-doc-fragment', + msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment + ) + return + except Exception as e: + self.reporter.warning_trace( +
path=self.object_path, + tracebk=e + ) + self.reporter.warning( + path=self.object_path, + code='unknown-doc-fragment', + msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need to be updated') + ) + return + + try: + mod_collection_name = existing_doc.get('version_added_collection') + mod_version_added = self._create_strict_version( + str(existing_doc.get('version_added', '0.0')), + collection_name=mod_collection_name) + except ValueError: + mod_collection_name = self.collection_name + mod_version_added = self._create_strict_version('0.0') + + options = doc.get('options', {}) or {} + + should_be = '.'.join(ansible_version.split('.')[:2]) + strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin') + + for option, details in options.items(): + try: + names = [option] + details.get('aliases', []) + except (TypeError, AttributeError): + # Reporting of this syntax error will be handled by schema validation. + continue + + if any(name in existing_options for name in names): + # The option already existed. Make sure version_added didn't change. + for name in names: + existing_collection_name = existing_options.get(name, {}).get('version_added_collection') + existing_version = existing_options.get(name, {}).get('version_added') + if existing_version: + break + current_collection_name = details.get('version_added_collection') + current_version = details.get('version_added') + if current_collection_name != existing_collection_name: + self.reporter.error( + path=self.object_path, + code='option-incorrect-version-added-collection', + msg=('version_added for existing option (%s) should ' + 'belong to collection %r. Currently belongs to %r' % + (option, current_collection_name, existing_collection_name)) + ) + elif str(current_version) != str(existing_version): + self.reporter.error( + path=self.object_path, + code='option-incorrect-version-added', + msg=('version_added for existing option (%s) should ' + 'be %r. Currently %r' % + (option, existing_version, current_version)) + ) + continue + + try: + collection_name = details.get('version_added_collection') + version_added = self._create_strict_version( + str(details.get('version_added', '0.0')), + collection_name=collection_name) + except ValueError as e: + # already reported during schema validation + continue + + if collection_name != self.collection_name: + continue + if (strict_ansible_version != mod_version_added and + (version_added < strict_ansible_version or + strict_ansible_version < version_added)): + self.reporter.error( + path=self.object_path, + code='option-incorrect-version-added', + msg=('version_added for new option (%s) should ' + 'be %r.
Currently %r' % + (option, should_be, version_added)) + ) + + return existing_doc + + @staticmethod + def is_blacklisted(path): + base_name = os.path.basename(path) + file_name = os.path.splitext(base_name)[0] + + if file_name.startswith('_') and os.path.islink(path): + return True + + if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST): + return True + + for pat in ModuleValidator.BLACKLIST_PATTERNS: + if fnmatch(base_name, pat): + return True + + return False + + def validate(self): + super(ModuleValidator, self).validate() + if not self._python_module() and not self._powershell_module(): + self.reporter.error( + path=self.object_path, + code='invalid-extension', + msg=('Official Ansible modules must have a .py ' + 'extension for python modules or a .ps1 ' + 'for powershell modules') + ) + self._python_module_override = True + + if self._python_module() and self.ast is None: + self.reporter.error( + path=self.object_path, + code='python-syntax-error', + msg='Python SyntaxError while parsing module' + ) + try: + compile(self.text, self.path, 'exec') + except Exception: + self.reporter.trace( + path=self.object_path, + tracebk=traceback.format_exc() + ) + return + + end_of_deprecation_should_be_removed_only = False + if self._python_module(): + doc_info, docs = self._validate_docs() + + # See if current version => deprecated.removed_in, ie, should be docs only + if docs and docs.get('deprecated', False): + + if 'removed_in' in docs['deprecated']: + removed_in = None + collection_name = docs['deprecated'].get('removed_from_collection') + version = docs['deprecated']['removed_in'] + if collection_name != self.collection_name: + self.reporter.error( + path=self.object_path, + code='invalid-module-deprecation-source', + msg=('The deprecation version for a module must be added in this collection') + ) + else: + try: + removed_in = self._create_strict_version(str(version), collection_name=collection_name) + except ValueError as e: + self.reporter.error( + path=self.object_path, + code='invalid-module-deprecation-version', + msg=('The deprecation version %r cannot be parsed: %s' % (version, e)) + ) + + if removed_in: + if not self.collection: + strict_ansible_version = self._create_strict_version( + '.'.join(ansible_version.split('.')[:2]), self.collection_name) + end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in + elif self.collection_version: + strict_ansible_version = self.collection_version + end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in + + # handle deprecation by date + if 'removed_at_date' in docs['deprecated']: + try: + removed_at_date = docs['deprecated']['removed_at_date'] + if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today(): + msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date + self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg) + except ValueError: + # This happens if the date cannot be parsed. This is already checked by the schema. 
+ pass + + if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only: + self._validate_ansible_module_call(docs) + self._check_for_sys_exit() + self._find_blacklist_imports() + main = self._find_main_call() + self._find_module_utils(main) + self._find_has_import() + first_callable = self._get_first_callable() + self._ensure_imports_below_docs(doc_info, first_callable) + self._check_for_subprocess() + self._check_for_os_call() + + if self._powershell_module(): + if self.basename in self.PS_DOC_BLACKLIST: + return + + self._validate_ps_replacers() + docs_path = self._find_ps_docs_py_file() + + # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util + pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic' + if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_BLACKLIST: + with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv: + docs = docs_mv._validate_docs()[1] + self._validate_ansible_module_call(docs) + + self._check_gpl3_header() + if not self._just_docs() and not end_of_deprecation_should_be_removed_only: + self._check_interpreter(powershell=self._powershell_module()) + self._check_type_instead_of_isinstance( + powershell=self._powershell_module() + ) + if end_of_deprecation_should_be_removed_only: + # Ensure that `if __name__ == '__main__':` calls `removed_module()` which ensures that the module has no code in it + main = self._find_main_call('removed_module') + # FIXME: Ensure that the version in the call to removed_module is less than +2. + # Otherwise it's time to remove the file (This may need to be done in another test to + # avoid breaking whenever the Ansible version bumps) + + +class PythonPackageValidator(Validator): + BLACKLIST_FILES = frozenset(('__pycache__',)) + + def __init__(self, path, reporter=None): + super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter()) + + self.path = path + self.basename = os.path.basename(path) + + @property + def object_name(self): + return self.basename + + @property + def object_path(self): + return self.path + + def validate(self): + super(PythonPackageValidator, self).validate() + + if self.basename in self.BLACKLIST_FILES: + return + + init_file = os.path.join(self.path, '__init__.py') + if not os.path.exists(init_file): + self.reporter.error( + path=self.object_path, + code='subdirectory-missing-init', + msg='Ansible module subdirectories must contain an __init__.py' + ) + + +def setup_collection_loader(): + collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep) + _AnsibleCollectionFinder(collections_paths) + + +def re_compile(value): + """ + Argparse expects things to raise TypeError, but re.compile raises an re.error + exception. + + This function is a shorthand to convert the re.error exception to a + TypeError. + """ + + try: + return re.compile(value) + except re.error as e: + raise TypeError(e) + + +def run(): + parser = argparse.ArgumentParser(prog="validate-modules") + parser.add_argument('modules', nargs='+', + help='Path to module or module directory') + parser.add_argument('-w', '--warnings', help='Show warnings', + action='store_true') + parser.add_argument('--exclude', help='RegEx exclusion pattern', + type=re_compile) + parser.add_argument('--arg-spec', help='Analyze module argument spec', + action='store_true', default=False) + parser.add_argument('--base-branch', default=None, + help='Used in determining if
new options were added') + parser.add_argument('--format', choices=['json', 'plain'], default='plain', + help='Output format. Default: "%(default)s"') + parser.add_argument('--output', default='-', + help='Output location, use "-" for stdout. ' + 'Default "%(default)s"') + parser.add_argument('--collection', + help='Specifies the path to the collection, when ' + 'validating files within a collection. Ensure ' + 'that ANSIBLE_COLLECTIONS_PATH is set so the ' + 'contents of the collection can be located') + parser.add_argument('--collection-version', + help='The collection\'s version number used to check ' + 'deprecations') + + args = parser.parse_args() + + args.modules = [m.rstrip('/') for m in args.modules] + + reporter = Reporter() + git_cache = GitCache(args.base_branch) + + check_dirs = set() + + routing = None + if args.collection: + setup_collection_loader() + routing_file = 'meta/runtime.yml' + # Load meta/runtime.yml if it exists, as it may contain deprecation information + if os.path.isfile(routing_file): + try: + with open(routing_file) as f: + routing = yaml.safe_load(f) + except yaml.error.MarkedYAMLError as ex: + print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + except Exception as ex: # pylint: disable=broad-except + print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + + for module in args.modules: + if os.path.isfile(module): + path = module + if args.exclude and args.exclude.search(path): + continue + if ModuleValidator.is_blacklisted(path): + continue + with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version, + analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, + git_cache=git_cache, reporter=reporter, routing=routing) as mv1: + mv1.validate() + check_dirs.add(os.path.dirname(path)) + + for root, dirs, files in os.walk(module): + basedir = root[len(module) + 1:].split('/', 1)[0] + if basedir in BLACKLIST_DIRS: + continue + for dirname in dirs: + if root == module and dirname in BLACKLIST_DIRS: + continue + path = os.path.join(root, dirname) + if args.exclude and args.exclude.search(path): + continue + check_dirs.add(path) + + for filename in files: + path = os.path.join(root, filename) + if args.exclude and args.exclude.search(path): + continue + if ModuleValidator.is_blacklisted(path): + continue + with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version, + analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, + git_cache=git_cache, reporter=reporter, routing=routing) as mv2: + mv2.validate() + + if not args.collection: + for path in sorted(check_dirs): + pv = PythonPackageValidator(path, reporter=reporter) + pv.validate() + + if args.format == 'plain': + sys.exit(reporter.plain(warnings=args.warnings, output=args.output)) + else: + sys.exit(reporter.json(warnings=args.warnings, output=args.output)) + + +class GitCache: + def __init__(self, base_branch): + self.base_branch = base_branch + + if self.base_branch: + self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/']) + else: + self.base_tree = [] + + try: + self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/']) + except GitError as ex: + if ex.status == 128: + # fallback when there is no .git directory + self.head_tree = self._get_module_files() + else: + raise + except OSError as ex: + if ex.errno == 
errno.ENOENT: + # fallback when git is not installed + self.head_tree = self._get_module_files() + else: + raise + + self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1')) + + self.base_module_paths.pop('__init__.py', None) + + self.head_aliased_modules = set() + + for path in self.head_tree: + filename = os.path.basename(path) + + if filename.startswith('_') and filename != '__init__.py': + if os.path.islink(path): + self.head_aliased_modules.add(os.path.basename(os.path.realpath(path))) + + @staticmethod + def _get_module_files(): + module_files = [] + + for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'): + for file_name in file_names: + module_files.append(os.path.join(dir_path, file_name)) + + return module_files + + @staticmethod + def _git(args): + cmd = ['git'] + args + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise GitError(stderr, p.returncode) + return stdout.decode('utf-8').splitlines() + + +class GitError(Exception): + def __init__(self, message, status): + super(GitError, self).__init__(message) + + self.status = status + + +def main(): + try: + run() + except KeyboardInterrupt: + pass diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py new file mode 100644 index 00000000..ac025291 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2016 Matt Martz +# Copyright (C) 2016 Rackspace US, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>.
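The module_args.py code that follows does not parse argument_spec statically; it captures it at run time by monkeypatching AnsibleModule.__init__ so the first call records its arguments and then aborts with a sentinel exception. A minimal, self-contained sketch of that capture trick (illustrative only, not part of the patch; CaptureInit, FakeInit and demo_module are invented names):

    class CaptureInit(Exception):
        """Sentinel raised so the module body never actually runs."""

    class FakeInit:
        def __init__(self):
            self.args, self.kwargs, self.called = (), {}, False

        def __call__(self, *args, **kwargs):
            # Record everything AnsibleModule(...) was called with, then bail out.
            self.args, self.kwargs, self.called = args, kwargs, True
            raise CaptureInit()

    def demo_module(ansible_module_init):
        # Stands in for a module's main(); in module_args.py the real module is
        # executed via runpy.run_module() with AnsibleModule.__init__ replaced.
        ansible_module_init(argument_spec={'data': {'type': 'str', 'default': 'pong'}})

    fake = FakeInit()
    try:
        demo_module(fake)
    except CaptureInit:
        pass

    assert fake.called
    print(fake.kwargs['argument_spec'])  # {'data': {'type': 'str', 'default': 'pong'}}

The sentinel exception is the key design choice: it guarantees the spec is captured before the module performs any real work.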
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import runpy +import json +import os +import subprocess +import sys + +from contextlib import contextmanager + +from ansible.executor.powershell.module_manifest import PSModuleDepFinder +from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS +from ansible.module_utils.six import reraise +from ansible.module_utils._text import to_bytes, to_text + +from .utils import CaptureStd, find_executable, get_module_name_from_filename + + +class AnsibleModuleCallError(RuntimeError): + pass + + +class AnsibleModuleImportError(ImportError): + pass + + +class AnsibleModuleNotInitialized(Exception): + pass + + +class _FakeAnsibleModuleInit: + def __init__(self): + self.args = tuple() + self.kwargs = {} + self.called = False + + def __call__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + self.called = True + raise AnsibleModuleCallError('AnsibleModuleCallError') + + +def _fake_load_params(): + pass + + +@contextmanager +def setup_env(filename): + # Used to clean up imports later + pre_sys_modules = list(sys.modules.keys()) + + fake = _FakeAnsibleModuleInit() + module = __import__('ansible.module_utils.basic').module_utils.basic + _original_init = module.AnsibleModule.__init__ + _original_load_params = module._load_params + setattr(module.AnsibleModule, '__init__', fake) + setattr(module, '_load_params', _fake_load_params) + + try: + yield fake + finally: + setattr(module.AnsibleModule, '__init__', _original_init) + setattr(module, '_load_params', _original_load_params) + + # Clean up imports to prevent issues with mutable data being used in modules + for k in list(sys.modules.keys()): + # It's faster if we limit to items in ansible.module_utils + # But if this causes problems later, we should remove it + if k not in pre_sys_modules and k.startswith('ansible.module_utils.'): + del sys.modules[k] + + +def get_ps_argument_spec(filename, collection): + fqc_name = get_module_name_from_filename(filename, collection) + + pwsh = find_executable('pwsh') + if not pwsh: + raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.') + + module_path = os.path.join(os.getcwd(), filename) + b_module_path = to_bytes(module_path, errors='surrogate_or_strict') + with open(b_module_path, mode='rb') as module_fd: + b_module_data = module_fd.read() + + ps_dep_finder = PSModuleDepFinder() + ps_dep_finder.scan_module(b_module_data, fqn=fqc_name) + + # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util. 
+ ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False) + + util_manifest = json.dumps({ + 'module_path': to_text(module_path, errors='surrogate_or_strict'), + 'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'], + 'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]), + }) + + script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1') + proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=False) + stdout, stderr = proc.communicate() + + if proc.returncode != 0: + raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8'))) + + kwargs = json.loads(stdout) + + # the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS + kwargs['argument_spec'] = kwargs.pop('options', {}) + + return kwargs['argument_spec'], (), kwargs + + +def get_py_argument_spec(filename, collection): + name = get_module_name_from_filename(filename, collection) + + with setup_env(filename) as fake: + try: + with CaptureStd(): + runpy.run_module(name, run_name='__main__', alter_sys=True) + except AnsibleModuleCallError: + pass + except BaseException as e: + # we want to catch all exceptions here, including sys.exit + reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2]) + + if not fake.called: + raise AnsibleModuleNotInitialized() + + try: + # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True} + if 'argument_spec' in fake.kwargs: + argument_spec = fake.kwargs['argument_spec'] + else: + argument_spec = fake.args[0] + # If add_file_common_args is truthy, add options from FILE_COMMON_ARGUMENTS when not present. + # This is the only modification to argument_spec done by AnsibleModule itself, and which is + # not caught by setup_env's AnsibleModule replacement + if fake.kwargs.get('add_file_common_args'): + for k, v in FILE_COMMON_ARGUMENTS.items(): + if k not in argument_spec: + argument_spec[k] = v + return argument_spec, fake.args, fake.kwargs + except (TypeError, IndexError): + return {}, (), {} + + +def get_argument_spec(filename, collection): + if filename.endswith('.py'): + return get_py_argument_spec(filename, collection) + else: + return get_ps_argument_spec(filename, collection) diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 new file mode 100755 index 00000000..5ceb9d50 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 @@ -0,0 +1,110 @@ +#!/usr/bin/env pwsh +#Requires -Version 6 + +Set-StrictMode -Version 2.0 +$ErrorActionPreference = "Stop" +$WarningPreference = "Stop" + +Function Resolve-CircularReference { + <# + .SYNOPSIS + Removes known types that cause a circular reference in their json serialization.
+ + .PARAMETER Hash + The hash to scan for circular references + #> + [CmdletBinding()] + param ( + [Parameter(Mandatory=$true)] + [System.Collections.IDictionary] + $Hash + ) + + foreach ($key in [String[]]$Hash.Keys) { + $value = $Hash[$key] + if ($value -is [System.Collections.IDictionary]) { + Resolve-CircularReference -Hash $value + } elseif ($value -is [Array] -or $value -is [System.Collections.IList]) { + $values = @(foreach ($v in $value) { + if ($v -is [System.Collections.IDictionary]) { + Resolve-CircularReference -Hash $v + } + ,$v + }) + $Hash[$key] = $values + } elseif ($value -is [DateTime]) { + $Hash[$key] = $value.ToString("yyyy-MM-dd") + } elseif ($value -is [delegate]) { + # Type can be set to a delegate function which defines its own type. For the documentation we just + # reflect that as raw + if ($key -eq 'type') { + $Hash[$key] = 'raw' + } else { + $Hash[$key] = $value.ToString() # Shouldn't ever happen but just in case. + } + } + } +} + +$manifest = ConvertFrom-Json -InputObject $args[0] -AsHashtable +if (-not $manifest.Contains('module_path') -or -not $manifest.module_path) { + Write-Error -Message "No module specified." + exit 1 +} +$module_path = $manifest.module_path + +# Check if the path is relative and get the full path to the module +if (-not ([System.IO.Path]::IsPathRooted($module_path))) { + $module_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($module_path) +} + +if (-not (Test-Path -LiteralPath $module_path -PathType Leaf)) { + Write-Error -Message "The module at '$module_path' does not exist." + exit 1 +} + +$module_code = Get-Content -LiteralPath $module_path -Raw + +$powershell = [PowerShell]::Create() +$powershell.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop") + +# Load the PowerShell module utils as the module may be using them to refer to shared module options. Currently we +# can only load the PowerShell utils due to cross platform compatibility issues. +if ($manifest.Contains('ps_utils')) { + foreach ($util_info in $manifest.ps_utils.GetEnumerator()) { + $util_name = $util_info.Key + $util_path = $util_info.Value + + if (-not (Test-Path -LiteralPath $util_path -PathType Leaf)) { + # Failed to find the util path, just silently ignore for now and hope for the best. + continue + } + + $util_sb = [ScriptBlock]::Create((Get-Content -LiteralPath $util_path -Raw)) + $powershell.AddCommand('New-Module').AddParameters(@{ + Name = $util_name + ScriptBlock = $util_sb + }) > $null + $powershell.AddCommand('Import-Module').AddParameter('WarningAction', 'SilentlyContinue') > $null + $powershell.AddCommand('Out-Null').AddStatement() > $null + + # Also import it into the current runspace in case ps_argspec.ps1 needs to use it.
+ $null = New-Module -Name $util_name -ScriptBlock $util_sb | Import-Module -WarningAction SilentlyContinue + } +} + +Add-CSharpType -References @(Get-Content -LiteralPath $manifest.ansible_basic -Raw) +[Ansible.Basic.AnsibleModule]::_DebugArgSpec = $true + +$powershell.AddScript($module_code) > $null +$powershell.Invoke() > $null + +if ($powershell.HadErrors) { + $powershell.Streams.Error + exit 1 +} + +$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec') +Resolve-CircularReference -Hash $arg_spec + +ConvertTo-Json -InputObject $arg_spec -Compress -Depth 99 diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py new file mode 100644 index 00000000..42a2ada4 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py @@ -0,0 +1,488 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Matt Martz +# Copyright: (c) 2015, Rackspace US, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from distutils.version import StrictVersion +from functools import partial + +from voluptuous import ALLOW_EXTRA, PREVENT_EXTRA, All, Any, Invalid, Length, Required, Schema, Self, ValueInvalid +from ansible.module_utils.six import string_types +from ansible.module_utils.common.collections import is_iterable +from ansible.utils.version import SemanticVersion + +from .utils import parse_isodate + +list_string_types = list(string_types) +tuple_string_types = tuple(string_types) +any_string_types = Any(*string_types) + +# Valid DOCUMENTATION.author lines +# Based on Ansibulbot's extract_github_id() +# author: First Last (@name) [optional anything] +# "Ansible Core Team" - Used by the Bot +# "Michael DeHaan" - nop +# "OpenStack Ansible SIG" - OpenStack does not use GitHub +# "Name (!UNKNOWN)" - For the few untraceable authors +author_line = re.compile(r'^\w.*(\(@([\w-]+)\)|!UNKNOWN)(?![\w.])|^Ansible Core Team$|^Michael DeHaan$|^OpenStack Ansible SIG$') + + +def _add_ansible_error_code(exception, error_code): + setattr(exception, 'ansible_error_code', error_code) + return exception + + +def isodate(v, error_code=None): + try: + parse_isodate(v, allow_date=True) + except ValueError as e: + raise _add_ansible_error_code(Invalid(str(e)), error_code or 'ansible-invalid-date') + return v + + +COLLECTION_NAME_RE = re.compile(r'^([^.]+\.[^.]+)$') + + +def collection_name(v, error_code=None): + if not isinstance(v, string_types): + raise _add_ansible_error_code( + Invalid('Collection name must be a string'), error_code or 'collection-invalid-name') + m = COLLECTION_NAME_RE.match(v) + if not m: + raise _add_ansible_error_code( + Invalid('Collection name must be of format `<namespace>.<name>`'), error_code or 'collection-invalid-name') + return v + + +def version(for_collection=False): + if for_collection: + # We do not accept floats for versions in collections + return Any(*string_types) + return Any(float, *string_types) + + +def date(error_code=None): + return Any(isodate, error_code=error_code) + + +def is_callable(v): + if not callable(v): + raise ValueInvalid('not a valid value') + return v + + +def sequence_of_sequences(min=None, max=None): + return All( + Any( + None, + [Any(list, tuple)], + tuple([Any(list, tuple)]), + ), + Any( + None, + [Length(min=min, max=max)], + tuple([Length(min=min,
max=max)]), + ), + ) + + +seealso_schema = Schema( + [ + Any( + { + Required('module'): Any(*string_types), + 'description': Any(*string_types), + }, + { + Required('ref'): Any(*string_types), + Required('description'): Any(*string_types), + }, + { + Required('name'): Any(*string_types), + Required('link'): Any(*string_types), + Required('description'): Any(*string_types), + }, + ), + ] +) + + +argument_spec_types = ['bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', + 'sid', 'str'] + + +argument_spec_modifiers = { + 'mutually_exclusive': sequence_of_sequences(min=2), + 'required_together': sequence_of_sequences(min=2), + 'required_one_of': sequence_of_sequences(min=2), + 'required_if': sequence_of_sequences(min=3, max=4), + 'required_by': Schema({str: Any(list_string_types, tuple_string_types, *string_types)}), +} + + +def no_required_with_default(v): + if v.get('default') and v.get('required'): + raise Invalid('required=True cannot be supplied with a default') + return v + + +def elements_with_list(v): + if v.get('elements') and v.get('type') != 'list': + raise Invalid('type must be list to use elements') + return v + + +def options_with_apply_defaults(v): + if v.get('apply_defaults') and not v.get('options'): + raise Invalid('apply_defaults=True requires options to be set') + return v + + +def option_deprecation(v): + if v.get('removed_in_version') or v.get('removed_at_date'): + if v.get('removed_in_version') and v.get('removed_at_date'): + raise _add_ansible_error_code( + Invalid('Only one of removed_in_version and removed_at_date must be specified'), + error_code='deprecation-either-date-or-version') + if not v.get('removed_from_collection'): + raise _add_ansible_error_code( + Invalid('If removed_in_version or removed_at_date is specified, ' + 'removed_from_collection must be specified as well'), + error_code='deprecation-collection-missing') + return + if v.get('removed_from_collection'): + raise Invalid('removed_from_collection cannot be specified without either ' + 'removed_in_version or removed_at_date') + + +def argument_spec_schema(for_collection): + any_string_types = Any(*string_types) + schema = { + any_string_types: { + 'type': Any(is_callable, *argument_spec_types), + 'elements': Any(*argument_spec_types), + 'default': object, + 'fallback': Any( + (is_callable, list_string_types), + [is_callable, list_string_types], + ), + 'choices': Any([object], (object,)), + 'required': bool, + 'no_log': bool, + 'aliases': Any(list_string_types, tuple(list_string_types)), + 'apply_defaults': bool, + 'removed_in_version': version(for_collection), + 'removed_at_date': date(), + 'removed_from_collection': collection_name, + 'options': Self, + 'deprecated_aliases': Any([Any( + { + Required('name'): Any(*string_types), + Required('date'): date(), + Required('collection_name'): collection_name, + }, + { + Required('name'): Any(*string_types), + Required('version'): version(for_collection), + Required('collection_name'): collection_name, + }, + )]), + } + } + schema[any_string_types].update(argument_spec_modifiers) + schemas = All( + schema, + Schema({any_string_types: no_required_with_default}), + Schema({any_string_types: elements_with_list}), + Schema({any_string_types: options_with_apply_defaults}), + Schema({any_string_types: option_deprecation}), + ) + return Schema(schemas) + + +def ansible_module_kwargs_schema(for_collection): + schema = { + 'argument_spec': argument_spec_schema(for_collection), + 'bypass_checks': bool, + 'no_log': bool, + 
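+        # The keys in this schema, together with the argument_spec modifiers merged
+        # in below via schema.update(), mirror the keyword arguments accepted by the
+        # AnsibleModule constructor; check_invalid_arguments is a legacy option,
+        # which is why it also tolerates None.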
'check_invalid_arguments': Any(None, bool), + 'add_file_common_args': bool, + 'supports_check_mode': bool, + } + schema.update(argument_spec_modifiers) + return Schema(schema) + + +json_value = Schema(Any( + None, + int, + float, + [Self], + *(list({str_type: Self} for str_type in string_types) + list(string_types)) +)) + + +def version_added(v, error_code='version-added-invalid', accept_historical=False): + if 'version_added' in v: + version_added = v.get('version_added') + if isinstance(version_added, string_types): + # If it is not a string, schema validation will have already complained + # - or we have a float and we are in ansible/ansible, in which case we're + # also happy. + if v.get('version_added_collection') == 'ansible.builtin': + if version_added == 'historical' and accept_historical: + return v + try: + version = StrictVersion() + version.parse(version_added) + except ValueError as exc: + raise _add_ansible_error_code( + Invalid('version_added (%r) is not a valid ansible-base version: ' + '%s' % (version_added, exc)), + error_code=error_code) + else: + try: + version = SemanticVersion() + version.parse(version_added) + except ValueError as exc: + raise _add_ansible_error_code( + Invalid('version_added (%r) is not a valid collection version ' + '(see specification at https://semver.org/): ' + '%s' % (version_added, exc)), + error_code=error_code) + elif 'version_added_collection' in v: + # Must have been manual intervention, since version_added_collection is only + # added automatically when version_added is present + raise Invalid('version_added_collection cannot be specified without version_added') + return v + + +def list_dict_option_schema(for_collection): + suboption_schema = Schema( + { + Required('description'): Any(list_string_types, *string_types), + 'required': bool, + 'choices': list, + 'aliases': Any(list_string_types), + 'version_added': version(for_collection), + 'version_added_collection': collection_name, + 'default': json_value, + # Note: Types are strings, not literal bools, such as True or False + 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + # in case of type='list' elements define type of individual item in list + 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + # Recursive suboptions + 'suboptions': Any(None, *list({str_type: Self} for str_type in string_types)), + }, + extra=PREVENT_EXTRA + ) + + # This generates list of dicts with keys from string_types and suboption_schema value + # for example in Python 3: {str: suboption_schema} + list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types] + + option_schema = Schema( + { + Required('description'): Any(list_string_types, *string_types), + 'required': bool, + 'choices': list, + 'aliases': Any(list_string_types), + 'version_added': version(for_collection), + 'version_added_collection': collection_name, + 'default': json_value, + 'suboptions': Any(None, *list_dict_suboption_schema), + # Note: Types are strings, not literal bools, such as True or False + 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + # in case of type='list' elements define type of individual item in list + 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + }, + extra=PREVENT_EXTRA + ) + + 
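+    # For orientation, a hypothetical options block that option_schema (and the
+    # recursive suboption_schema above) accepts would look like this in a
+    # module's DOCUMENTATION:
+    #
+    #   options:
+    #     state:
+    #       description: Desired state of the resource.
+    #       type: str
+    #       choices: [absent, present]
+    #       default: present
+    #     proxy:
+    #       description: Proxy settings.
+    #       type: dict
+    #       suboptions:
+    #         host:
+    #           description: Proxy hostname.
+    #           type: str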
option_version_added = Schema( + All({ + 'suboptions': Any(None, *[{str_type: Self} for str_type in string_types]), + }, partial(version_added, error_code='option-invalid-version-added')), + extra=ALLOW_EXTRA + ) + + # This generates list of dicts with keys from string_types and option_schema value + # for example in Python 3: {str: option_schema} + return [{str_type: All(option_schema, option_version_added)} for str_type in string_types] + + +def return_contains(v): + schema = Schema( + { + Required('contains'): Any(dict, list, *string_types) + }, + extra=ALLOW_EXTRA + ) + if v.get('type') == 'complex': + return schema(v) + return v + + +def return_schema(for_collection): + return_contains_schema = Any( + All( + Schema( + { + Required('description'): Any(list_string_types, *string_types), + 'returned': Any(*string_types), # only returned on top level + Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'), + 'version_added': version(for_collection), + 'version_added_collection': collection_name, + 'sample': json_value, + 'example': json_value, + 'contains': Any(None, *list({str_type: Self} for str_type in string_types)), + # in case of type='list' elements define type of individual item in list + 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + } + ), + Schema(return_contains), + Schema(partial(version_added, error_code='option-invalid-version-added')), + ), + Schema(type(None)), + ) + + # This generates list of dicts with keys from string_types and return_contains_schema value + # for example in Python 3: {str: return_contains_schema} + list_dict_return_contains_schema = [{str_type: return_contains_schema} for str_type in string_types] + + return Any( + All( + Schema( + { + any_string_types: { + Required('description'): Any(list_string_types, *string_types), + Required('returned'): Any(*string_types), + Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'), + 'version_added': version(for_collection), + 'version_added_collection': collection_name, + 'sample': json_value, + 'example': json_value, + 'contains': Any(None, *list_dict_return_contains_schema), + # in case of type='list' elements define type of individual item in list + 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'), + } + } + ), + Schema({any_string_types: return_contains}), + Schema({any_string_types: partial(version_added, error_code='option-invalid-version-added')}), + ), + Schema(type(None)), + ) + + +def deprecation_schema(for_collection): + main_fields = { + Required('why'): Any(*string_types), + Required('alternative'): Any(*string_types), + Required('removed_from_collection'): collection_name, + 'removed': Any(True), + } + + date_schema = { + Required('removed_at_date'): date(), + } + date_schema.update(main_fields) + + if for_collection: + version_schema = { + Required('removed_in'): version(for_collection), + } + else: + version_schema = { + # Only list branches that are deprecated or may have docs stubs in + # Deprecation cycle changed at 2.4 (though not retroactively) + # 2.3 -> removed_in: "2.5" + n for docs stub + # 2.4 -> removed_in: "2.8" + n for docs stub + Required('removed_in'): Any( + "2.2", "2.3", "2.4", "2.5", "2.6", "2.8", "2.9", "2.10", "2.11", "2.12", "2.13", "2.14"), + } + version_schema.update(main_fields) + + return Any( + Schema(version_schema, extra=PREVENT_EXTRA), + Schema(date_schema, 
extra=PREVENT_EXTRA), + ) + + +def author(value): + if value is None: + return value # let schema checks handle + + if not is_iterable(value): + value = [value] + + for line in value: + if not isinstance(line, string_types): + continue # let schema checks handle + m = author_line.search(line) + if not m: + raise Invalid("Invalid author") + + return value + + +def doc_schema(module_name, for_collection=False, deprecated_module=False): + + if module_name.startswith('_'): + module_name = module_name[1:] + deprecated_module = True + doc_schema_dict = { + Required('module'): module_name, + Required('short_description'): Any(*string_types), + Required('description'): Any(list_string_types, *string_types), + Required('author'): All(Any(None, list_string_types, *string_types), author), + 'notes': Any(None, list_string_types), + 'seealso': Any(None, seealso_schema), + 'requirements': list_string_types, + 'todo': Any(None, list_string_types, *string_types), + 'options': Any(None, *list_dict_option_schema(for_collection)), + 'extends_documentation_fragment': Any(list_string_types, *string_types), + 'version_added_collection': collection_name, + } + + if for_collection: + # Optional + doc_schema_dict['version_added'] = version(for_collection=True) + else: + doc_schema_dict[Required('version_added')] = version(for_collection=False) + + if deprecated_module: + deprecation_required_scheme = { + Required('deprecated'): Any(deprecation_schema(for_collection=for_collection)), + } + + doc_schema_dict.update(deprecation_required_scheme) + return Schema( + All( + Schema( + doc_schema_dict, + extra=PREVENT_EXTRA + ), + partial(version_added, error_code='module-invalid-version-added', accept_historical=not for_collection), + ) + ) + + +# Things to add soon +#################### +# 1) Recursively validate `type: complex` fields + # This will improve documentation, though it will require a fair amount of module tidyup + +# Possible Future Enhancements +############################## + +# 1) Don't allow empty options for choices, aliases, etc +# 2) If type: bool ensure choices isn't set - perhaps use Exclusive +# 3) both version_added should be quoted floats + +# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures) diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py new file mode 100644 index 00000000..939ae651 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2015 Matt Martz <matt@sivel.net> +# Copyright (C) 2015 Rackspace US, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
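+# Shared helpers for the validate-modules sanity test: locating executables,
+# capturing stdout/stderr during module import, walking module ASTs, parsing
+# YAML embedded in documentation strings, and ISO 8601 date handling.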
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ast +import datetime +import os +import re +import sys + +from io import BytesIO, TextIOWrapper + +import yaml +import yaml.reader + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types + + +class AnsibleTextIOWrapper(TextIOWrapper): + def write(self, s): + super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace')) + + +def find_executable(executable, cwd=None, path=None): + """Finds the full path to the executable specified""" + match = None + real_cwd = os.getcwd() + + if not cwd: + cwd = real_cwd + + if os.path.dirname(executable): + target = os.path.join(cwd, executable) + if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK): + match = executable + else: + path = os.environ.get('PATH', os.path.defpath) + + path_dirs = path.split(os.path.pathsep) + seen_dirs = set() + + for path_dir in path_dirs: + if path_dir in seen_dirs: + continue + + seen_dirs.add(path_dir) + + if os.path.abspath(path_dir) == real_cwd: + path_dir = cwd + + candidate = os.path.join(path_dir, executable) + + if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK): + match = candidate + break + + return match + + +def find_globals(g, tree): + """Uses AST to find globals in an ast tree""" + for child in tree: + if hasattr(child, 'body') and isinstance(child.body, list): + find_globals(g, child.body) + elif isinstance(child, (ast.FunctionDef, ast.ClassDef)): + g.add(child.name) + continue + elif isinstance(child, ast.Assign): + try: + g.add(child.targets[0].id) + except (IndexError, AttributeError): + pass + elif isinstance(child, ast.Import): + g.add(child.names[0].name) + elif isinstance(child, ast.ImportFrom): + for name in child.names: + g_name = name.asname or name.name + if g_name == '*': + continue + g.add(g_name) + + +class CaptureStd(): + """Context manager to handle capturing stderr and stdout""" + + def __enter__(self): + self.sys_stdout = sys.stdout + self.sys_stderr = sys.stderr + sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding) + sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding) + return self + + def __exit__(self, exc_type, exc_value, traceback): + sys.stdout = self.sys_stdout + sys.stderr = self.sys_stderr + + def get(self): + """Return ``(stdout, stderr)``""" + + return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue() + + +def get_module_name_from_filename(filename, collection): + # Calculate the module's name so that relative imports work correctly + if collection: + # collection is a relative path, example: ansible_collections/my_namespace/my_collection + # filename is a relative path, example: plugins/modules/my_module.py + path = os.path.join(collection, filename) + else: + # filename is a relative path, example: lib/ansible/modules/system/ping.py + path = os.path.relpath(filename, 'lib') + + name = os.path.splitext(path)[0].replace(os.path.sep, '.') + + return name + + +def parse_yaml(value, lineno, module, name, load_all=False): + traces = [] + errors = [] + data = None + + if load_all: + loader = yaml.safe_load_all + else: + loader = yaml.safe_load + + try: + data = loader(value) + if load_all: + data = list(data) + except yaml.MarkedYAMLError as e: + e.problem_mark.line += lineno - 1 + e.problem_mark.name = '%s.%s' % (module, name) + 
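+        # problem_mark.line is relative to the embedded YAML document, so shifting
+        # it by the documentation string's line number in the module (minus one)
+        # makes the positions reported below point at the module source file.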
errors.append({ + 'msg': '%s is not valid YAML' % name, + 'line': e.problem_mark.line + 1, + 'column': e.problem_mark.column + 1 + }) + traces.append(e) + except yaml.reader.ReaderError as e: + traces.append(e) + # TODO: Better line/column detection + errors.append({ + 'msg': ('%s is not valid YAML. Character ' + '0x%x at position %d.' % (name, e.character, e.position)), + 'line': lineno + }) + except yaml.YAMLError as e: + traces.append(e) + errors.append({ + 'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e), + 'line': lineno + }) + + return data, errors, traces + + +def is_empty(value): + """Evaluate null-like values, excluding False""" + if value is False: + return False + return not bool(value) + + +def compare_unordered_lists(a, b): + """Safe list comparisons + + Supports: + - unordered lists + - unhashable elements + """ + return len(a) == len(b) and all(x in b for x in a) + + +class NoArgsAnsibleModule(AnsibleModule): + """AnsibleModule that does not actually load params. This is used to get access to the + methods within AnsibleModule without having to fake a bunch of data + """ + def _load_params(self): + self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False} + + +def parse_isodate(v, allow_date): + if allow_date: + if isinstance(v, datetime.date): + return v + msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date' + else: + msg = 'Expected ISO 8601 date string (YYYY-MM-DD)' + if not isinstance(v, string_types): + raise ValueError(msg) + # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions, + # we have to do things manually. + if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v): + raise ValueError(msg) + try: + return datetime.datetime.strptime(v, '%Y-%m-%d').date() + except ValueError: + raise ValueError(msg) diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml new file mode 100644 index 00000000..45d8b7ad --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml @@ -0,0 +1,19 @@ +extends: default + +rules: + braces: {max-spaces-inside: 1, level: error} + brackets: {max-spaces-inside: 1, level: error} + colons: {max-spaces-after: -1, level: error} + commas: {max-spaces-after: -1, level: error} + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: {max: 3, level: error} + hyphens: {level: error} + indentation: disable + key-duplicates: enable + line-length: disable + new-line-at-end-of-file: disable + new-lines: {type: unix} + trailing-spaces: disable + truthy: disable diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml new file mode 100644 index 00000000..da7e6049 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml @@ -0,0 +1,19 @@ +extends: default + +rules: + braces: disable + brackets: disable + colons: disable + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: disable + hyphens: disable + indentation: disable + key-duplicates: enable + line-length: disable + new-line-at-end-of-file: disable + new-lines: {type: unix} + trailing-spaces: disable + truthy: disable diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml new file mode 
100644 index 00000000..6d418137 --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml @@ -0,0 +1,19 @@ +extends: default + +rules: + braces: disable + brackets: disable + colons: disable + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: disable + hyphens: disable + indentation: disable + key-duplicates: disable + line-length: disable + new-line-at-end-of-file: disable + new-lines: {type: unix} + trailing-spaces: disable + truthy: disable diff --git a/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py new file mode 100644 index 00000000..c9cdc19c --- /dev/null +++ b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python +"""Wrapper around yamllint that supports YAML embedded in Ansible modules.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ast +import json +import os +import sys + +import yaml +from yaml.resolver import Resolver +from yaml.constructor import SafeConstructor +from yaml.error import MarkedYAMLError +from _yaml import CParser # pylint: disable=no-name-in-module + +from yamllint import linter +from yamllint.config import YamlLintConfig + + +def main(): + """Main program body.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + checker = YamlChecker() + checker.check(paths) + checker.report() + + +class TestConstructor(SafeConstructor): + """Yaml Safe Constructor that knows about Ansible tags""" + + +TestConstructor.add_constructor( + u'!unsafe', + TestConstructor.construct_yaml_str) + + +TestConstructor.add_constructor( + u'!vault', + TestConstructor.construct_yaml_str) + + +TestConstructor.add_constructor( + u'!vault-encrypted', + TestConstructor.construct_yaml_str) + + +class TestLoader(CParser, TestConstructor, Resolver): + def __init__(self, stream): + CParser.__init__(self, stream) + TestConstructor.__init__(self) + Resolver.__init__(self) + + +class YamlChecker: + """Wrapper around yamllint that supports YAML embedded in Ansible modules.""" + def __init__(self): + self.messages = [] + + def report(self): + """Print yamllint report to stdout.""" + report = dict( + messages=self.messages, + ) + + print(json.dumps(report, indent=4, sort_keys=True)) + + def check(self, paths): + """ + :type paths: list[str] + """ + config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config') + + yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml')) + module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml')) + plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml')) + + for path in paths: + extension = os.path.splitext(path)[1] + + with open(path) as f: + contents = f.read() + + if extension in ('.yml', '.yaml'): + self.check_yaml(yaml_conf, path, contents) + elif extension == '.py': + if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'): + conf = module_conf + else: + conf = plugin_conf + + self.check_module(conf, path, contents) + else: + raise Exception('unsupported extension: %s' % extension) + + def check_yaml(self, conf, path, contents): + """ + :type conf: YamlLintConfig + :type path: str + :type contents: str + """ + self.check_parsable(path, contents) + self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)] + + def check_module(self, conf, path, contents): + """ + :type conf: 
YamlLintConfig + :type path: str + :type contents: str + """ + docs = self.get_module_docs(path, contents) + + for key, value in docs.items(): + yaml_data = value['yaml'] + lineno = value['lineno'] + + if yaml_data.startswith('\n'): + yaml_data = yaml_data[1:] + lineno += 1 + + self.check_parsable(path, yaml_data, lineno) + + messages = list(linter.run(yaml_data, conf, path)) + + self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages] + + def check_parsable(self, path, contents, lineno=1): + """ + :type path: str + :type contents: str + :type lineno: int + """ + try: + yaml.load(contents, Loader=TestLoader) + except MarkedYAMLError as e: + self.messages += [{'code': 'unparsable-with-libyaml', + 'message': '%s - %s' % (e.args[0], e.args[2]), + 'path': path, + 'line': e.problem_mark.line + lineno, + 'column': e.problem_mark.column + 1, + 'level': 'error', + }] + + @staticmethod + def result_to_message(result, path, line_offset=0, prefix=''): + """ + :type result: any + :type path: str + :type line_offset: int + :type prefix: str + :rtype: dict[str, any] + """ + if prefix: + prefix = '%s: ' % prefix + + return dict( + code=result.rule or result.level, + message=prefix + result.desc, + path=path, + line=result.line + line_offset, + column=result.column, + level=result.level, + ) + + def get_module_docs(self, path, contents): + """ + :type path: str + :type contents: str + :rtype: dict[str, any] + """ + module_doc_types = [ + 'DOCUMENTATION', + 'EXAMPLES', + 'RETURN', + ] + + docs = {} + + def check_assignment(statement, doc_types=None): + """Check the given statement for a documentation assignment.""" + for target in statement.targets: + if not isinstance(target, ast.Name): + continue + + if doc_types and target.id not in doc_types: + continue + + docs[target.id] = dict( + yaml=statement.value.s, + lineno=statement.lineno, + end_lineno=statement.lineno + len(statement.value.s.splitlines()) + ) + + module_ast = self.parse_module(path, contents) + + if not module_ast: + return {} + + is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/') + is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/') + + if is_plugin and not is_doc_fragment: + for body_statement in module_ast.body: + if isinstance(body_statement, ast.Assign): + check_assignment(body_statement, module_doc_types) + elif is_doc_fragment: + for body_statement in module_ast.body: + if isinstance(body_statement, ast.ClassDef): + for class_statement in body_statement.body: + if isinstance(class_statement, ast.Assign): + check_assignment(class_statement) + else: + raise Exception('unsupported path: %s' % path) + + return docs + + def parse_module(self, path, contents): + """ + :type path: str + :type contents: str + :rtype: ast.Module | None + """ + try: + return ast.parse(contents) + except SyntaxError as ex: + self.messages.append(dict( + code='python-syntax-error', + message=str(ex), + path=path, + line=ex.lineno, + column=ex.offset, + level='error', + )) + except Exception as ex: # pylint: disable=broad-except + self.messages.append(dict( + code='python-parse-error', + message=str(ex), + path=path, + line=0, + column=0, + level='error', + )) + + return None + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 new file mode 100644 index 
00000000..7e039bb4 --- /dev/null +++ b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 @@ -0,0 +1,453 @@ +#Requires -Version 3.0 + +# Configure a Windows host for remote management with Ansible +# ----------------------------------------------------------- +# +# This script checks the current WinRM (PS Remoting) configuration and makes +# the necessary changes to allow Ansible to connect, authenticate and +# execute PowerShell commands. +# +# All events are logged to the Windows EventLog, useful for unattended runs. +# +# Use option -Verbose in order to see the verbose output messages. +# +# Use option -CertValidityDays to specify how long this certificate is valid +# starting from today. So you would specify -CertValidityDays 3650 to get +# a 10-year valid certificate. +# +# Use option -ForceNewSSLCert if the system has been SysPreped and a new +# SSL Certificate must be forced on the WinRM Listener when re-running this +# script. This is necessary when a new SID and CN name is created. +# +# Use option -EnableCredSSP to enable CredSSP as an authentication option. +# +# Use option -DisableBasicAuth to disable basic authentication. +# +# Use option -SkipNetworkProfileCheck to skip the network profile check. +# Without specifying this the script will only run if the device's interfaces +# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable +# WinRM on a device with an interface in PUBLIC zone. +# +# Use option -SubjectName to specify the CN name of the certificate. This +# defaults to the system's hostname and generally should not be specified. + +# Written by Trond Hindenes +# Updated by Chris Church +# Updated by Michael Crilly +# Updated by Anton Ouzounov +# Updated by Nicolas Simond +# Updated by Dag Wieërs +# Updated by Jordan Borean +# Updated by Erwan Quélin +# Updated by David Norman +# +# Version 1.0 - 2014-07-06 +# Version 1.1 - 2014-11-11 +# Version 1.2 - 2015-05-15 +# Version 1.3 - 2016-04-04 +# Version 1.4 - 2017-01-05 +# Version 1.5 - 2017-02-09 +# Version 1.6 - 2017-04-18 +# Version 1.7 - 2017-11-23 +# Version 1.8 - 2018-02-23 +# Version 1.9 - 2018-09-21 + +# Support -Verbose option +[CmdletBinding()] + +Param ( + [string]$SubjectName = $env:COMPUTERNAME, + [int]$CertValidityDays = 1095, + [switch]$SkipNetworkProfileCheck, + $CreateSelfSignedCert = $true, + [switch]$ForceNewSSLCert, + [switch]$GlobalHttpFirewallAccess, + [switch]$DisableBasicAuth = $false, + [switch]$EnableCredSSP +) + +Function Write-Log +{ + $Message = $args[0] + Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message +} + +Function Write-VerboseLog +{ + $Message = $args[0] + Write-Verbose $Message + Write-Log $Message +} + +Function Write-HostLog +{ + $Message = $args[0] + Write-Output $Message + Write-Log $Message +} + +Function New-LegacySelfSignedCert +{ + Param ( + [string]$SubjectName, + [int]$ValidDays = 1095 + ) + + $hostnonFQDN = $env:computerName + $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname + $SignatureAlgorithm = "SHA256" + + $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1" + $name.Encode("CN=$SubjectName", 0) + + $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1" + $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider" + $key.KeySpec = 1 + $key.Length = 4096 + $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)" + $key.MachineContext = 1 + $key.Create() + + $serverauthoid = New-Object -COM 
"X509Enrollment.CObjectId.1" + $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1") + $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1" + $ekuoids.Add($serverauthoid) + $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" + $ekuext.InitializeEncode($ekuoids) + + $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1" + $cert.InitializeFromPrivateKey(2, $key, "") + $cert.Subject = $name + $cert.Issuer = $cert.Subject + $cert.NotBefore = (Get-Date).AddDays(-1) + $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays) + + $SigOID = New-Object -ComObject X509Enrollment.CObjectId + $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value) + + [string[]] $AlternativeName += $hostnonFQDN + $AlternativeName += $hostFQDN + $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames + + foreach ($AN in $AlternativeName) + { + $AltName = New-Object -ComObject X509Enrollment.CAlternativeName + $AltName.InitializeFromString(0x3,$AN) + $IAlternativeNames.Add($AltName) + } + + $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames + $SubjectAlternativeName.InitializeEncode($IAlternativeNames) + + [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment") + $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage + $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage)) + $KeyUsageObj.Critical = $true + + $cert.X509Extensions.Add($KeyUsageObj) + $cert.X509Extensions.Add($ekuext) + $cert.SignatureInformation.HashAlgorithm = $SigOID + $CERT.X509Extensions.Add($SubjectAlternativeName) + $cert.Encode() + + $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1" + $enrollment.InitializeFromRequest($cert) + $certdata = $enrollment.CreateRequest(0) + $enrollment.InstallResponse(2, $certdata, 0, "") + + # extract/return the thumbprint from the generated cert + $parsed_cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2 + $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata)) + + return $parsed_cert.Thumbprint +} + +Function Enable-GlobalHttpFirewallAccess +{ + Write-Verbose "Forcing global HTTP firewall access" + # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing + $fw = New-Object -ComObject HNetCfg.FWPolicy2 + + # try to find/enable the default rule first + $add_rule = $false + $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" } + $rule = $null + If ($matching_rules) { + If ($matching_rules -isnot [Array]) { + Write-Verbose "Editing existing single HTTP firewall rule" + $rule = $matching_rules + } + Else { + # try to find one with the All or Public profile first + Write-Verbose "Found multiple existing HTTP firewall rules..." + $rule = $matching_rules | ForEach-Object { $_.Profiles -band 4 }[0] + + If (-not $rule -or $rule -is [Array]) { + Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)" + # oh well, just pick the first one + $rule = $matching_rules[0] + } + } + } + + If (-not $rule) { + Write-Verbose "Creating a new HTTP firewall rule" + $rule = New-Object -ComObject HNetCfg.FWRule + $rule.Name = "Windows Remote Management (HTTP-In)" + $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. 
[TCP 5985]" + $add_rule = $true + } + + $rule.Profiles = 0x7FFFFFFF + $rule.Protocol = 6 + $rule.LocalPorts = 5985 + $rule.RemotePorts = "*" + $rule.LocalAddresses = "*" + $rule.RemoteAddresses = "*" + $rule.Enabled = $true + $rule.Direction = 1 + $rule.Action = 1 + $rule.Grouping = "Windows Remote Management" + + If ($add_rule) { + $fw.Rules.Add($rule) + } + + Write-Verbose "HTTP firewall rule $($rule.Name) updated" +} + +# Setup error handling. +Trap +{ + $_ + Exit 1 +} +$ErrorActionPreference = "Stop" + +# Get the ID and security principal of the current user account +$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent() +$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID) + +# Get the security principal for the Administrator role +$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator + +# Check to see if we are currently running "as Administrator" +if (-Not $myWindowsPrincipal.IsInRole($adminRole)) +{ + Write-Output "ERROR: You need elevated Administrator privileges in order to run this script." + Write-Output " Start Windows PowerShell by using the Run as Administrator option." + Exit 2 +} + +$EventSource = $MyInvocation.MyCommand.Name +If (-Not $EventSource) +{ + $EventSource = "Powershell CLI" +} + +If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False) +{ + New-EventLog -LogName Application -Source $EventSource +} + +# Detect PowerShell version. +If ($PSVersionTable.PSVersion.Major -lt 3) +{ + Write-Log "PowerShell version 3 or higher is required." + Throw "PowerShell version 3 or higher is required." +} + +# Find and start the WinRM service. +Write-Verbose "Verifying WinRM service." +If (!(Get-Service "WinRM")) +{ + Write-Log "Unable to find the WinRM service." + Throw "Unable to find the WinRM service." +} +ElseIf ((Get-Service "WinRM").Status -ne "Running") +{ + Write-Verbose "Setting WinRM service to start automatically on boot." + Set-Service -Name "WinRM" -StartupType Automatic + Write-Log "Set WinRM service to start automatically on boot." + Write-Verbose "Starting WinRM service." + Start-Service -Name "WinRM" -ErrorAction Stop + Write-Log "Started WinRM service." + +} + +# WinRM should be running; check that we have a PS session config. +If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) +{ + If ($SkipNetworkProfileCheck) { + Write-Verbose "Enabling PS Remoting without checking Network profile." + Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop + Write-Log "Enabled PS Remoting without checking Network profile." + } + Else { + Write-Verbose "Enabling PS Remoting." + Enable-PSRemoting -Force -ErrorAction Stop + Write-Log "Enabled PS Remoting." + } +} +Else +{ + Write-Verbose "PS Remoting is already enabled." 
+} + +# Ensure LocalAccountTokenFilterPolicy is set to 1 +# https://github.com/ansible/ansible/issues/42978 +$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" +$token_prop_name = "LocalAccountTokenFilterPolicy" +$token_key = Get-Item -Path $token_path +$token_value = $token_key.GetValue($token_prop_name, $null) +if ($token_value -ne 1) { + Write-Verbose "Setting LocalAccountTokenFilterPolicy to 1" + if ($null -ne $token_value) { + Remove-ItemProperty -Path $token_path -Name $token_prop_name + } + New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null +} + +# Make sure there is an SSL listener. +$listeners = Get-ChildItem WSMan:\localhost\Listener +If (!($listeners | Where-Object {$_.Keys -like "TRANSPORT=HTTPS"})) +{ + # We cannot use New-SelfSignedCertificate on 2012R2 and earlier + $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays + Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint" + + # Create the hashtables of settings to be used. + $valueset = @{ + Hostname = $SubjectName + CertificateThumbprint = $thumbprint + } + + $selectorset = @{ + Transport = "HTTPS" + Address = "*" + } + + Write-Verbose "Enabling SSL listener." + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset + Write-Log "Enabled SSL listener." +} +Else +{ + Write-Verbose "SSL listener is already active." + + # Force a new SSL cert on the listener if $ForceNewSSLCert is set + If ($ForceNewSSLCert) + { + + # We cannot use New-SelfSignedCertificate on 2012R2 and earlier + $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays + Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint" + + $valueset = @{ + CertificateThumbprint = $thumbprint + Hostname = $SubjectName + } + + # Delete the listener for SSL + $selectorset = @{ + Address = "*" + Transport = "HTTPS" + } + Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset + + # Add new Listener with new SSL cert + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset + } +} + +# Check for basic authentication. +$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "Basic"} + +If ($DisableBasicAuth) +{ + If (($basicAuthSetting.Value) -eq $true) + { + Write-Verbose "Disabling basic auth support." + Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false + Write-Log "Disabled basic auth support." + } + Else + { + Write-Verbose "Basic auth is already disabled." + } +} +Else +{ + If (($basicAuthSetting.Value) -eq $false) + { + Write-Verbose "Enabling basic auth support." + Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true + Write-Log "Enabled basic auth support." + } + Else + { + Write-Verbose "Basic auth is already enabled." + } +} + +# If EnableCredSSP is set to true +If ($EnableCredSSP) +{ + # Check for CredSSP authentication + $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "CredSSP"} + If (($credsspAuthSetting.Value) -eq $false) + { + Write-Verbose "Enabling CredSSP auth support." + Enable-WSManCredSSP -role server -Force + Write-Log "Enabled CredSSP auth support." + } +} + +If ($GlobalHttpFirewallAccess) { + Enable-GlobalHttpFirewallAccess +} + +# Configure firewall to allow WinRM HTTPS connections. 
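+# The checks below count lines of netsh output: fewer than 5 lines means the
+# rule was not found. As an aside (not part of this script), on hosts where the
+# NetSecurity module is available the same rule could be created with:
+#   New-NetFirewallRule -DisplayName "Allow WinRM HTTPS" -Direction Inbound `
+#       -LocalPort 5986 -Protocol TCP -Action Allow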
+$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" +$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any +If ($fwtest1.count -lt 5) +{ + Write-Verbose "Adding firewall rule to allow WinRM HTTPS." + netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow + Write-Log "Added firewall rule to allow WinRM HTTPS." +} +ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5)) +{ + Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile." + netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any + Write-Log "Updated firewall rule to allow WinRM HTTPS for any profile." +} +Else +{ + Write-Verbose "Firewall rule already exists to allow WinRM HTTPS." +} + +# Test a remoting connection to localhost, which should work. +$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue +$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck + +$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue + +If ($httpResult -and $httpsResult) +{ + Write-Verbose "HTTP: Enabled | HTTPS: Enabled" +} +ElseIf ($httpsResult -and !$httpResult) +{ + Write-Verbose "HTTP: Disabled | HTTPS: Enabled" +} +ElseIf ($httpResult -and !$httpsResult) +{ + Write-Verbose "HTTP: Enabled | HTTPS: Disabled" +} +Else +{ + Write-Log "Unable to establish an HTTP or HTTPS remoting session." + Throw "Unable to establish an HTTP or HTTPS remoting session." +} +Write-VerboseLog "PS Remoting has been successfully configured for Ansible." diff --git a/test/lib/ansible_test/_data/setup/docker.sh b/test/lib/ansible_test/_data/setup/docker.sh new file mode 100644 index 00000000..c65e8ac5 --- /dev/null +++ b/test/lib/ansible_test/_data/setup/docker.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +set -eu + +# Required for newer mysql-server packages to install/upgrade on Ubuntu 16.04. +rm -f /usr/sbin/policy-rc.d + +# Improve prompts on remote host for interactive use. +# shellcheck disable=SC1117 +cat << EOF > ~/.bashrc +alias ls='ls --color=auto' +export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +cd ~/ansible/ +EOF diff --git a/test/lib/ansible_test/_data/setup/remote.sh b/test/lib/ansible_test/_data/setup/remote.sh new file mode 100644 index 00000000..35167e21 --- /dev/null +++ b/test/lib/ansible_test/_data/setup/remote.sh @@ -0,0 +1,159 @@ +#!/bin/sh + +set -eu + +platform="$1" +python_version="$2" +python_interpreter="python${python_version}" + +cd ~/ + +install_pip () { + if ! 
"${python_interpreter}" -m pip.__main__ --version --disable-pip-version-check 2>/dev/null; then + case "${python_version}" in + *) + pip_bootstrap_url="https://ansible-ci-files.s3.amazonaws.com/ansible-test/get-pip-20.3.4.py" + ;; + esac + curl --silent --show-error "${pip_bootstrap_url}" -o /tmp/get-pip.py + "${python_interpreter}" /tmp/get-pip.py --disable-pip-version-check --quiet + rm /tmp/get-pip.py + fi +} + +if [ "${platform}" = "freebsd" ]; then + py_version="$(echo "${python_version}" | tr -d '.')" + + while true; do + env ASSUME_ALWAYS_YES=YES pkg bootstrap && \ + pkg install -q -y \ + bash \ + curl \ + gtar \ + "python${py_version}" \ + "py${py_version}-Jinja2" \ + "py${py_version}-virtualenv" \ + "py${py_version}-cryptography" \ + sudo \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done + + install_pip + + if ! grep '^PermitRootLogin yes$' /etc/ssh/sshd_config > /dev/null; then + sed -i '' 's/^# *PermitRootLogin.*$/PermitRootLogin yes/;' /etc/ssh/sshd_config + service sshd restart + fi +elif [ "${platform}" = "rhel" ]; then + if grep '8\.' /etc/redhat-release; then + while true; do + yum module install -q -y python36 && \ + yum install -q -y \ + gcc \ + python3-devel \ + python3-jinja2 \ + python3-virtualenv \ + python3-cryptography \ + iptables \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done + else + while true; do + yum install -q -y \ + gcc \ + python-devel \ + python-virtualenv \ + python2-cryptography \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done + + install_pip + fi + + # pin packaging and pyparsing to match the downstream vendored versions + "${python_interpreter}" -m pip install packaging==20.4 pyparsing==2.4.7 --disable-pip-version-check +elif [ "${platform}" = "centos" ]; then + while true; do + yum install -q -y \ + gcc \ + python-devel \ + python-virtualenv \ + python2-cryptography \ + libffi-devel \ + openssl-devel \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done + + install_pip +elif [ "${platform}" = "osx" ]; then + while true; do + pip install --disable-pip-version-check --quiet \ + 'virtualenv==16.7.10' \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done +elif [ "${platform}" = "aix" ]; then + chfs -a size=1G / + chfs -a size=4G /usr + chfs -a size=1G /var + chfs -a size=1G /tmp + chfs -a size=2G /opt + while true; do + yum install -q -y \ + gcc \ + libffi-devel \ + python-jinja2 \ + python-cryptography \ + python-pip && \ + pip install --disable-pip-version-check --quiet virtualenv \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done +fi + +# Generate our ssh key and add it to our authorized_keys file. +# We also need to add localhost's server keys to known_hosts. + +if [ ! 
-f "${HOME}/.ssh/id_rsa.pub" ]; then + ssh-keygen -m PEM -q -t rsa -N '' -f "${HOME}/.ssh/id_rsa" + # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko + touch "${HOME}/.ssh/id_rsa.new" + chmod 0600 "${HOME}/.ssh/id_rsa.new" + sed 's/\(BEGIN\|END\) PRIVATE KEY/\1 RSA PRIVATE KEY/' "${HOME}/.ssh/id_rsa" > "${HOME}/.ssh/id_rsa.new" + mv "${HOME}/.ssh/id_rsa.new" "${HOME}/.ssh/id_rsa" + cat "${HOME}/.ssh/id_rsa.pub" >> "${HOME}/.ssh/authorized_keys" + chmod 0600 "${HOME}/.ssh/authorized_keys" + for key in /etc/ssh/ssh_host_*_key.pub; do + pk=$(cat "${key}") + echo "localhost ${pk}" >> "${HOME}/.ssh/known_hosts" + done +fi + +# Improve prompts on remote host for interactive use. +# shellcheck disable=SC1117 +cat << EOF > ~/.bashrc +if ls --color > /dev/null 2>&1; then + alias ls='ls --color' +elif ls -G > /dev/null 2>&1; then + alias ls='ls -G' +fi +export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +EOF + +# Make sure ~/ansible/ is the starting directory for interactive shells. +if [ "${platform}" = "osx" ]; then + echo "cd ~/ansible/" >> ~/.bashrc +elif [ "${platform}" = "macos" ] ; then + echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bashrc + echo "cd ~/ansible/" >> ~/.bashrc +fi diff --git a/test/lib/ansible_test/_data/setup/windows-httptester.ps1 b/test/lib/ansible_test/_data/setup/windows-httptester.ps1 new file mode 100644 index 00000000..70bdb332 --- /dev/null +++ b/test/lib/ansible_test/_data/setup/windows-httptester.ps1 @@ -0,0 +1,228 @@ +<# +.SYNOPSIS +Designed to set a Windows host to connect to the httptester container running +on the Ansible host. This will setup the Windows host file and forward the +local ports to use this connection. This will continue to run in the background +until the script is deleted. + +Run this with SSH with the -R arguments to foward ports 8080 and 8443 to the +httptester container. + +.PARAMETER Hosts +A list of hostnames, delimited by '|', to add to the Windows hosts file for the +httptester container, e.g. 'ansible.host.com|secondary.host.test'. +#> +[CmdletBinding()] +param( + [Parameter(Mandatory=$true, Position=0)][String]$Hosts +) +$Hosts = $Hosts.Split('|') + +$ProgressPreference = "SilentlyContinue" +$ErrorActionPreference = "Stop" +$os_version = [Version](Get-Item -Path "$env:SystemRoot\System32\kernel32.dll").VersionInfo.ProductVersion +Write-Verbose -Message "Configuring HTTP Tester on Windows $os_version for '$($Hosts -join "', '")'" + +Function Get-PmapperRuleBytes { + <# + .SYNOPSIS + Create the byte values that configures a rule in the PMapper configuration + file. This isn't really documented but because PMapper is only used for + Server 2008 R2 we will stick to 1 version and just live with the legacy + work for now. + + .PARAMETER ListenPort + The port to listen on localhost, this will be forwarded to the host defined + by ConnectAddress and ConnectPort. + + .PARAMETER ConnectAddress + The hostname or IP to map the traffic to. + + .PARAMETER ConnectPort + This port of ConnectAddress to map the traffic to. 
+ #> + param( + [Parameter(Mandatory=$true)][UInt16]$ListenPort, + [Parameter(Mandatory=$true)][String]$ConnectAddress, + [Parameter(Mandatory=$true)][Int]$ConnectPort + ) + + $connect_field = "$($ConnectAddress):$ConnectPort" + $connect_bytes = [System.Text.Encoding]::ASCII.GetBytes($connect_field) + $data_length = [byte]($connect_bytes.Length + 6) # size of payload minus header, length, and footer + $port_bytes = [System.BitConverter]::GetBytes($ListenPort) + + $payload = [System.Collections.Generic.List`1[Byte]]@() + $payload.Add([byte]16) > $null # header is \x10, means Configure Mapping rule + $payload.Add($data_length) > $null + $payload.AddRange($connect_bytes) + $payload.AddRange($port_bytes) + $payload.AddRange([byte[]]@(0, 0)) # 2 extra bytes of padding + $payload.Add([byte]0) > $null # 0 is TCP, 1 is UDP + $payload.Add([byte]0) > $null # 0 is Any, 1 is Internet + $payload.Add([byte]31) > $null # footer is \x1f, means end of Configure Mapping rule + + return ,$payload.ToArray() +} + +Write-Verbose -Message "Adding host file entries" +$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts" +$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file) +$changed = $false +foreach ($httptester_host in $Hosts) { + $host_line = "127.0.0.1 $httptester_host # ansible-test httptester" + if ($host_line -notin $hosts_file_lines) { + $hosts_file_lines += $host_line + $changed = $true + } +} +if ($changed) { + Write-Verbose -Message "Host file is missing entries, adding missing entries" + [System.IO.File]::WriteAllLines($hosts_file, $hosts_file_lines) +} + +# forward ports +$forwarded_ports = @{ + 80 = 8080 + 443 = 8443 +} +if ($os_version -ge [Version]"6.2") { + Write-Verbose -Message "Using netsh to configure forwarded ports" + foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) { + $port_set = netsh interface portproxy show v4tov4 | ` + Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" } + + if (-not $port_set) { + Write-Verbose -Message "Adding netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)" + $add_args = @( + "interface", + "portproxy", + "add", + "v4tov4", + "listenaddress=127.0.0.1", + "listenport=$($forwarded_port.Key)", + "connectaddress=127.0.0.1", + "connectport=$($forwarded_port.Value)" + ) + $null = netsh $add_args 2>&1 + } + } +} else { + Write-Verbose -Message "Using Port Mapper to configure forwarded ports" + # netsh interface portproxy doesn't work on local addresses in older + # versions of Windows. 
Use the custom application Port Mapper to achieve the + # same outcome + # http://www.analogx.com/contents/download/Network/pmapper/Freeware.htm + $s3_url = "https://ansible-ci-files.s3.amazonaws.com/ansible-test/pmapper-1.04.exe" + + # download the Port Mapper executable to a temporary directory + $pmapper_folder = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath ([System.IO.Path]::GetRandomFileName()) + $pmapper_exe = Join-Path -Path $pmapper_folder -ChildPath pmapper.exe + $pmapper_config = Join-Path -Path $pmapper_folder -ChildPath pmapper.dat + New-Item -Path $pmapper_folder -ItemType Directory > $null + + $stop = $false + do { + try { + Write-Verbose -Message "Attempting download of '$s3_url'" + (New-Object -TypeName System.Net.WebClient).DownloadFile($s3_url, $pmapper_exe) + $stop = $true + } catch { Start-Sleep -Second 5 } + } until ($stop) + + # create the Port Mapper rule file that contains our forwarded ports + $fs = [System.IO.File]::Create($pmapper_config) + try { + foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) { + Write-Verbose -Message "Creating forwarded port rule for $($forwarded_port.Key) -> $($forwarded_port.Value)" + $pmapper_rule = Get-PmapperRuleBytes -ListenPort $forwarded_port.Key -ConnectAddress 127.0.0.1 -ConnectPort $forwarded_port.Value + $fs.Write($pmapper_rule, 0, $pmapper_rule.Length) + } + } finally { + $fs.Close() + } + + Write-Verbose -Message "Starting Port Mapper '$pmapper_exe' in the background" + $start_args = @{ + CommandLine = $pmapper_exe + CurrentDirectory = $pmapper_folder + } + $res = Invoke-CimMethod -ClassName Win32_Process -MethodName Create -Arguments $start_args + if ($res.ReturnValue -ne 0) { + $error_msg = switch($res.ReturnValue) { + 2 { "Access denied" } + 3 { "Insufficient privilege" } + 8 { "Unknown failure" } + 9 { "Path not found" } + 21 { "Invalid parameter" } + default { "Undefined Error: $($res.ReturnValue)" } + } + Write-Error -Message "Failed to start pmapper: $error_msg" + } + $pmapper_pid = $res.ProcessId + Write-Verbose -Message "Port Mapper PID: $pmapper_pid" +} + +Write-Verbose -Message "Wait for current script at '$PSCommandPath' to be deleted before running cleanup" +$fsw = New-Object -TypeName System.IO.FileSystemWatcher +$fsw.Path = Split-Path -Path $PSCommandPath -Parent +$fsw.Filter = Split-Path -Path $PSCommandPath -Leaf +$fsw.WaitForChanged([System.IO.WatcherChangeTypes]::Deleted, 3600000) > $null +Write-Verbose -Message "Script delete or timeout reached, cleaning up Windows httptester artifacts" + +Write-Verbose -Message "Cleanup host file entries" +$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file) +$new_lines = [System.Collections.ArrayList]@() +$changed = $false +foreach ($host_line in $hosts_file_lines) { + if ($host_line.EndsWith("# ansible-test httptester")) { + $changed = $true + continue + } + $new_lines.Add($host_line) > $null +} +if ($changed) { + Write-Verbose -Message "Host file has extra entries, removing extra entries" + [System.IO.File]::WriteAllLines($hosts_file, $new_lines) +} + +if ($os_version -ge [Version]"6.2") { + Write-Verbose -Message "Cleanup of forwarded port configured in netsh" + foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) { + $port_set = netsh interface portproxy show v4tov4 | ` + Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" } + + if ($port_set) { + Write-Verbose -Message "Removing netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)" + $delete_args = @( 
+ "interface", + "portproxy", + "delete", + "v4tov4", + "listenaddress=127.0.0.1", + "listenport=$($forwarded_port.Key)" + ) + $null = netsh $delete_args 2>&1 + } + } +} else { + Write-Verbose -Message "Stopping Port Mapper executable based on pid $pmapper_pid" + Stop-Process -Id $pmapper_pid -Force + + # the process may not stop straight away, try multiple times to delete the Port Mapper folder + $attempts = 1 + do { + try { + Write-Verbose -Message "Cleanup temporary files for Port Mapper at '$pmapper_folder' - Attempt: $attempts" + Remove-Item -Path $pmapper_folder -Force -Recurse + break + } catch { + Write-Verbose -Message "Cleanup temporary files for Port Mapper failed, waiting 5 seconds before trying again:$($_ | Out-String)" + if ($attempts -ge 5) { + break + } + $attempts += 1 + Start-Sleep -Second 5 + } + } until ($true) +} diff --git a/test/lib/ansible_test/_data/sslcheck.py b/test/lib/ansible_test/_data/sslcheck.py new file mode 100755 index 00000000..37b82279 --- /dev/null +++ b/test/lib/ansible_test/_data/sslcheck.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +"""Show openssl version.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +# noinspection PyBroadException +try: + from ssl import OPENSSL_VERSION_INFO + VERSION = list(OPENSSL_VERSION_INFO[:3]) +except Exception: # pylint: disable=broad-except + VERSION = None + + +def main(): + """Main program entry point.""" + print(json.dumps(dict( + version=VERSION, + ))) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_data/versions.py b/test/lib/ansible_test/_data/versions.py new file mode 100755 index 00000000..4babef01 --- /dev/null +++ b/test/lib/ansible_test/_data/versions.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +"""Show python and pip versions.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import warnings + +warnings.simplefilter('ignore') # avoid python version deprecation warnings when using newer pip dependencies + +try: + import pip +except ImportError: + pip = None + +print(sys.version) + +if pip: + print('pip %s from %s' % (pip.__version__, os.path.dirname(pip.__file__))) diff --git a/test/lib/ansible_test/_data/virtualenvcheck.py b/test/lib/ansible_test/_data/virtualenvcheck.py new file mode 100755 index 00000000..552b6e7d --- /dev/null +++ b/test/lib/ansible_test/_data/virtualenvcheck.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +"""Detect the real python interpreter when running in a virtual environment created by the 'virtualenv' module.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + +try: + from sys import real_prefix +except ImportError: + real_prefix = None + +print(json.dumps(dict( + real_prefix=real_prefix, +))) diff --git a/test/lib/ansible_test/_data/yamlcheck.py b/test/lib/ansible_test/_data/yamlcheck.py new file mode 100755 index 00000000..591842f4 --- /dev/null +++ b/test/lib/ansible_test/_data/yamlcheck.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +"""Show python and pip versions.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +try: + import yaml +except ImportError: + yaml = None + +try: + from yaml import CLoader +except ImportError: + CLoader = None + +print(json.dumps(dict( + yaml=bool(yaml), + cloader=bool(CLoader), +))) diff --git a/test/lib/ansible_test/_internal/__init__.py 
b/test/lib/ansible_test/_internal/__init__.py new file mode 100644 index 00000000..35f04422 --- /dev/null +++ b/test/lib/ansible_test/_internal/__init__.py @@ -0,0 +1,3 @@ +"""Support code for Ansible testing infrastructure.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py new file mode 100644 index 00000000..c1cf8552 --- /dev/null +++ b/test/lib/ansible_test/_internal/ansible_util.py @@ -0,0 +1,296 @@ +"""Miscellaneous utility functions and classes specific to ansible cli tools.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + +from . import types as t + +from .constants import ( + SOFT_RLIMIT_NOFILE, +) + +from .io import ( + write_text_file, +) + +from .util import ( + common_environment, + display, + find_python, + ApplicationError, + ANSIBLE_LIB_ROOT, + ANSIBLE_TEST_DATA_ROOT, + ANSIBLE_BIN_PATH, + ANSIBLE_SOURCE_ROOT, + get_ansible_version, +) + +from .util_common import ( + create_temp_dir, + run_command, + ResultType, +) + +from .config import ( + IntegrationConfig, + PosixIntegrationConfig, + EnvironmentConfig, + CommonConfig, +) + +from .data import ( + data_context, +) + +CHECK_YAML_VERSIONS = {} + + +def ansible_environment(args, color=True, ansible_config=None): + """ + :type args: CommonConfig + :type color: bool + :type ansible_config: str | None + :rtype: dict[str, str] + """ + env = common_environment() + path = env['PATH'] + + if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep): + path = ANSIBLE_BIN_PATH + os.path.pathsep + path + + if not ansible_config: + # use the default empty configuration unless one has been provided + ansible_config = args.get_ansible_config() + + if not args.explain and not os.path.exists(ansible_config): + raise ApplicationError('Configuration not found: %s' % ansible_config) + + ansible = dict( + ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE), + ANSIBLE_FORCE_COLOR='%s' % 'true' if args.color and color else 'false', + ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail + ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found + ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory + ANSIBLE_DEPRECATION_WARNINGS='false', + ANSIBLE_HOST_KEY_CHECKING='false', + ANSIBLE_RETRY_FILES_ENABLED='false', + ANSIBLE_CONFIG=ansible_config, + ANSIBLE_LIBRARY='/dev/null', + ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel + PYTHONPATH=get_ansible_python_path(args), + PAGER='/bin/cat', + PATH=path, + # give TQM worker processes time to report code coverage results + # without this the last task in a play may write no coverage file, an empty file, or an incomplete file + # enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly + ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100', + ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1', + ) + + if isinstance(args, IntegrationConfig) and args.coverage: + # standard path injection is not effective for ansible-connection, instead the location must be configured + # ansible-connection only requires the injector for code coverage + # the correct python interpreter is already selected using the sys.executable used to invoke ansible + ansible.update(dict( + ANSIBLE_CONNECTION_PATH=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector', 
'ansible-connection'), + )) + + if isinstance(args, PosixIntegrationConfig): + ansible.update(dict( + ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory + )) + + env.update(ansible) + + if args.debug: + env.update(dict( + ANSIBLE_DEBUG='true', + ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'), + )) + + if data_context().content.collection: + env.update(dict( + ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root, + )) + + if data_context().content.is_ansible: + env.update(configure_plugin_paths(args)) + + return env + + +def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str] + """Return environment variables with paths to plugins relevant for the current command.""" + if not isinstance(args, IntegrationConfig): + return {} + + support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command) + + # provide private copies of collections for integration tests + collection_root = os.path.join(support_path, 'collections') + + env = dict( + ANSIBLE_COLLECTIONS_PATH=collection_root, + ) + + # provide private copies of plugins for integration tests + plugin_root = os.path.join(support_path, 'plugins') + + plugin_list = [ + 'action', + 'become', + 'cache', + 'callback', + 'cliconf', + 'connection', + 'filter', + 'httpapi', + 'inventory', + 'lookup', + 'netconf', + # 'shell' is not configurable + 'strategy', + 'terminal', + 'test', + 'vars', + ] + + # most plugins follow a standard naming convention + plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list) + + # these plugins do not follow the standard naming convention + plugin_map.update( + doc_fragment='doc_fragments', + library='modules', + module_utils='module_utils', + ) + + env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items())) + + # only configure directories which exist + env = dict((key, value) for key, value in env.items() if os.path.isdir(value)) + + return env + + +def get_ansible_python_path(args): # type: (CommonConfig) -> str + """ + Return a directory usable for PYTHONPATH, containing only the ansible package. + If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit. 
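+    For example, when not running from source, the result is a freshly created temporary directory whose
+    only entry is a symlink (layout illustrative only, the directory name is random):
+        /tmp/ansible-test-XXXXXXXX/ansible -> ANSIBLE_LIB_ROOT  # keeps unrelated site-packages off sys.path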
+ """ + try: + return get_ansible_python_path.python_path + except AttributeError: + pass + + if ANSIBLE_SOURCE_ROOT: + # when running from source there is no need for a temporary directory to isolate the ansible package + python_path = os.path.dirname(ANSIBLE_LIB_ROOT) + else: + # when not running from source the installed directory is unsafe to add to PYTHONPATH + # doing so would expose many unwanted packages on sys.path + # instead a temporary directory is created which contains only ansible using a symlink + python_path = create_temp_dir(prefix='ansible-test-') + + os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible')) + + if not args.explain: + generate_egg_info(python_path) + + get_ansible_python_path.python_path = python_path + + return python_path + + +def generate_egg_info(path): # type: (str) -> None + """Generate an egg-info in the specified base directory.""" + # minimal PKG-INFO stub following the format defined in PEP 241 + # required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography + # newer setuptools versions are happy with an empty directory + # including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source + pkg_info = ''' +Metadata-Version: 1.0 +Name: ansible +Version: %s +Platform: UNKNOWN +Summary: Radically simple IT automation +Author-email: info@ansible.com +License: GPLv3+ +''' % get_ansible_version() + + pkg_info_path = os.path.join(path, 'ansible_base.egg-info', 'PKG-INFO') + + if os.path.exists(pkg_info_path): + return + + write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True) + + +def check_pyyaml(args, version, required=True, quiet=False): + """ + :type args: EnvironmentConfig + :type version: str + :type required: bool + :type quiet: bool + """ + try: + return CHECK_YAML_VERSIONS[version] + except KeyError: + pass + + python = find_python(version) + stdout, _dummy = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'yamlcheck.py')], + capture=True, always=True) + + result = json.loads(stdout) + + yaml = result['yaml'] + cloader = result['cloader'] + + if yaml or required: + # results are cached only if pyyaml is required or present + # it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected + CHECK_YAML_VERSIONS[version] = result + + if not quiet: + if not yaml and required: + display.warning('PyYAML is not installed for interpreter: %s' % python) + elif not cloader: + display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python) + + return result + + +class CollectionDetail: + """Collection detail.""" + def __init__(self): # type: () -> None + self.version = None # type: t.Optional[str] + + +class CollectionDetailError(ApplicationError): + """An error occurred retrieving collection detail.""" + def __init__(self, reason): # type: (str) -> None + super(CollectionDetailError, self).__init__('Error collecting collection detail: %s' % reason) + self.reason = reason + + +def get_collection_detail(args, python): # type: (EnvironmentConfig, str) -> CollectionDetail + """Return collection detail.""" + collection = data_context().content.collection + directory = os.path.join(collection.root, collection.directory) + + stdout = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0] + result = json.loads(stdout) + error = 
result.get('error') + + if error: + raise CollectionDetailError(error) + + version = result.get('version') + + detail = CollectionDetail() + detail.version = str(version) if version is not None else None + + return detail diff --git a/test/lib/ansible_test/_internal/cache.py b/test/lib/ansible_test/_internal/cache.py new file mode 100644 index 00000000..85fdbb1f --- /dev/null +++ b/test/lib/ansible_test/_internal/cache.py @@ -0,0 +1,35 @@ +"""Cache for commonly shared data that is intended to be immutable.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class CommonCache: + """Common cache.""" + def __init__(self, args): + """ + :param args: CommonConfig + """ + self.args = args + + def get(self, key, factory): + """ + :param key: str + :param factory: () -> any + :rtype: any + """ + if key not in self.args.cache: + self.args.cache[key] = factory() + + return self.args.cache[key] + + def get_with_args(self, key, factory): + """ + :param key: str + :param factory: (CommonConfig) -> any + :rtype: any + """ + + if key not in self.args.cache: + self.args.cache[key] = factory(self.args) + + return self.args.cache[key] diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py new file mode 100644 index 00000000..d6e2ad6e --- /dev/null +++ b/test/lib/ansible_test/_internal/ci/__init__.py @@ -0,0 +1,227 @@ +"""Support code for CI environments.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc +import base64 +import json +import os +import tempfile + + +from .. import types as t + +from ..encoding import ( + to_bytes, + to_text, +) + +from ..io import ( + read_text_file, + write_text_file, +) + +from ..config import ( + CommonConfig, + TestConfig, +) + +from ..util import ( + ABC, + ApplicationError, + display, + get_subclasses, + import_plugins, + raw_command, +) + + +class ChangeDetectionNotSupported(ApplicationError): + """Exception for cases where change detection is not supported.""" + + +class AuthContext: + """Context information required for Ansible Core CI authentication.""" + def __init__(self): # type: () -> None + self.region = None # type: t.Optional[str] + + +class CIProvider(ABC): + """Base class for CI provider plugins.""" + priority = 500 + + @staticmethod + @abc.abstractmethod + def is_supported(): # type: () -> bool + """Return True if this provider is supported in the current running environment.""" + + @property + @abc.abstractmethod + def code(self): # type: () -> str + """Return a unique code representing this provider.""" + + @property + @abc.abstractmethod + def name(self): # type: () -> str + """Return descriptive name for this provider.""" + + @abc.abstractmethod + def generate_resource_prefix(self): # type: () -> str + """Return a resource prefix specific to this CI provider.""" + + @abc.abstractmethod + def get_base_branch(self): # type: () -> str + """Return the base branch or an empty string.""" + + @abc.abstractmethod + def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]] + """Initialize change detection.""" + + @abc.abstractmethod + def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool + """Return True if Ansible Core CI is supported.""" + + @abc.abstractmethod + def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any] + """Return authentication details for Ansible Core CI.""" + + @abc.abstractmethod + def get_git_details(self, args): # 
type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]] + """Return details about git in the current environment.""" + + +def get_ci_provider(): # type: () -> CIProvider + """Return a CI provider instance for the current environment.""" + try: + return get_ci_provider.provider + except AttributeError: + pass + + provider = None + + import_plugins('ci') + + candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__)) + + for candidate in candidates: + if candidate.is_supported(): + provider = candidate() + break + + if provider.code: + display.info('Detected CI provider: %s' % provider.name) + + get_ci_provider.provider = provider + + return provider + + +class AuthHelper(ABC): + """Public key based authentication helper for Ansible Core CI.""" + def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None + """Sign the given auth request and make the public key available.""" + payload_bytes = to_bytes(json.dumps(request, sort_keys=True)) + signature_raw_bytes = self.sign_bytes(payload_bytes) + signature = to_text(base64.b64encode(signature_raw_bytes)) + + request.update(signature=signature) + + def initialize_private_key(self): # type: () -> str + """ + Initialize and publish a new key pair (if needed) and return the private key. + The private key is cached across ansible-test invocations so it is only generated and published once per CI job. + """ + path = os.path.expanduser('~/.ansible-core-ci-private.key') + + if os.path.exists(to_bytes(path)): + private_key_pem = read_text_file(path) + else: + private_key_pem = self.generate_private_key() + write_text_file(path, private_key_pem) + + return private_key_pem + + @abc.abstractmethod + def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes + """Sign the given payload and return the signature, initializing a new key pair if required.""" + + @abc.abstractmethod + def publish_public_key(self, public_key_pem): # type: (str) -> None + """Publish the given public key.""" + + @abc.abstractmethod + def generate_private_key(self): # type: () -> str + """Generate a new key pair, publishing the public key and returning the private key.""" + + +class CryptographyAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method + """Cryptography based public key based authentication helper for Ansible Core CI.""" + def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes + """Sign the given payload and return the signature, initializing a new key pair if required.""" + # import cryptography here to avoid overhead and failures in environments which do not use/provide it + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.primitives.asymmetric import ec + from cryptography.hazmat.primitives.serialization import load_pem_private_key + + private_key_pem = self.initialize_private_key() + private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend()) + + signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256())) + + return signature_raw_bytes + + def generate_private_key(self): # type: () -> str + """Generate a new key pair, publishing the public key and returning the private key.""" + # import cryptography here to avoid overhead and failures in environments which do not use/provide it + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization + from cryptography.hazmat.primitives.asymmetric import ec + + private_key = 
ec.generate_private_key(ec.SECP384R1(), default_backend()) + public_key = private_key.public_key() + + private_key_pem = to_text(private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + )) + + public_key_pem = to_text(public_key.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + )) + + self.publish_public_key(public_key_pem) + + return private_key_pem + + +class OpenSSLAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method + """OpenSSL based public key based authentication helper for Ansible Core CI.""" + def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes + """Sign the given payload and return the signature, initializing a new key pair if required.""" + private_key_pem = self.initialize_private_key() + + with tempfile.NamedTemporaryFile() as private_key_file: + private_key_file.write(to_bytes(private_key_pem)) + private_key_file.flush() + + with tempfile.NamedTemporaryFile() as payload_file: + payload_file.write(payload_bytes) + payload_file.flush() + + with tempfile.NamedTemporaryFile() as signature_file: + raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True) + signature_raw_bytes = signature_file.read() + + return signature_raw_bytes + + def generate_private_key(self): # type: () -> str + """Generate a new key pair, publishing the public key and returning the private key.""" + private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0] + public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0] + + self.publish_public_key(public_key_pem) + + return private_key_pem diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py new file mode 100644 index 00000000..f2a9d206 --- /dev/null +++ b/test/lib/ansible_test/_internal/ci/azp.py @@ -0,0 +1,268 @@ +"""Support code for working with Azure Pipelines.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import tempfile +import uuid + +from .. import types as t + +from ..encoding import ( + to_bytes, +) + +from ..config import ( + CommonConfig, + TestConfig, +) + +from ..git import ( + Git, +) + +from ..http import ( + HttpClient, + urlencode, +) + +from ..util import ( + display, + MissingEnvironmentVariable, +) + +from . 
import ( + AuthContext, + ChangeDetectionNotSupported, + CIProvider, + CryptographyAuthHelper, +) + +CODE = 'azp' + + +class AzurePipelines(CIProvider): + """CI provider implementation for Azure Pipelines.""" + def __init__(self): + self.auth = AzurePipelinesAuthHelper() + + @staticmethod + def is_supported(): # type: () -> bool + """Return True if this provider is supported in the current running environment.""" + return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/') + + @property + def code(self): # type: () -> str + """Return a unique code representing this provider.""" + return CODE + + @property + def name(self): # type: () -> str + """Return descriptive name for this provider.""" + return 'Azure Pipelines' + + def generate_resource_prefix(self): # type: () -> str + """Return a resource prefix specific to this CI provider.""" + try: + prefix = 'azp-%s-%s-%s' % ( + os.environ['BUILD_BUILDID'], + os.environ['SYSTEM_JOBATTEMPT'], + os.environ['SYSTEM_JOBIDENTIFIER'], + ) + except KeyError as ex: + raise MissingEnvironmentVariable(name=ex.args[0]) + + prefix = re.sub(r'[^a-zA-Z0-9]+', '-', prefix).lower() + + return prefix + + def get_base_branch(self): # type: () -> str + """Return the base branch or an empty string.""" + base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME') + + if base_branch: + base_branch = 'origin/%s' % base_branch + + return base_branch or '' + + def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]] + """Initialize change detection.""" + result = AzurePipelinesChanges(args) + + if result.is_pr: + job_type = 'pull request' + else: + job_type = 'merge commit' + + display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit)) + + if not args.metadata.changes: + args.metadata.populate_changes(result.diff) + + if result.paths is None: + # There are several likely causes of this: + # - First run on a new branch. + # - Too many pull requests passed since the last merge run passed. + display.warning('No successful commit found. All tests will be executed.') + + return result.paths + + def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool + """Return True if Ansible Core CI is supported.""" + return True + + def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any] + """Return authentication details for Ansible Core CI.""" + try: + request = dict( + org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1], + project_name=os.environ['SYSTEM_TEAMPROJECT'], + build_id=int(os.environ['BUILD_BUILDID']), + task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])), + ) + except KeyError as ex: + raise MissingEnvironmentVariable(name=ex.args[0]) + + self.auth.sign_request(request) + + auth = dict( + azp=request, + ) + + return auth + + def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]] + """Return details about git in the current environment.""" + changes = AzurePipelinesChanges(args) + + details = dict( + base_commit=changes.base_commit, + commit=changes.commit, + ) + + return details + + +class AzurePipelinesAuthHelper(CryptographyAuthHelper): + """ + Authentication helper for Azure Pipelines. + Based on cryptography since it is provided by the default Azure Pipelines environment. 
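+    The signing flow inherited from AuthHelper is approximately:
+        payload = to_bytes(json.dumps(request, sort_keys=True))  # canonical JSON form of the request
+        request['signature'] = to_text(base64.b64encode(self.sign_bytes(payload)))  # ECDSA P-384 over SHA-256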
+    """
+    def publish_public_key(self, public_key_pem):  # type: (str) -> None
+        """Publish the given public key."""
+        try:
+            agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
+        except KeyError as ex:
+            raise MissingEnvironmentVariable(name=ex.args[0])
+
+        # the temporary file cannot be deleted because we do not know when the agent has processed it
+        # placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
+        with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
+            public_key_file.write(to_bytes(public_key_pem))
+            public_key_file.flush()
+
+        # make the agent aware of the public key by declaring it as an attachment
+        vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
+
+
+class AzurePipelinesChanges:
+    """Change information for an Azure Pipelines build."""
+    def __init__(self, args):  # type: (CommonConfig) -> None
+        self.args = args
+        self.git = Git()
+
+        try:
+            self.org_uri = os.environ['SYSTEM_COLLECTIONURI']  # ex: https://dev.azure.com/{org}/
+            self.project = os.environ['SYSTEM_TEAMPROJECT']
+            self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER']  # ex: GitHub
+            self.source_branch = os.environ['BUILD_SOURCEBRANCH']
+            self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
+            self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
+        except KeyError as ex:
+            raise MissingEnvironmentVariable(name=ex.args[0])
+
+        if self.source_branch.startswith('refs/tags/'):
+            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+        self.org = self.org_uri.strip('/').split('/')[-1]
+        self.is_pr = self.pr_branch_name is not None
+
+        if self.is_pr:
+            # HEAD is a merge commit of the PR branch into the target branch
+            # HEAD^1 is HEAD of the target branch (first parent of merge commit)
+            # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
+            # see: https://git-scm.com/docs/gitrevisions
+            self.branch = self.pr_branch_name
+            self.base_commit = 'HEAD^1'
+            self.commit = 'HEAD^2'
+        else:
+            commits = self.get_successful_merge_run_commits()
+
+            self.branch = self.source_branch_name
+            self.base_commit = self.get_last_successful_commit(commits)
+            self.commit = 'HEAD'
+
+        self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
+
+        if self.base_commit:
+            self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
+
+            # <commit>...<commit>
+            # This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
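+            # for example, with base_commit resolved to the last successful merge commit, the
+            # triple-dot range diffs from merge-base(base_commit, commit) to commit, so changes
+            # already present on the target branch are not attributed to this run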
+            # see: https://git-scm.com/docs/git-diff
+            dot_range = '%s...%s' % (self.base_commit, self.commit)
+
+            self.paths = sorted(self.git.get_diff_names([dot_range]))
+            self.diff = self.git.get_diff([dot_range])
+        else:
+            self.paths = None  # act as though change detection not enabled, do not filter targets
+            self.diff = []
+
+    def get_successful_merge_run_commits(self):  # type: () -> t.Set[str]
+        """Return a set of recent successful merge commits from Azure Pipelines."""
+        parameters = dict(
+            maxBuildsPerDefinition=100,  # max 5000
+            queryOrder='queueTimeDescending',  # assumes under normal circumstances that later queued jobs are for later commits
+            resultFilter='succeeded',
+            reasonFilter='batchedCI',  # may miss some non-PR reasons, the alternative is to filter the list after receiving it
+            repositoryType=self.repo_type,
+            repositoryId='%s/%s' % (self.org, self.project),
+        )
+
+        url = '%s%s/build/builds?%s' % (self.org_uri, self.project, urlencode(parameters))
+
+        http = HttpClient(self.args)
+        response = http.get(url)
+
+        # noinspection PyBroadException
+        try:
+            result = response.json()
+        except Exception:  # pylint: disable=broad-except
+            # most likely due to a private project, which returns an HTTP 203 response with HTML
+            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+            return set()
+
+        commits = set(build['sourceVersion'] for build in result['value'])
+
+        return commits
+
+    def get_last_successful_commit(self, commits):  # type: (t.Set[str]) -> t.Optional[str]
+        """Return the last successful commit from git history that is found in the given commit list, or None."""
+        commit_history = self.git.get_rev_list(max_count=100)
+        ordered_successful_commits = [commit for commit in commit_history if commit in commits]
+        last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+        return last_successful_commit
+
+
+def vso_add_attachment(file_type, file_name, path):  # type: (str, str, str) -> None
+    """Upload and attach a file to the current timeline record."""
+    vso('task.addattachment', dict(type=file_type, name=file_name), path)
+
+
+def vso(name, data, message):  # type: (str, t.Dict[str, str], str) -> None
+    """
+    Write a logging command for the Azure Pipelines agent to process.
+    See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
+    """
+    display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
diff --git a/test/lib/ansible_test/_internal/ci/local.py b/test/lib/ansible_test/_internal/ci/local.py
new file mode 100644
index 00000000..5f605c86
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/local.py
@@ -0,0 +1,217 @@
+"""Support code for working without a supported CI provider."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import random
+import re
+
+from .. import types as t
+
+from ..config import (
+    CommonConfig,
+    TestConfig,
+)
+
+from ..io import (
+    read_text_file,
+)
+
+from ..git import (
+    Git,
+)
+
+from ..util import (
+    ApplicationError,
+    display,
+    is_binary_file,
+    SubprocessError,
+)
+
+from .
import ( + AuthContext, + CIProvider, +) + +CODE = '' # not really a CI provider, so use an empty string for the code + + +class Local(CIProvider): + """CI provider implementation when not using CI.""" + priority = 1000 + + @staticmethod + def is_supported(): # type: () -> bool + """Return True if this provider is supported in the current running environment.""" + return True + + @property + def code(self): # type: () -> str + """Return a unique code representing this provider.""" + return CODE + + @property + def name(self): # type: () -> str + """Return descriptive name for this provider.""" + return 'Local' + + def generate_resource_prefix(self): # type: () -> str + """Return a resource prefix specific to this CI provider.""" + node = re.sub(r'[^a-zA-Z0-9]+', '-', platform.node().split('.')[0]).lower() + + prefix = 'ansible-test-%s-%d' % (node, random.randint(10000000, 99999999)) + + return prefix + + def get_base_branch(self): # type: () -> str + """Return the base branch or an empty string.""" + return '' + + def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]] + """Initialize change detection.""" + result = LocalChanges(args) + + display.info('Detected branch %s forked from %s at commit %s' % ( + result.current_branch, result.fork_branch, result.fork_point)) + + if result.untracked and not args.untracked: + display.warning('Ignored %s untracked file(s). Use --untracked to include them.' % + len(result.untracked)) + + if result.committed and not args.committed: + display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' % + len(result.committed)) + + if result.staged and not args.staged: + display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' % + len(result.staged)) + + if result.unstaged and not args.unstaged: + display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' 
% + len(result.unstaged)) + + names = set() + + if args.tracked: + names |= set(result.tracked) + if args.untracked: + names |= set(result.untracked) + if args.committed: + names |= set(result.committed) + if args.staged: + names |= set(result.staged) + if args.unstaged: + names |= set(result.unstaged) + + if not args.metadata.changes: + args.metadata.populate_changes(result.diff) + + for path in result.untracked: + if is_binary_file(path): + args.metadata.changes[path] = ((0, 0),) + continue + + line_count = len(read_text_file(path).splitlines()) + + args.metadata.changes[path] = ((1, line_count),) + + return sorted(names) + + def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool + """Return True if Ansible Core CI is supported.""" + path = self._get_aci_key_path(context) + return os.path.exists(path) + + def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any] + """Return authentication details for Ansible Core CI.""" + path = self._get_aci_key_path(context) + auth_key = read_text_file(path).strip() + + request = dict( + key=auth_key, + nonce=None, + ) + + auth = dict( + remote=request, + ) + + return auth + + def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]] + """Return details about git in the current environment.""" + return None # not yet implemented for local + + def _get_aci_key_path(self, context): # type: (AuthContext) -> str + path = os.path.expanduser('~/.ansible-core-ci.key') + + if context.region: + path += '.%s' % context.region + + return path + + +class InvalidBranch(ApplicationError): + """Exception for invalid branch specification.""" + def __init__(self, branch, reason): # type: (str, str) -> None + message = 'Invalid branch: %s\n%s' % (branch, reason) + + super(InvalidBranch, self).__init__(message) + + self.branch = branch + + +class LocalChanges: + """Change information for local work.""" + def __init__(self, args): # type: (TestConfig) -> None + self.args = args + self.git = Git() + + self.current_branch = self.git.get_branch() + + if self.is_official_branch(self.current_branch): + raise InvalidBranch(branch=self.current_branch, + reason='Current branch is not a feature branch.') + + self.fork_branch = None + self.fork_point = None + + self.local_branches = sorted(self.git.get_branches()) + self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)]) + + for self.fork_branch in self.official_branches: + try: + self.fork_point = self.git.get_branch_fork_point(self.fork_branch) + break + except SubprocessError: + pass + + if self.fork_point is None: + raise ApplicationError('Unable to auto-detect fork branch and fork point.') + + # tracked files (including unchanged) + self.tracked = sorted(self.git.get_file_names(['--cached'])) + # untracked files (except ignored) + self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard'])) + # tracked changes (including deletions) committed since the branch was forked + self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD'])) + # tracked changes (including deletions) which are staged + self.staged = sorted(self.git.get_diff_names(['--cached'])) + # tracked changes (including deletions) which are not staged + self.unstaged = sorted(self.git.get_diff_names([])) + # diff of all tracked files from fork point to working copy + self.diff = self.git.get_diff([self.fork_point]) + + def is_official_branch(self, name): # type: (str) -> bool + """Return True if the given branch 
name is an official branch for development or releases."""
+        if self.args.base_branch:
+            return name == self.args.base_branch
+
+        if name == 'devel':
+            return True
+
+        if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
+            return True
+
+        return False
diff --git a/test/lib/ansible_test/_internal/ci/shippable.py b/test/lib/ansible_test/_internal/ci/shippable.py
new file mode 100644
index 00000000..f9f0a192
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/shippable.py
@@ -0,0 +1,269 @@
+"""Support code for working with Shippable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import time
+
+from .. import types as t
+
+from ..config import (
+    CommonConfig,
+    TestConfig,
+)
+
+from ..git import (
+    Git,
+)
+
+from ..http import (
+    HttpClient,
+    urlencode,
+)
+
+from ..util import (
+    ApplicationError,
+    display,
+    MissingEnvironmentVariable,
+    SubprocessError,
+)
+
+from . import (
+    AuthContext,
+    ChangeDetectionNotSupported,
+    CIProvider,
+    OpenSSLAuthHelper,
+)
+
+
+CODE = 'shippable'
+
+
+class Shippable(CIProvider):
+    """CI provider implementation for Shippable."""
+    def __init__(self):
+        self.auth = ShippableAuthHelper()
+
+    @staticmethod
+    def is_supported():  # type: () -> bool
+        """Return True if this provider is supported in the current running environment."""
+        return os.environ.get('SHIPPABLE') == 'true'
+
+    @property
+    def code(self):  # type: () -> str
+        """Return a unique code representing this provider."""
+        return CODE
+
+    @property
+    def name(self):  # type: () -> str
+        """Return descriptive name for this provider."""
+        return 'Shippable'
+
+    def generate_resource_prefix(self):  # type: () -> str
+        """Return a resource prefix specific to this CI provider."""
+        try:
+            prefix = 'shippable-%s-%s' % (
+                os.environ['SHIPPABLE_BUILD_NUMBER'],
+                os.environ['SHIPPABLE_JOB_NUMBER'],
+            )
+        except KeyError as ex:
+            raise MissingEnvironmentVariable(name=ex.args[0])
+
+        return prefix
+
+    def get_base_branch(self):  # type: () -> str
+        """Return the base branch or an empty string."""
+        base_branch = os.environ.get('BASE_BRANCH')
+
+        if base_branch:
+            base_branch = 'origin/%s' % base_branch
+
+        return base_branch or ''
+
+    def detect_changes(self, args):  # type: (TestConfig) -> t.Optional[t.List[str]]
+        """Initialize change detection."""
+        result = ShippableChanges(args)
+
+        if result.is_pr:
+            job_type = 'pull request'
+        elif result.is_tag:
+            job_type = 'tag'
+        else:
+            job_type = 'merge commit'
+
+        display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+        if not args.metadata.changes:
+            args.metadata.populate_changes(result.diff)
+
+        if result.paths is None:
+            # There are several likely causes of this:
+            # - First run on a new branch.
+            # - Too many pull requests passed since the last merge run passed.
+            display.warning('No successful commit found.
All tests will be executed.')
+
+        return result.paths
+
+    def supports_core_ci_auth(self, context):  # type: (AuthContext) -> bool
+        """Return True if Ansible Core CI is supported."""
+        return True
+
+    def prepare_core_ci_auth(self, context):  # type: (AuthContext) -> t.Dict[str, t.Any]
+        """Return authentication details for Ansible Core CI."""
+        try:
+            request = dict(
+                run_id=os.environ['SHIPPABLE_BUILD_ID'],
+                job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
+            )
+        except KeyError as ex:
+            raise MissingEnvironmentVariable(name=ex.args[0])
+
+        self.auth.sign_request(request)
+
+        auth = dict(
+            shippable=request,
+        )
+
+        return auth
+
+    def get_git_details(self, args):  # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+        """Return details about git in the current environment."""
+        commit = os.environ.get('COMMIT')
+        base_commit = os.environ.get('BASE_COMMIT')
+
+        details = dict(
+            base_commit=base_commit,
+            commit=commit,
+            merged_commit=self._get_merged_commit(args, commit),
+        )
+
+        return details
+
+    # noinspection PyUnusedLocal
+    def _get_merged_commit(self, args, commit):  # type: (CommonConfig, str) -> t.Optional[str]  # pylint: disable=unused-argument
+        """Find the merged commit that should be present."""
+        if not commit:
+            return None
+
+        git = Git()
+
+        try:
+            show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
+        except SubprocessError as ex:
+            # This should only fail for pull requests where the commit does not exist.
+            # Merge runs would fail much earlier when attempting to checkout the commit.
+            raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
                                   'GitHub may not have fully replicated the commit across their infrastructure.\n'
                                   'It is also possible the commit was removed by a force push between job creation and execution.\n'
                                   'Find the latest run for the pull request and restart failed jobs as needed.'
                                   % (commit, ex.stderr.strip()))
+
+        head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
+
+        if show_commit == head_commit:
+            # Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
+            return None
+
+        match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
+
+        if not match_merge:
+            # The most likely scenarios resulting in a failure here are:
+            # A new run should or does supersede this job, but it wasn't cancelled in time.
+            # A job was superseded and then later restarted.
+            raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
                                   'This job has likely been superseded by another run due to additional commits being pushed.\n'
                                   'Find the latest run for the pull request and restart failed jobs as needed.'
                                   % (commit, head_commit.strip()))
+
+        parents = set(match_merge.group('parents').split(' '))
+
+        if len(parents) != 2:
+            raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
+
+        if commit not in parents:
+            raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
+
+        parents.remove(commit)
+
+        last_commit = parents.pop()
+
+        return last_commit
+
+
+class ShippableAuthHelper(OpenSSLAuthHelper):
+    """
+    Authentication helper for Shippable.
+    Based on OpenSSL since cryptography is not provided by the default Shippable environment.
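+    The inherited helper methods are roughly equivalent to the following commands:
+        openssl ecparam -genkey -name secp384r1 -noout                        # generate_private_key
+        openssl ec -pubout < private.key                                      # derive the public key to publish
+        openssl dgst -sha256 -sign private.key -out payload.sig payload.json  # sign_bytes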
+    """
+    def publish_public_key(self, public_key_pem):  # type: (str) -> None
+        """Publish the given public key."""
+        # display the public key as a single line to avoid mangling such as when prefixing each line with a timestamp
+        display.info(public_key_pem.replace('\n', ' '))
+        # allow time for logs to become available to reduce repeated API calls
+        time.sleep(3)
+
+
+class ShippableChanges:
+    """Change information for a Shippable build."""
+    def __init__(self, args):  # type: (TestConfig) -> None
+        self.args = args
+        self.git = Git()
+
+        try:
+            self.branch = os.environ['BRANCH']
+            self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
+            self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
+            self.commit = os.environ['COMMIT']
+            self.project_id = os.environ['PROJECT_ID']
+            self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
+        except KeyError as ex:
+            raise MissingEnvironmentVariable(name=ex.args[0])
+
+        if self.is_tag:
+            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+        if self.is_pr:
+            self.paths = sorted(self.git.get_diff_names([self.commit_range]))
+            self.diff = self.git.get_diff([self.commit_range])
+        else:
+            commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
+            last_successful_commit = self.get_last_successful_commit(commits)
+
+            if last_successful_commit:
+                self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
+                self.diff = self.git.get_diff([last_successful_commit, self.commit])
+            else:
+                # first run for branch
+                self.paths = None  # act as though change detection not enabled, do not filter targets
+                self.diff = []
+
+    def get_successful_merge_run_commits(self, project_id, branch):  # type: (str, str) -> t.Set[str]
+        """Return a set of recent successful merge commits from Shippable for the given project and branch."""
+        parameters = dict(
+            isPullRequest='false',
+            projectIds=project_id,
+            branch=branch,
+        )
+
+        url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
+
+        http = HttpClient(self.args, always=True)
+        response = http.get(url)
+        result = response.json()
+
+        if 'id' in result and result['id'] == 4004:
+            # most likely due to a private project, which returns an HTTP 200 response with JSON
+            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+            return set()
+
+        commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
+
+        return commits
+
+    def get_last_successful_commit(self, successful_commits):  # type: (t.Set[str]) -> t.Optional[str]
+        """Return the last successful commit from git history that is found in the given commit list, or None."""
+        commit_history = self.git.get_rev_list(max_count=100)
+        ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
+        last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+        return last_successful_commit
diff --git a/test/lib/ansible_test/_internal/classification.py b/test/lib/ansible_test/_internal/classification.py
new file mode 100644
index 00000000..52385d1e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification.py
@@ -0,0 +1,977 @@
+"""Classify changes in Ansible code."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import time
+
+from .
import types as t + +from .target import ( + walk_module_targets, + walk_integration_targets, + walk_units_targets, + walk_compile_targets, + walk_sanity_targets, + load_integration_prefixes, + analyze_integration_target_dependencies, +) + +from .util import ( + display, + is_subdir, +) + +from .import_analysis import ( + get_python_module_utils_imports, + get_python_module_utils_name, +) + +from .csharp_import_analysis import ( + get_csharp_module_utils_imports, + get_csharp_module_utils_name, +) + +from .powershell_import_analysis import ( + get_powershell_module_utils_imports, + get_powershell_module_utils_name, +) + +from .config import ( + TestConfig, + IntegrationConfig, +) + +from .metadata import ( + ChangeDescription, +) + +from .data import ( + data_context, +) + +FOCUSED_TARGET = '__focused__' + + +def categorize_changes(args, paths, verbose_command=None): + """ + :type args: TestConfig + :type paths: list[str] + :type verbose_command: str + :rtype: ChangeDescription + """ + mapper = PathMapper(args) + + commands = { + 'sanity': set(), + 'units': set(), + 'integration': set(), + 'windows-integration': set(), + 'network-integration': set(), + } + + focused_commands = collections.defaultdict(set) + + deleted_paths = set() + original_paths = set() + additional_paths = set() + no_integration_paths = set() + + for path in paths: + if not os.path.exists(path): + deleted_paths.add(path) + continue + + original_paths.add(path) + + dependent_paths = mapper.get_dependent_paths(path) + + if not dependent_paths: + continue + + display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2) + + for dependent_path in dependent_paths: + display.info(dependent_path, verbosity=2) + additional_paths.add(dependent_path) + + additional_paths -= set(paths) # don't count changed paths as additional paths + + if additional_paths: + display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths))) + paths = sorted(set(paths) | additional_paths) + + display.info('Mapping %d changed file(s) to tests.' 
% len(paths)) + + none_count = 0 + + for path in paths: + tests = mapper.classify(path) + + if tests is None: + focused_target = False + + display.info('%s -> all' % path, verbosity=1) + tests = all_tests(args) # not categorized, run all tests + display.warning('Path not categorized: %s' % path) + else: + focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths + + tests = dict((key, value) for key, value in tests.items() if value) + + if focused_target and not any('integration' in command for command in tests): + no_integration_paths.add(path) # path triggers no integration tests + + if verbose_command: + result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none') + + # identify targeted integration tests (those which only target a single integration command) + if 'integration' in verbose_command and tests.get(verbose_command): + if not any('integration' in command for command in tests if command != verbose_command): + if focused_target: + result += ' (focused)' + + result += ' (targeted)' + else: + result = '%s' % tests + + if not tests.get(verbose_command): + # minimize excessive output from potentially thousands of files which do not trigger tests + none_count += 1 + verbosity = 2 + else: + verbosity = 1 + + if args.verbosity >= verbosity: + display.info('%s -> %s' % (path, result), verbosity=1) + + for command, target in tests.items(): + commands[command].add(target) + + if focused_target: + focused_commands[command].add(target) + + if none_count > 0 and args.verbosity < 2: + display.notice('Omitted %d file(s) that triggered no tests.' % none_count) + + for command in commands: + commands[command].discard('none') + + if any(target == 'all' for target in commands[command]): + commands[command] = set(['all']) + + commands = dict((c, sorted(commands[c])) for c in commands if commands[c]) + focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands) + + for command in commands: + if commands[command] == ['all']: + commands[command] = [] # changes require testing all targets, do not filter targets + + changes = ChangeDescription() + changes.command = verbose_command + changes.changed_paths = sorted(original_paths) + changes.deleted_paths = sorted(deleted_paths) + changes.regular_command_targets = commands + changes.focused_command_targets = focused_commands + changes.no_integration_paths = sorted(no_integration_paths) + + return changes + + +class PathMapper: + """Map file paths to test commands and targets.""" + def __init__(self, args): + """ + :type args: TestConfig + """ + self.args = args + self.integration_all_target = get_integration_all_target(self.args) + + self.integration_targets = list(walk_integration_targets()) + self.module_targets = list(walk_module_targets()) + self.compile_targets = list(walk_compile_targets()) + self.units_targets = list(walk_units_targets()) + self.sanity_targets = list(walk_sanity_targets()) + self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')] + self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs'] + + self.units_modules = set(target.module for target in self.units_targets if target.module) + self.units_paths = set(a for target in self.units_targets for a in target.aliases) + self.sanity_paths = set(target.path for target in self.sanity_targets) + + self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets) + 
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets) + self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases) + + self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets + if 'posix/' in target.aliases for m in target.modules) + self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets + if 'windows/' in target.aliases for m in target.modules) + self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets + if 'network/' in target.aliases for m in target.modules) + + self.prefixes = load_integration_prefixes() + self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets) + + self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed + self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed + self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed + + self.paths_to_dependent_targets = {} + + for target in self.integration_targets: + for path in target.needs_file: + if path not in self.paths_to_dependent_targets: + self.paths_to_dependent_targets[path] = set() + + self.paths_to_dependent_targets[path].add(target) + + def get_dependent_paths(self, path): + """ + :type path: str + :rtype: list[str] + """ + unprocessed_paths = set(self.get_dependent_paths_non_recursive(path)) + paths = set() + + while unprocessed_paths: + queued_paths = list(unprocessed_paths) + paths |= unprocessed_paths + unprocessed_paths = set() + + for queued_path in queued_paths: + new_paths = self.get_dependent_paths_non_recursive(queued_path) + + for new_path in new_paths: + if new_path not in paths: + unprocessed_paths.add(new_path) + + return sorted(paths) + + def get_dependent_paths_non_recursive(self, path): + """ + :type path: str + :rtype: list[str] + """ + paths = self.get_dependent_paths_internal(path) + paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())] + paths = sorted(set(paths)) + + return paths + + def get_dependent_paths_internal(self, path): + """ + :type path: str + :rtype: list[str] + """ + ext = os.path.splitext(os.path.split(path)[1])[1] + + if is_subdir(path, data_context().content.module_utils_path): + if ext == '.py': + return self.get_python_module_utils_usage(path) + + if ext == '.psm1': + return self.get_powershell_module_utils_usage(path) + + if ext == '.cs': + return self.get_csharp_module_utils_usage(path) + + if is_subdir(path, data_context().content.integration_targets_path): + return self.get_integration_target_usage(path) + + return [] + + def get_python_module_utils_usage(self, path): + """ + :type path: str + :rtype: list[str] + """ + if not self.python_module_utils_imports: + display.info('Analyzing python module_utils imports...') + before = time.time() + self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets) + after = time.time() + display.info('Processed %d python module_utils in %d second(s).' 
% (len(self.python_module_utils_imports), after - before)) + + name = get_python_module_utils_name(path) + + return sorted(self.python_module_utils_imports[name]) + + def get_powershell_module_utils_usage(self, path): + """ + :type path: str + :rtype: list[str] + """ + if not self.powershell_module_utils_imports: + display.info('Analyzing powershell module_utils imports...') + before = time.time() + self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets) + after = time.time() + display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before)) + + name = get_powershell_module_utils_name(path) + + return sorted(self.powershell_module_utils_imports[name]) + + def get_csharp_module_utils_usage(self, path): + """ + :type path: str + :rtype: list[str] + """ + if not self.csharp_module_utils_imports: + display.info('Analyzing C# module_utils imports...') + before = time.time() + self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets) + after = time.time() + display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before)) + + name = get_csharp_module_utils_name(path) + + return sorted(self.csharp_module_utils_imports[name]) + + def get_integration_target_usage(self, path): + """ + :type path: str + :rtype: list[str] + """ + target_name = path.split('/')[3] + dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep + for target in sorted(self.integration_dependencies.get(target_name, set()))] + + return dependents + + def classify(self, path): + """ + :type path: str + :rtype: dict[str, str] | None + """ + result = self._classify(path) + + # run all tests when no result given + if result is None: + return None + + # run sanity on path unless result specified otherwise + if path in self.sanity_paths and 'sanity' not in result: + result['sanity'] = path + + return result + + def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] + """Return the classification for the given path.""" + if data_context().content.is_ansible: + return self._classify_ansible(path) + + if data_context().content.collection: + return self._classify_collection(path) + + return None + + def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] + """Return the classification for the given path using rules common to all layouts.""" + dirname = os.path.dirname(path) + filename = os.path.basename(path) + name, ext = os.path.splitext(filename) + + minimal = {} + + if os.path.sep not in path: + if filename in ( + 'azure-pipelines.yml', + 'shippable.yml', + ): + return all_tests(self.args) # test infrastructure, run all tests + + if is_subdir(path, '.azure-pipelines'): + return all_tests(self.args) # test infrastructure, run all tests + + if is_subdir(path, '.github'): + return minimal + + if is_subdir(path, data_context().content.integration_targets_path): + if not os.path.exists(path): + return minimal + + target = self.integration_targets_by_name.get(path.split('/')[3]) + + if not target: + display.warning('Unexpected non-target found: %s' % path) + return minimal + + if 'hidden/' in target.aliases: + return minimal # already expanded using get_dependent_paths + + return { + 'integration': target.name if 'posix/' in target.aliases else None, + 'windows-integration': target.name if 'windows/' in target.aliases else None, + 
'network-integration': target.name if 'network/' in target.aliases else None, + FOCUSED_TARGET: True, + } + + if is_subdir(path, data_context().content.integration_path): + if dirname == data_context().content.integration_path: + for command in ( + 'integration', + 'windows-integration', + 'network-integration', + ): + if name == command and ext == '.cfg': + return { + command: self.integration_all_target, + } + + if name == command + '.requirements' and ext == '.txt': + return { + command: self.integration_all_target, + } + + return { + 'integration': self.integration_all_target, + 'windows-integration': self.integration_all_target, + 'network-integration': self.integration_all_target, + } + + if is_subdir(path, data_context().content.sanity_path): + return { + 'sanity': 'all', # test infrastructure, run all sanity checks + } + + if is_subdir(path, data_context().content.unit_path): + if path in self.units_paths: + return { + 'units': path, + } + + # changes to files which are not unit tests should trigger tests from the nearest parent directory + + test_path = os.path.dirname(path) + + while test_path: + if test_path + '/' in self.units_paths: + return { + 'units': test_path + '/', + } + + test_path = os.path.dirname(test_path) + + if is_subdir(path, data_context().content.module_path): + module_name = self.module_names_by_path.get(path) + + if module_name: + return { + 'units': module_name if module_name in self.units_modules else None, + 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None, + 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None, + 'network-integration': self.network_integration_by_module.get(module_name), + FOCUSED_TARGET: True, + } + + return minimal + + if is_subdir(path, data_context().content.module_utils_path): + if ext == '.cs': + return minimal # already expanded using get_dependent_paths + + if ext == '.psm1': + return minimal # already expanded using get_dependent_paths + + if ext == '.py': + return minimal # already expanded using get_dependent_paths + + if is_subdir(path, data_context().content.plugin_paths['action']): + if ext == '.py': + if name.startswith('net_'): + network_target = 'network/.*_%s' % name[4:] + + if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias): + return { + 'network-integration': network_target, + 'units': 'all', + } + + return { + 'network-integration': self.integration_all_target, + 'units': 'all', + } + + if self.prefixes.get(name) == 'network': + network_platform = name + elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network': + network_platform = name[:-7] + elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network': + network_platform = name[:-9] + else: + network_platform = None + + if network_platform: + network_target = 'network/%s/' % network_platform + + if network_target in self.integration_targets_by_alias: + return { + 'network-integration': network_target, + 'units': 'all', + } + + display.warning('Integration tests for "%s" not found.' 
% network_target, unique=True) + + return { + 'units': 'all', + } + + if is_subdir(path, data_context().content.plugin_paths['connection']): + units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection') + if name == '__init__': + return { + 'integration': self.integration_all_target, + 'windows-integration': self.integration_all_target, + 'network-integration': self.integration_all_target, + 'units': units_dir, + } + + units_path = os.path.join(units_dir, 'test_%s.py' % name) + + if units_path not in self.units_paths: + units_path = None + + integration_name = 'connection_%s' % name + + if integration_name not in self.integration_targets_by_name: + integration_name = None + + windows_integration_name = 'connection_windows_%s' % name + + if windows_integration_name not in self.integration_targets_by_name: + windows_integration_name = None + + # entire integration test commands depend on these connection plugins + + if name in ['winrm', 'psrp']: + return { + 'windows-integration': self.integration_all_target, + 'units': units_path, + } + + if name == 'local': + return { + 'integration': self.integration_all_target, + 'network-integration': self.integration_all_target, + 'units': units_path, + } + + if name == 'network_cli': + return { + 'network-integration': self.integration_all_target, + 'units': units_path, + } + + if name == 'paramiko_ssh': + return { + 'integration': integration_name, + 'network-integration': self.integration_all_target, + 'units': units_path, + } + + # other connection plugins have isolated integration and unit tests + + return { + 'integration': integration_name, + 'windows-integration': windows_integration_name, + 'units': units_path, + } + + if is_subdir(path, data_context().content.plugin_paths['doc_fragments']): + return { + 'sanity': 'all', + } + + if is_subdir(path, data_context().content.plugin_paths['inventory']): + if name == '__init__': + return all_tests(self.args) # broad impact, run all tests + + # These inventory plugins are enabled by default (see INVENTORY_ENABLED). + # Without dedicated integration tests for these we must rely on the incidental coverage from other tests. 
+ test_all = [ + 'host_list', + 'script', + 'yaml', + 'ini', + 'auto', + ] + + if name in test_all: + posix_integration_fallback = get_integration_all_target(self.args) + else: + posix_integration_fallback = None + + target = self.integration_targets_by_name.get('inventory_%s' % name) + units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory') + units_path = os.path.join(units_dir, 'test_%s.py' % name) + + if units_path not in self.units_paths: + units_path = None + + return { + 'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback, + 'windows-integration': target.name if target and 'windows/' in target.aliases else None, + 'network-integration': target.name if target and 'network/' in target.aliases else None, + 'units': units_path, + FOCUSED_TARGET: target is not None, + } + + if is_subdir(path, data_context().content.plugin_paths['filter']): + return self._simple_plugin_tests('filter', name) + + if is_subdir(path, data_context().content.plugin_paths['lookup']): + return self._simple_plugin_tests('lookup', name) + + if (is_subdir(path, data_context().content.plugin_paths['terminal']) or + is_subdir(path, data_context().content.plugin_paths['cliconf']) or + is_subdir(path, data_context().content.plugin_paths['netconf'])): + if ext == '.py': + if name in self.prefixes and self.prefixes[name] == 'network': + network_target = 'network/%s/' % name + + if network_target in self.integration_targets_by_alias: + return { + 'network-integration': network_target, + 'units': 'all', + } + + display.warning('Integration tests for "%s" not found.' % network_target, unique=True) + + return { + 'units': 'all', + } + + return { + 'network-integration': self.integration_all_target, + 'units': 'all', + } + + if is_subdir(path, data_context().content.plugin_paths['test']): + return self._simple_plugin_tests('test', name) + + return None + + def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] + """Return the classification for the given path using rules specific to collections.""" + result = self._classify_common(path) + + if result is not None: + return result + + filename = os.path.basename(path) + dummy, ext = os.path.splitext(filename) + + minimal = {} + + if path.startswith('changelogs/'): + return minimal + + if path.startswith('docs/'): + return minimal + + if '/' not in path: + if path in ( + '.gitignore', + 'COPYING', + 'LICENSE', + 'Makefile', + ): + return minimal + + if ext in ( + '.in', + '.md', + '.rst', + '.toml', + '.txt', + ): + return minimal + + return None + + def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] + """Return the classification for the given path using rules specific to Ansible.""" + if path.startswith('test/units/compat/'): + return { + 'units': 'test/units/', + } + + result = self._classify_common(path) + + if result is not None: + return result + + dirname = os.path.dirname(path) + filename = os.path.basename(path) + name, ext = os.path.splitext(filename) + + minimal = {} + + if path.startswith('bin/'): + return all_tests(self.args) # broad impact, run all tests + + if path.startswith('changelogs/'): + return minimal + + if path.startswith('contrib/'): + return { + 'units': 'test/units/contrib/' + } + + if path.startswith('docs/'): + return minimal + + if path.startswith('examples/'): + if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1': + return { + 'windows-integration': 'connection_winrm', + } + + return minimal + + if 
path.startswith('hacking/'): + return minimal + + if path.startswith('lib/ansible/executor/powershell/'): + units_path = 'test/units/executor/powershell/' + + if units_path not in self.units_paths: + units_path = None + + return { + 'windows-integration': self.integration_all_target, + 'units': units_path, + } + + if path.startswith('lib/ansible/'): + return all_tests(self.args) # broad impact, run all tests + + if path.startswith('licenses/'): + return minimal + + if path.startswith('packaging/'): + if path.startswith('packaging/requirements/'): + if name.startswith('requirements-') and ext == '.txt': + component = name.split('-', 1)[1] + + candidates = ( + 'cloud/%s/' % component, + ) + + for candidate in candidates: + if candidate in self.integration_targets_by_alias: + return { + 'integration': candidate, + } + + return all_tests(self.args) # broad impact, run all tests + + return minimal + + if path.startswith('test/ansible_test/'): + return minimal # these tests are not invoked from ansible-test + + if path.startswith('test/lib/ansible_test/config/'): + if name.startswith('cloud-config-'): + # noinspection PyTypeChecker + cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0] + + if cloud_target in self.integration_targets_by_alias: + return { + 'integration': cloud_target, + } + + if path.startswith('test/lib/ansible_test/_data/completion/'): + if path == 'test/lib/ansible_test/_data/completion/docker.txt': + return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment + + if path.startswith('test/lib/ansible_test/_internal/cloud/'): + cloud_target = 'cloud/%s/' % name + + if cloud_target in self.integration_targets_by_alias: + return { + 'integration': cloud_target, + } + + return all_tests(self.args) # test infrastructure, run all tests + + if path.startswith('test/lib/ansible_test/_internal/sanity/'): + return { + 'sanity': 'all', # test infrastructure, run all sanity checks + 'integration': 'ansible-test', # run ansible-test self tests + } + + if path.startswith('test/lib/ansible_test/_data/sanity/'): + return { + 'sanity': 'all', # test infrastructure, run all sanity checks + 'integration': 'ansible-test', # run ansible-test self tests + } + + if path.startswith('test/lib/ansible_test/_internal/units/'): + return { + 'units': 'all', # test infrastructure, run all unit tests + 'integration': 'ansible-test', # run ansible-test self tests + } + + if path.startswith('test/lib/ansible_test/_data/units/'): + return { + 'units': 'all', # test infrastructure, run all unit tests + 'integration': 'ansible-test', # run ansible-test self tests + } + + if path.startswith('test/lib/ansible_test/_data/pytest/'): + return { + 'units': 'all', # test infrastructure, run all unit tests + 'integration': 'ansible-test', # run ansible-test self tests + } + + if path.startswith('test/lib/ansible_test/_data/requirements/'): + if name in ( + 'integration', + 'network-integration', + 'windows-integration', + ): + return { + name: self.integration_all_target, + } + + if name in ( + 'sanity', + 'units', + ): + return { + name: 'all', + } + + if name.startswith('integration.cloud.'): + cloud_target = 'cloud/%s/' % name.split('.')[2] + + if cloud_target in self.integration_targets_by_alias: + return { + 'integration': cloud_target, + } + + if path.startswith('test/lib/'): + return all_tests(self.args) # test infrastructure, run all tests + + if path.startswith('test/support/'): + return all_tests(self.args) # test infrastructure, run all tests + + 
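Editorial aside, not part of the patch: every rule in these _classify_* methods honors the contract established by classify() above: return None when the path is unknown (the caller then falls back to running all tests), an empty dict to run nothing, or a dict mapping each test command to its target, where a None value drops that command. A minimal sketch of a consumer of that contract; run_selected_tests, known_commands and the example values are hypothetical:

def run_selected_tests(classification, known_commands):
    # None means the path could not be classified, so everything runs.
    if classification is None:
        return {command: 'all' for command in known_commands}
    # Otherwise keep only known commands with a concrete target.
    return {command: target
            for command, target in classification.items()
            if command in known_commands and target is not None}

assert run_selected_tests(None, {'units'}) == {'units': 'all'}
assert run_selected_tests({'integration': 'cloud/aws/', 'units': None},
                          {'integration', 'units'}) == {'integration': 'cloud/aws/'}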
if path.startswith('test/utils/shippable/'): + if dirname == 'test/utils/shippable': + test_map = { + 'cloud.sh': 'integration:cloud/', + 'linux.sh': 'integration:all', + 'network.sh': 'network-integration:all', + 'remote.sh': 'integration:all', + 'sanity.sh': 'sanity:all', + 'units.sh': 'units:all', + 'windows.sh': 'windows-integration:all', + } + + test_match = test_map.get(filename) + + if test_match: + test_command, test_target = test_match.split(':') + + return { + test_command: test_target, + } + + cloud_target = 'cloud/%s/' % name + + if cloud_target in self.integration_targets_by_alias: + return { + 'integration': cloud_target, + } + + return all_tests(self.args) # test infrastructure, run all tests + + if path.startswith('test/utils/'): + return minimal + + if '/' not in path: + if path in ( + '.gitattributes', + '.gitignore', + '.mailmap', + 'COPYING', + 'Makefile', + ): + return minimal + + if path in ( + 'setup.py', + ): + return all_tests(self.args) # broad impact, run all tests + + if ext in ( + '.in', + '.md', + '.rst', + '.toml', + '.txt', + ): + return minimal + + return None # unknown, will result in fall-back to run all tests + + def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]] + """ + Return tests for the given plugin type and plugin name. + This function is useful for plugin types which do not require special processing. + """ + if plugin_name == '__init__': + return all_tests(self.args, True) + + integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name)) + + if integration_target: + integration_name = integration_target.name + else: + integration_name = None + + units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name) + + if units_path not in self.units_paths: + units_path = None + + return dict( + integration=integration_name, + units=units_path, + ) + + +def all_tests(args, force=False): + """ + :type args: TestConfig + :type force: bool + :rtype: dict[str, str] + """ + if force: + integration_all_target = 'all' + else: + integration_all_target = get_integration_all_target(args) + + return { + 'sanity': 'all', + 'units': 'all', + 'integration': integration_all_target, + 'windows-integration': integration_all_target, + 'network-integration': integration_all_target, + } + + +def get_integration_all_target(args): + """ + :type args: TestConfig + :rtype: str + """ + if isinstance(args, IntegrationConfig): + return args.changed_all_target + + return 'all' diff --git a/test/lib/ansible_test/_internal/cli.py b/test/lib/ansible_test/_internal/cli.py new file mode 100644 index 00000000..e406b2dd --- /dev/null +++ b/test/lib/ansible_test/_internal/cli.py @@ -0,0 +1,1217 @@ +"""Test runner for all Ansible tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import errno +import os +import sys + +# This import should occur as early as possible. +# It must occur before subprocess has been imported anywhere in the current process. +from .init import ( + CURRENT_RLIMIT_NOFILE, +) + +from . 
import types as t + +from .util import ( + ApplicationError, + display, + raw_command, + generate_pip_command, + read_lines_without_comments, + MAXFD, + ANSIBLE_TEST_DATA_ROOT, +) + +from .delegation import ( + check_delegation_args, + delegate, +) + +from .executor import ( + command_posix_integration, + command_network_integration, + command_windows_integration, + command_shell, + SUPPORTED_PYTHON_VERSIONS, + ApplicationWarning, + Delegate, + generate_pip_install, + check_startup, +) + +from .config import ( + PosixIntegrationConfig, + WindowsIntegrationConfig, + NetworkIntegrationConfig, + SanityConfig, + UnitsConfig, + ShellConfig, +) + +from .env import ( + EnvConfig, + command_env, + configure_timeout, +) + +from .sanity import ( + command_sanity, + sanity_init, + sanity_get_tests, +) + +from .units import ( + command_units, +) + +from .target import ( + find_target_completion, + walk_posix_integration_targets, + walk_network_integration_targets, + walk_windows_integration_targets, + walk_units_targets, + walk_sanity_targets, +) + +from .core_ci import ( + AWS_ENDPOINTS, +) + +from .cloud import ( + initialize_cloud_plugins, +) + +from .data import ( + data_context, +) + +from .util_common import ( + get_docker_completion, + get_network_completion, + get_remote_completion, + CommonConfig, +) + +from .coverage.combine import ( + command_coverage_combine, +) + +from .coverage.erase import ( + command_coverage_erase, +) + +from .coverage.html import ( + command_coverage_html, +) + +from .coverage.report import ( + command_coverage_report, + CoverageReportConfig, +) + +from .coverage.xml import ( + command_coverage_xml, +) + +from .coverage.analyze.targets.generate import ( + command_coverage_analyze_targets_generate, + CoverageAnalyzeTargetsGenerateConfig, +) + +from .coverage.analyze.targets.expand import ( + command_coverage_analyze_targets_expand, + CoverageAnalyzeTargetsExpandConfig, +) + +from .coverage.analyze.targets.filter import ( + command_coverage_analyze_targets_filter, + CoverageAnalyzeTargetsFilterConfig, +) + +from .coverage.analyze.targets.combine import ( + command_coverage_analyze_targets_combine, + CoverageAnalyzeTargetsCombineConfig, +) + +from .coverage.analyze.targets.missing import ( + command_coverage_analyze_targets_missing, + CoverageAnalyzeTargetsMissingConfig, +) + +from .coverage import ( + COVERAGE_GROUPS, + CoverageConfig, +) + +if t.TYPE_CHECKING: + import argparse as argparse_module + + +def main(): + """Main program function.""" + try: + os.chdir(data_context().content.root) + initialize_cloud_plugins() + sanity_init() + args = parse_args() + config = args.config(args) # type: CommonConfig + display.verbosity = config.verbosity + display.truncate = config.truncate + display.redact = config.redact + display.color = config.color + display.info_stderr = config.info_stderr + check_startup() + check_delegation_args(config) + configure_timeout(config) + + display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2) + display.info('MAXFD: %d' % MAXFD, verbosity=2) + + try: + args.func(config) + delegate_args = None + except Delegate as ex: + # save delegation args for use once we exit the exception handler + delegate_args = (ex.exclude, ex.require, ex.integration_targets) + + if delegate_args: + # noinspection PyTypeChecker + delegate(config, *delegate_args) + + display.review_warnings() + except ApplicationWarning as ex: + display.warning(u'%s' % ex) + sys.exit(0) + except ApplicationError as ex: + display.error(u'%s' % ex) + sys.exit(1) + except 
KeyboardInterrupt: + sys.exit(2) + except IOError as ex: + if ex.errno == errno.EPIPE: + sys.exit(3) + raise + + +def parse_args(): + """Parse command line arguments.""" + try: + import argparse + except ImportError: + if '--requirements' not in sys.argv: + raise + # install argparse without using constraints since pip may be too old to support them + # not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different) + # argparse has no special requirements, so upgrading pip is not required here + raw_command(generate_pip_install(generate_pip_command(sys.executable), '', packages=['argparse'], use_constraints=False)) + import argparse + + try: + import argcomplete + except ImportError: + argcomplete = None + + if argcomplete: + epilog = 'Tab completion available using the "argcomplete" python package.' + else: + epilog = 'Install the "argcomplete" python package to enable tab completion.' + + def key_value_type(value): # type: (str) -> t.Tuple[str, str] + """Wrapper around key_value.""" + return key_value(argparse, value) + + parser = argparse.ArgumentParser(epilog=epilog) + + common = argparse.ArgumentParser(add_help=False) + + common.add_argument('-e', '--explain', + action='store_true', + help='explain commands that would be executed') + + common.add_argument('-v', '--verbose', + dest='verbosity', + action='count', + default=0, + help='display more output') + + common.add_argument('--color', + metavar='COLOR', + nargs='?', + help='generate color output: %(choices)s', + choices=('yes', 'no', 'auto'), + const='yes', + default='auto') + + common.add_argument('--debug', + action='store_true', + help='run ansible commands in debug mode') + + # noinspection PyTypeChecker + common.add_argument('--truncate', + dest='truncate', + metavar='COLUMNS', + type=int, + default=display.columns, + help='truncate some long output (0=disabled) (default: auto)') + + common.add_argument('--redact', + dest='redact', + action='store_true', + default=True, + help='redact sensitive values in output') + + common.add_argument('--no-redact', + dest='redact', + action='store_false', + default=False, + help='show sensitive values in output') + + common.add_argument('--check-python', + choices=SUPPORTED_PYTHON_VERSIONS, + help=argparse.SUPPRESS) + + test = argparse.ArgumentParser(add_help=False, parents=[common]) + + test.add_argument('include', + metavar='TARGET', + nargs='*', + help='test the specified target').completer = complete_target + + test.add_argument('--include', + metavar='TARGET', + action='append', + help='include the specified target').completer = complete_target + + test.add_argument('--exclude', + metavar='TARGET', + action='append', + help='exclude the specified target').completer = complete_target + + test.add_argument('--require', + metavar='TARGET', + action='append', + help='require the specified target').completer = complete_target + + test.add_argument('--coverage', + action='store_true', + help='analyze code coverage when running tests') + + test.add_argument('--coverage-label', + default='', + help='label to include in coverage output file names') + + test.add_argument('--coverage-check', + action='store_true', + help='only verify code coverage can be enabled') + + test.add_argument('--metadata', + help=argparse.SUPPRESS) + + test.add_argument('--base-branch', + help='base branch used for change detection') + + add_changes(test, argparse) + add_environments(test) + + integration = 
argparse.ArgumentParser(add_help=False, parents=[test]) + + integration.add_argument('--python', + metavar='VERSION', + choices=SUPPORTED_PYTHON_VERSIONS + ('default',), + help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS)) + + integration.add_argument('--start-at', + metavar='TARGET', + help='start at the specified target').completer = complete_target + + integration.add_argument('--start-at-task', + metavar='TASK', + help='start at the specified task') + + integration.add_argument('--tags', + metavar='TAGS', + help='only run plays and tasks tagged with these values') + + integration.add_argument('--skip-tags', + metavar='TAGS', + help='only run plays and tasks whose tags do not match these values') + + integration.add_argument('--diff', + action='store_true', + help='show diff output') + + integration.add_argument('--allow-destructive', + action='store_true', + help='allow destructive tests') + + integration.add_argument('--allow-root', + action='store_true', + help='allow tests requiring root when not root') + + integration.add_argument('--allow-disabled', + action='store_true', + help='allow tests which have been marked as disabled') + + integration.add_argument('--allow-unstable', + action='store_true', + help='allow tests which have been marked as unstable') + + integration.add_argument('--allow-unstable-changed', + action='store_true', + help='allow tests which have been marked as unstable when focused changes are detected') + + integration.add_argument('--allow-unsupported', + action='store_true', + help='allow tests which have been marked as unsupported') + + integration.add_argument('--retry-on-error', + action='store_true', + help='retry failed test with increased verbosity') + + integration.add_argument('--continue-on-error', + action='store_true', + help='continue after failed test') + + integration.add_argument('--debug-strategy', + action='store_true', + help='run test playbooks using the debug strategy') + + integration.add_argument('--changed-all-target', + metavar='TARGET', + default='all', + help='target to run when all tests are needed') + + integration.add_argument('--changed-all-mode', + metavar='MODE', + choices=('default', 'include', 'exclude'), + help='include/exclude behavior with --changed-all-target: %(choices)s') + + integration.add_argument('--list-targets', + action='store_true', + help='list matching targets instead of running tests') + + integration.add_argument('--no-temp-workdir', + action='store_true', + help='do not run tests from a temporary directory (use only for verifying broken tests)') + + integration.add_argument('--no-temp-unicode', + action='store_true', + help='avoid unicode characters in temporary directory (use only for verifying broken tests)') + + subparsers = parser.add_subparsers(metavar='COMMAND') + subparsers.required = True # work-around for python 3 bug which makes subparsers optional + + posix_integration = subparsers.add_parser('integration', + parents=[integration], + help='posix integration tests') + + posix_integration.set_defaults(func=command_posix_integration, + targets=walk_posix_integration_targets, + config=PosixIntegrationConfig) + + add_extra_docker_options(posix_integration) + add_httptester_options(posix_integration, argparse) + + network_integration = subparsers.add_parser('network-integration', + parents=[integration], + help='network integration tests') + + network_integration.set_defaults(func=command_network_integration, + targets=walk_network_integration_targets, + config=NetworkIntegrationConfig) + 
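Editorial aside, not part of the patch: the wiring above builds the CLI by composing add_help=False parent parsers (common -> test -> integration) via parents=[...], and by forcing subparsers.required = True to restore mandatory subcommands under Python 3. A self-contained sketch of the same pattern; all names here are illustrative:

import argparse

common = argparse.ArgumentParser(add_help=False)
common.add_argument('-v', '--verbose', dest='verbosity', action='count', default=0,
                    help='display more output')

test = argparse.ArgumentParser(add_help=False, parents=[common])
test.add_argument('--coverage', action='store_true',
                  help='analyze code coverage when running tests')

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True  # same Python 3 work-around as above

integration = subparsers.add_parser('integration', parents=[test],
                                    help='posix integration tests')
integration.set_defaults(command='integration')

args = parser.parse_args(['integration', '-v', '--coverage'])
assert (args.command, args.verbosity, args.coverage) == ('integration', 1, True)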
+ add_extra_docker_options(network_integration, integration=False) + + network_integration.add_argument('--platform', + metavar='PLATFORM', + action='append', + help='network platform/version').completer = complete_network_platform + + network_integration.add_argument('--platform-collection', + type=key_value_type, + metavar='PLATFORM=COLLECTION', + action='append', + help='collection used to test platform').completer = complete_network_platform_collection + + network_integration.add_argument('--platform-connection', + type=key_value_type, + metavar='PLATFORM=CONNECTION', + action='append', + help='connection used to test platform').completer = complete_network_platform_connection + + network_integration.add_argument('--inventory', + metavar='PATH', + help='path to inventory used for tests') + + network_integration.add_argument('--testcase', + metavar='TESTCASE', + help='limit a test to a specified testcase').completer = complete_network_testcase + + windows_integration = subparsers.add_parser('windows-integration', + parents=[integration], + help='windows integration tests') + + windows_integration.set_defaults(func=command_windows_integration, + targets=walk_windows_integration_targets, + config=WindowsIntegrationConfig) + + add_extra_docker_options(windows_integration, integration=False) + add_httptester_options(windows_integration, argparse) + + windows_integration.add_argument('--windows', + metavar='VERSION', + action='append', + help='windows version').completer = complete_windows + + windows_integration.add_argument('--inventory', + metavar='PATH', + help='path to inventory used for tests') + + units = subparsers.add_parser('units', + parents=[test], + help='unit tests') + + units.set_defaults(func=command_units, + targets=walk_units_targets, + config=UnitsConfig) + + units.add_argument('--python', + metavar='VERSION', + choices=SUPPORTED_PYTHON_VERSIONS + ('default',), + help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS)) + + units.add_argument('--collect-only', + action='store_true', + help='collect tests but do not execute them') + + # noinspection PyTypeChecker + units.add_argument('--num-workers', + type=int, + help='number of workers to use (default: auto)') + + units.add_argument('--requirements-mode', + choices=('only', 'skip'), + help=argparse.SUPPRESS) + + add_extra_docker_options(units, integration=False) + + sanity = subparsers.add_parser('sanity', + parents=[test], + help='sanity tests') + + sanity.set_defaults(func=command_sanity, + targets=walk_sanity_targets, + config=SanityConfig) + + sanity.add_argument('--test', + metavar='TEST', + action='append', + choices=[test.name for test in sanity_get_tests()], + help='tests to run').completer = complete_sanity_test + + sanity.add_argument('--skip-test', + metavar='TEST', + action='append', + choices=[test.name for test in sanity_get_tests()], + help='tests to skip').completer = complete_sanity_test + + sanity.add_argument('--allow-disabled', + action='store_true', + help='allow tests to run which are disabled by default') + + sanity.add_argument('--list-tests', + action='store_true', + help='list available tests') + + sanity.add_argument('--python', + metavar='VERSION', + choices=SUPPORTED_PYTHON_VERSIONS + ('default',), + help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS)) + + sanity.add_argument('--enable-optional-errors', + action='store_true', + help='enable optional errors') + + add_lint(sanity) + add_extra_docker_options(sanity, integration=False) + + shell = 
subparsers.add_parser('shell', + parents=[common], + help='open an interactive shell') + + shell.add_argument('--python', + metavar='VERSION', + choices=SUPPORTED_PYTHON_VERSIONS + ('default',), + help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS)) + + shell.set_defaults(func=command_shell, + config=ShellConfig) + + shell.add_argument('--raw', + action='store_true', + help='direct to shell with no setup') + + add_environments(shell) + add_extra_docker_options(shell) + add_httptester_options(shell, argparse) + + coverage_common = argparse.ArgumentParser(add_help=False, parents=[common]) + + add_environments(coverage_common, isolated_delegation=False) + + coverage = subparsers.add_parser('coverage', + help='code coverage management and reporting') + + coverage_subparsers = coverage.add_subparsers(metavar='COMMAND') + coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional + + add_coverage_analyze(coverage_subparsers, coverage_common) + + coverage_combine = coverage_subparsers.add_parser('combine', + parents=[coverage_common], + help='combine coverage data and rewrite remote paths') + + coverage_combine.set_defaults(func=command_coverage_combine, + config=CoverageConfig) + + coverage_combine.add_argument('--export', + help='directory to export combined coverage files to') + + add_extra_coverage_options(coverage_combine) + + coverage_erase = coverage_subparsers.add_parser('erase', + parents=[coverage_common], + help='erase coverage data files') + + coverage_erase.set_defaults(func=command_coverage_erase, + config=CoverageConfig) + + coverage_report = coverage_subparsers.add_parser('report', + parents=[coverage_common], + help='generate console coverage report') + + coverage_report.set_defaults(func=command_coverage_report, + config=CoverageReportConfig) + + coverage_report.add_argument('--show-missing', + action='store_true', + help='show line numbers of statements not executed') + coverage_report.add_argument('--include', + metavar='PAT1,PAT2,...', + help='include only files whose paths match one of these ' + 'patterns. Accepts shell-style wildcards, which must be ' + 'quoted.') + coverage_report.add_argument('--omit', + metavar='PAT1,PAT2,...', + help='omit files whose paths match one of these patterns. 
' + 'Accepts shell-style wildcards, which must be quoted.') + + add_extra_coverage_options(coverage_report) + + coverage_html = coverage_subparsers.add_parser('html', + parents=[coverage_common], + help='generate html coverage report') + + coverage_html.set_defaults(func=command_coverage_html, + config=CoverageConfig) + + add_extra_coverage_options(coverage_html) + + coverage_xml = coverage_subparsers.add_parser('xml', + parents=[coverage_common], + help='generate xml coverage report') + + coverage_xml.set_defaults(func=command_coverage_xml, + config=CoverageConfig) + + add_extra_coverage_options(coverage_xml) + + env = subparsers.add_parser('env', + parents=[common], + help='show information about the test environment') + + env.set_defaults(func=command_env, + config=EnvConfig) + + env.add_argument('--show', + action='store_true', + help='show environment on stdout') + + env.add_argument('--dump', + action='store_true', + help='dump environment to disk') + + env.add_argument('--list-files', + action='store_true', + help='list files on stdout') + + # noinspection PyTypeChecker + env.add_argument('--timeout', + type=int, + metavar='MINUTES', + help='timeout for future ansible-test commands (0 clears)') + + if argcomplete: + argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True) + + args = parser.parse_args() + + if args.explain and not args.verbosity: + args.verbosity = 1 + + if args.color == 'yes': + args.color = True + elif args.color == 'no': + args.color = False + else: + args.color = sys.stdout.isatty() + + return args + + +def key_value(argparse, value): # type: (argparse_module, str) -> t.Tuple[str, str] + """Type parsing and validation for argparse key/value pairs separated by an '=' character.""" + parts = value.split('=') + + if len(parts) != 2: + raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value) + + return parts[0], parts[1] + + +# noinspection PyProtectedMember +def add_coverage_analyze(coverage_subparsers, coverage_common): # type: (argparse_module._SubParsersAction, argparse_module.ArgumentParser) -> None + """Add the `coverage analyze` subcommand.""" + analyze = coverage_subparsers.add_parser( + 'analyze', + help='analyze collected coverage data', + ) + + analyze_subparsers = analyze.add_subparsers(metavar='COMMAND') + analyze_subparsers.required = True # work-around for python 3 bug which makes subparsers optional + + targets = analyze_subparsers.add_parser( + 'targets', + help='analyze integration test target coverage', + ) + + targets_subparsers = targets.add_subparsers(metavar='COMMAND') + targets_subparsers.required = True # work-around for python 3 bug which makes subparsers optional + + targets_generate = targets_subparsers.add_parser( + 'generate', + parents=[coverage_common], + help='aggregate coverage by integration test target', + ) + + targets_generate.set_defaults( + func=command_coverage_analyze_targets_generate, + config=CoverageAnalyzeTargetsGenerateConfig, + ) + + targets_generate.add_argument( + 'input_dir', + nargs='?', + help='directory to read coverage from', + ) + + targets_generate.add_argument( + 'output_file', + help='output file for aggregated coverage', + ) + + targets_expand = targets_subparsers.add_parser( + 'expand', + parents=[coverage_common], + help='expand target names from integers in aggregated coverage', + ) + + targets_expand.set_defaults( + func=command_coverage_analyze_targets_expand, + config=CoverageAnalyzeTargetsExpandConfig, + ) + + targets_expand.add_argument( + 
'input_file', + help='input file to read aggregated coverage from', + ) + + targets_expand.add_argument( + 'output_file', + help='output file to write expanded coverage to', + ) + + targets_filter = targets_subparsers.add_parser( + 'filter', + parents=[coverage_common], + help='filter aggregated coverage data', + ) + + targets_filter.set_defaults( + func=command_coverage_analyze_targets_filter, + config=CoverageAnalyzeTargetsFilterConfig, + ) + + targets_filter.add_argument( + 'input_file', + help='input file to read aggregated coverage from', + ) + + targets_filter.add_argument( + 'output_file', + help='output file to write expanded coverage to', + ) + + targets_filter.add_argument( + '--include-target', + dest='include_targets', + action='append', + help='include the specified targets', + ) + + targets_filter.add_argument( + '--exclude-target', + dest='exclude_targets', + action='append', + help='exclude the specified targets', + ) + + targets_filter.add_argument( + '--include-path', + help='include paths matching the given regex', + ) + + targets_filter.add_argument( + '--exclude-path', + help='exclude paths matching the given regex', + ) + + targets_combine = targets_subparsers.add_parser( + 'combine', + parents=[coverage_common], + help='combine multiple aggregated coverage files', + ) + + targets_combine.set_defaults( + func=command_coverage_analyze_targets_combine, + config=CoverageAnalyzeTargetsCombineConfig, + ) + + targets_combine.add_argument( + 'input_file', + nargs='+', + help='input file to read aggregated coverage from', + ) + + targets_combine.add_argument( + 'output_file', + help='output file to write aggregated coverage to', + ) + + targets_missing = targets_subparsers.add_parser( + 'missing', + parents=[coverage_common], + help='identify coverage in one file missing in another', + ) + + targets_missing.set_defaults( + func=command_coverage_analyze_targets_missing, + config=CoverageAnalyzeTargetsMissingConfig, + ) + + targets_missing.add_argument( + 'from_file', + help='input file containing aggregated coverage', + ) + + targets_missing.add_argument( + 'to_file', + help='input file containing aggregated coverage', + ) + + targets_missing.add_argument( + 'output_file', + help='output file to write aggregated coverage to', + ) + + targets_missing.add_argument( + '--only-gaps', + action='store_true', + help='report only arcs/lines not hit by any target', + ) + + targets_missing.add_argument( + '--only-exists', + action='store_true', + help='limit results to files that exist', + ) + + +def add_lint(parser): + """ + :type parser: argparse.ArgumentParser + """ + parser.add_argument('--lint', + action='store_true', + help='write lint output to stdout, everything else stderr') + + parser.add_argument('--junit', + action='store_true', + help='write test failures to junit xml files') + + parser.add_argument('--failure-ok', + action='store_true', + help='exit successfully on failed tests after saving results') + + +def add_changes(parser, argparse): + """ + :type parser: argparse.ArgumentParser + :type argparse: argparse + """ + parser.add_argument('--changed', action='store_true', help='limit targets based on changes') + + changes = parser.add_argument_group(title='change detection arguments') + + changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS) + changes.add_argument('--untracked', action='store_true', help='include untracked files') + changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files') + 
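    # Editorial note, not part of the original patch: the --ignore-* options pair
    # dest= with action='store_false', so each boolean defaults to True and is
    # only switched off when the flag is given. For a parser built this way:
    #     parser.parse_args([]).committed                      # True
    #     parser.parse_args(['--ignore-committed']).committed  # False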
changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files') + changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files') + + changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS) + changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS) + + +def add_environments(parser, isolated_delegation=True): + """ + :type parser: argparse.ArgumentParser + :type isolated_delegation: bool + """ + parser.add_argument('--requirements', + action='store_true', + help='install command requirements') + + parser.add_argument('--python-interpreter', + metavar='PATH', + default=None, + help='path to the docker or remote python interpreter') + + parser.add_argument('--no-pip-check', + dest='pip_check', + default=True, + action='store_false', + help='do not run "pip check" to verify requirements') + + environments = parser.add_mutually_exclusive_group() + + environments.add_argument('--local', + action='store_true', + help='run from the local environment') + + environments.add_argument('--venv', + action='store_true', + help='run from ansible-test managed virtual environments') + + venv = parser.add_argument_group(title='venv arguments') + + venv.add_argument('--venv-system-site-packages', + action='store_true', + help='enable system site packages') + + if not isolated_delegation: + environments.set_defaults( + docker=None, + remote=None, + remote_stage=None, + remote_provider=None, + remote_aws_region=None, + remote_terminate=None, + remote_endpoint=None, + python_interpreter=None, + ) + + return + + environments.add_argument('--docker', + metavar='IMAGE', + nargs='?', + default=None, + const='default', + help='run from a docker container').completer = complete_docker + + environments.add_argument('--remote', + metavar='PLATFORM', + default=None, + help='run from a remote instance').completer = complete_remote_shell if parser.prog.endswith(' shell') else complete_remote + + remote = parser.add_argument_group(title='remote arguments') + + remote.add_argument('--remote-stage', + metavar='STAGE', + help='remote stage to use: prod, dev', + default='prod').completer = complete_remote_stage + + remote.add_argument('--remote-provider', + metavar='PROVIDER', + help='remote provider to use: %(choices)s', + choices=['default', 'aws', 'azure', 'parallels', 'ibmvpc', 'ibmps'], + default='default') + + remote.add_argument('--remote-endpoint', + metavar='ENDPOINT', + help='remote provisioning endpoint to use (default: auto)', + default=None) + + remote.add_argument('--remote-aws-region', + metavar='REGION', + help='remote aws region to use: %(choices)s (default: auto)', + choices=sorted(AWS_ENDPOINTS), + default=None) + + remote.add_argument('--remote-terminate', + metavar='WHEN', + help='terminate remote instance: %(choices)s (default: %(default)s)', + choices=['never', 'always', 'success'], + default='never') + + +def add_extra_coverage_options(parser): + """ + :type parser: argparse.ArgumentParser + """ + parser.add_argument('--group-by', + metavar='GROUP', + action='append', + choices=COVERAGE_GROUPS, + help='group output by: %s' % ', '.join(COVERAGE_GROUPS)) + + parser.add_argument('--all', + action='store_true', + help='include all python/powershell source files') + + parser.add_argument('--stub', + action='store_true', + help='generate empty report of all python/powershell source files') + + +def add_httptester_options(parser, argparse): + """ + :type parser: 
argparse.ArgumentParser + :type argparse: argparse + """ + group = parser.add_mutually_exclusive_group() + + group.add_argument('--httptester', + metavar='IMAGE', + default='quay.io/ansible/http-test-container:1.0.0', + help='docker image to use for the httptester container') + + group.add_argument('--disable-httptester', + dest='httptester', + action='store_const', + const='', + help='do not use the httptester container') + + parser.add_argument('--inject-httptester', + action='store_true', + help=argparse.SUPPRESS) # internal use only + + +def add_extra_docker_options(parser, integration=True): + """ + :type parser: argparse.ArgumentParser + :type integration: bool + """ + docker = parser.add_argument_group(title='docker arguments') + + docker.add_argument('--docker-no-pull', + action='store_false', + dest='docker_pull', + help='do not explicitly pull the latest docker images') + + if data_context().content.is_ansible: + docker.add_argument('--docker-keep-git', + action='store_true', + help='transfer git related files into the docker container') + else: + docker.set_defaults( + docker_keep_git=False, + ) + + docker.add_argument('--docker-seccomp', + metavar='SC', + choices=('default', 'unconfined'), + default=None, + help='set seccomp confinement for the test container: %(choices)s') + + docker.add_argument('--docker-terminate', + metavar='WHEN', + help='terminate docker container: %(choices)s (default: %(default)s)', + choices=['never', 'always', 'success'], + default='always') + + if not integration: + return + + docker.add_argument('--docker-privileged', + action='store_true', + help='run docker container in privileged mode') + + docker.add_argument('--docker-network', + help='run using the specified docker network') + + # noinspection PyTypeChecker + docker.add_argument('--docker-memory', + help='memory limit for docker in bytes', type=int) + + +# noinspection PyUnusedLocal +def complete_remote_stage(prefix, parsed_args, **_): # pylint: disable=unused-argument + """ + :type prefix: unicode + :type parsed_args: any + :rtype: list[str] + """ + return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)] + + +def complete_target(prefix, parsed_args, **_): + """ + :type prefix: unicode + :type parsed_args: any + :rtype: list[str] + """ + return find_target_completion(parsed_args.targets, prefix) + + +# noinspection PyUnusedLocal +def complete_remote(prefix, parsed_args, **_): + """ + :type prefix: unicode + :type parsed_args: any + :rtype: list[str] + """ + del parsed_args + + images = sorted(get_remote_completion().keys()) + + return [i for i in images if i.startswith(prefix)] + + +# noinspection PyUnusedLocal +def complete_remote_shell(prefix, parsed_args, **_): + """ + :type prefix: unicode + :type parsed_args: any + :rtype: list[str] + """ + del parsed_args + + images = sorted(get_remote_completion().keys()) + + # 2008 doesn't support SSH so we do not add to the list of valid images + windows_completion_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt') + images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True) if i != '2008']) + + return [i for i in images if i.startswith(prefix)] + + +# noinspection PyUnusedLocal +def complete_docker(prefix, parsed_args, **_): + """ + :type prefix: unicode + :type parsed_args: any + :rtype: list[str] + """ + del parsed_args + + images = sorted(get_docker_completion().keys()) + + return [i for i in images if i.startswith(prefix)] + + +def 
complete_windows(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt'), remove_blank_lines=True)
+
+    return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
+
+
+def complete_network_platform(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    images = sorted(get_network_completion())
+
+    return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
+
+
+def complete_network_platform_collection(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    left = prefix.split('=')[0]
+    images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+    return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
+
+
+def complete_network_platform_connection(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    left = prefix.split('=')[0]
+    images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+    return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
+
+
+def complete_network_testcase(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    testcases = []
+
+    # since testcases are module specific, don't autocomplete if more than one
+    # module is specified
+    if len(parsed_args.include) != 1:
+        return []
+
+    test_dir = os.path.join(data_context().content.integration_targets_path, parsed_args.include[0], 'tests')
+    connection_dirs = data_context().content.get_dirs(test_dir)
+
+    for connection_dir in connection_dirs:
+        for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
+            if testcase.startswith(prefix):
+                testcases.append(testcase.split('.')[0])
+
+    return testcases
+
+
+# noinspection PyUnusedLocal
+def complete_sanity_test(prefix, parsed_args, **_):
+    """
+    :type prefix: unicode
+    :type parsed_args: any
+    :rtype: list[str]
+    """
+    del parsed_args
+
+    tests = sorted(test.name for test in sanity_get_tests())
+
+    return [i for i in tests if i.startswith(prefix)]
diff --git a/test/lib/ansible_test/_internal/cloud/__init__.py b/test/lib/ansible_test/_internal/cloud/__init__.py
new file mode 100644
index 00000000..04f592c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/__init__.py
@@ -0,0 +1,429 @@
+"""Plugin system for cloud providers and environments for use in integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import atexit
+import datetime
+import time
+import os
+import re
+import tempfile
+
+from ..
import types as t + +from ..encoding import ( + to_bytes, +) + +from ..io import ( + read_text_file, +) + +from ..util import ( + ApplicationError, + display, + import_plugins, + load_plugins, + ABC, + ANSIBLE_TEST_CONFIG_ROOT, +) + +from ..util_common import ( + write_json_test_results, + ResultType, +) + +from ..target import ( + TestTarget, +) + +from ..config import ( + IntegrationConfig, +) + +from ..ci import ( + get_ci_provider, +) + +from ..data import ( + data_context, +) + +PROVIDERS = {} +ENVIRONMENTS = {} + + +def initialize_cloud_plugins(): + """Import cloud plugins and load them into the plugin dictionaries.""" + import_plugins('cloud') + + load_plugins(CloudProvider, PROVIDERS) + load_plugins(CloudEnvironment, ENVIRONMENTS) + + +def get_cloud_platforms(args, targets=None): + """ + :type args: TestConfig + :type targets: tuple[IntegrationTarget] | None + :rtype: list[str] + """ + if isinstance(args, IntegrationConfig): + if args.list_targets: + return [] + + if targets is None: + cloud_platforms = set(args.metadata.cloud_config or []) + else: + cloud_platforms = set(get_cloud_platform(target) for target in targets) + + cloud_platforms.discard(None) + + return sorted(cloud_platforms) + + +def get_cloud_platform(target): + """ + :type target: IntegrationTarget + :rtype: str | None + """ + cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/') + + if not cloud_platforms: + return None + + if len(cloud_platforms) == 1: + cloud_platform = cloud_platforms.pop() + + if cloud_platform not in PROVIDERS: + raise ApplicationError('Target %s aliases contains unknown cloud platform: %s' % (target.name, cloud_platform)) + + return cloud_platform + + raise ApplicationError('Target %s aliases contains multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms)))) + + +def get_cloud_providers(args, targets=None): + """ + :type args: IntegrationConfig + :type targets: tuple[IntegrationTarget] | None + :rtype: list[CloudProvider] + """ + return [PROVIDERS[p](args) for p in get_cloud_platforms(args, targets)] + + +def get_cloud_environment(args, target): + """ + :type args: IntegrationConfig + :type target: IntegrationTarget + :rtype: CloudEnvironment + """ + cloud_platform = get_cloud_platform(target) + + if not cloud_platform: + return None + + return ENVIRONMENTS[cloud_platform](args) + + +def cloud_filter(args, targets): + """ + :type args: IntegrationConfig + :type targets: tuple[IntegrationTarget] + :return: list[str] + """ + if args.metadata.cloud_config is not None: + return [] # cloud filter already performed prior to delegation + + exclude = [] + + for provider in get_cloud_providers(args, targets): + provider.filter(targets, exclude) + + return exclude + + +def cloud_init(args, targets): + """ + :type args: IntegrationConfig + :type targets: tuple[IntegrationTarget] + """ + if args.metadata.cloud_config is not None: + return # cloud configuration already established prior to delegation + + args.metadata.cloud_config = {} + + results = {} + + for provider in get_cloud_providers(args, targets): + args.metadata.cloud_config[provider.platform] = {} + + start_time = time.time() + provider.setup() + end_time = time.time() + + results[provider.platform] = dict( + platform=provider.platform, + setup_seconds=int(end_time - start_time), + targets=[target.name for target in targets], + ) + + if not args.explain and results: + result_name = '%s-%s.json' % ( + args.command, re.sub(r'[^0-9]', '-', 
str(datetime.datetime.utcnow().replace(microsecond=0)))) + + data = dict( + clouds=results, + ) + + write_json_test_results(ResultType.DATA, result_name, data) + + +class CloudBase(ABC): + """Base class for cloud plugins.""" + __metaclass__ = abc.ABCMeta + + _CONFIG_PATH = 'config_path' + _RESOURCE_PREFIX = 'resource_prefix' + _MANAGED = 'managed' + _SETUP_EXECUTED = 'setup_executed' + + def __init__(self, args): + """ + :type args: IntegrationConfig + """ + self.args = args + self.platform = self.__module__.split('.')[-1] + + def config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None + """Add the config file to the payload file list.""" + if self._get_cloud_config(self._CONFIG_PATH, ''): + pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root)) + + if pair not in files: + display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3) + files.append(pair) + + data_context().register_payload_callback(config_callback) + + @property + def setup_executed(self): + """ + :rtype: bool + """ + return self._get_cloud_config(self._SETUP_EXECUTED, False) + + @setup_executed.setter + def setup_executed(self, value): + """ + :type value: bool + """ + self._set_cloud_config(self._SETUP_EXECUTED, value) + + @property + def config_path(self): + """ + :rtype: str + """ + return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH)) + + @config_path.setter + def config_path(self, value): + """ + :type value: str + """ + self._set_cloud_config(self._CONFIG_PATH, value) + + @property + def resource_prefix(self): + """ + :rtype: str + """ + return self._get_cloud_config(self._RESOURCE_PREFIX) + + @resource_prefix.setter + def resource_prefix(self, value): + """ + :type value: str + """ + self._set_cloud_config(self._RESOURCE_PREFIX, value) + + @property + def managed(self): + """ + :rtype: bool + """ + return self._get_cloud_config(self._MANAGED) + + @managed.setter + def managed(self, value): + """ + :type value: bool + """ + self._set_cloud_config(self._MANAGED, value) + + def _get_cloud_config(self, key, default=None): + """ + :type key: str + :type default: str | int | bool | None + :rtype: str | int | bool + """ + if default is not None: + return self.args.metadata.cloud_config[self.platform].get(key, default) + + return self.args.metadata.cloud_config[self.platform][key] + + def _set_cloud_config(self, key, value): + """ + :type key: str + :type value: str | int | bool + """ + self.args.metadata.cloud_config[self.platform][key] = value + + +class CloudProvider(CloudBase): + """Base class for cloud provider plugins. Sets up cloud resources before delegation.""" + def __init__(self, args, config_extension='.ini'): + """ + :type args: IntegrationConfig + :type config_extension: str + """ + super(CloudProvider, self).__init__(args) + + self.ci_provider = get_ci_provider() + self.remove_config = False + self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension) + self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name) + self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name) + self.config_extension = config_extension + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. 
+ :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + skip = 'cloud/%s/' % self.platform + skipped = [target.name for target in targets if skip in target.aliases] + + if skipped: + exclude.append(skip) + display.warning('Excluding tests marked "%s" which require config (see "%s"): %s' + % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped))) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + self.resource_prefix = self.ci_provider.generate_resource_prefix() + + atexit.register(self.cleanup) + + def get_remote_ssh_options(self): + """Get any additional options needed when delegating tests to a remote instance via SSH. + :rtype: list[str] + """ + return [] + + def get_docker_run_options(self): + """Get any additional options needed when delegating tests to a docker container. + :rtype: list[str] + """ + return [] + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + if self.remove_config: + os.remove(self.config_path) + + def _use_static_config(self): + """ + :rtype: bool + """ + if os.path.isfile(self.config_static_path): + display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1) + self.config_path = self.config_static_path + static = True + else: + static = False + + self.managed = not static + + return static + + def _write_config(self, content): + """ + :type content: str + """ + prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0] + + with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd: + filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name)) + + self.config_path = filename + self.remove_config = True + + display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3) + + config_fd.write(to_bytes(content)) + config_fd.flush() + + def _read_config_template(self): + """ + :rtype: str + """ + lines = read_text_file(self.config_template_path).splitlines() + lines = [line for line in lines if not line.startswith('#')] + config = '\n'.join(lines).strip() + '\n' + return config + + @staticmethod + def _populate_config_template(template, values): + """ + :type template: str + :type values: dict[str, str] + :rtype: str + """ + for key in sorted(values): + value = values[key] + template = template.replace('@%s' % key, value) + + return template + + +class CloudEnvironment(CloudBase): + """Base class for cloud environment plugins. 
Updates integration test environment after delegation.""" + def setup_once(self): + """Run setup if it has not already been run.""" + if self.setup_executed: + return + + self.setup() + self.setup_executed = True + + def setup(self): + """Setup which should be done once per environment instead of once per test target.""" + + @abc.abstractmethod + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + + def on_failure(self, target, tries): + """ + :type target: IntegrationTarget + :type tries: int + """ + + +class CloudEnvironmentConfig: + """Configuration for the environment.""" + def __init__(self, env_vars=None, ansible_vars=None, module_defaults=None, callback_plugins=None): + """ + :type env_vars: dict[str, str] | None + :type ansible_vars: dict[str, any] | None + :type module_defaults: dict[str, dict[str, any]] | None + :type callback_plugins: list[str] | None + """ + self.env_vars = env_vars + self.ansible_vars = ansible_vars + self.module_defaults = module_defaults + self.callback_plugins = callback_plugins diff --git a/test/lib/ansible_test/_internal/cloud/acme.py b/test/lib/ansible_test/_internal/cloud/acme.py new file mode 100644 index 00000000..3d0ace24 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/acme.py @@ -0,0 +1,193 @@ +"""ACME plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import time + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + find_executable, + display, + ApplicationError, + SubprocessError, +) + +from ..http import ( + HttpClient, +) + +from ..docker_util import ( + docker_run, + docker_rm, + docker_inspect, + docker_pull, + get_docker_container_id, + get_docker_hostname, + get_docker_container_ip, + get_docker_preferred_network_name, + is_docker_user_defined_network, +) + + +class ACMEProvider(CloudProvider): + """ACME plugin. Sets up cloud resources for tests.""" + DOCKER_SIMULATOR_NAME = 'acme-simulator' + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(ACMEProvider, self).__init__(args) + + # The simulator must be pinned to a specific version to guarantee CI passes with the version used. + if os.environ.get('ANSIBLE_ACME_CONTAINER'): + self.image = os.environ.get('ANSIBLE_ACME_CONTAINER') + else: + self.image = 'quay.io/ansible/acme-test-container:2.0.0' + self.container_name = '' + + def _wait_for_service(self, protocol, acme_host, port, local_part, name): + """Wait for an endpoint to accept connections.""" + if self.args.explain: + return + + client = HttpClient(self.args, always=True, insecure=True) + endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part) + + for dummy in range(1, 30): + display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1) + + try: + client.get(endpoint) + return + except SubprocessError: + pass + + time.sleep(1) + + raise ApplicationError('Timeout waiting for %s.' % name) + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. 
+        :type targets: tuple[TestTarget]
+        :type exclude: list[str]
+        """
+        docker = find_executable('docker', required=False)
+
+        if docker:
+            return
+
+        skip = 'cloud/%s/' % self.platform
+        skipped = [target.name for target in targets if skip in target.aliases]
+
+        if skipped:
+            exclude.append(skip)
+            display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    def setup(self):
+        """Setup the cloud resource before delegation and register a cleanup callback."""
+        super(ACMEProvider, self).setup()
+
+        if self._use_static_config():
+            self._setup_static()
+        else:
+            self._setup_dynamic()
+
+    def get_docker_run_options(self):
+        """Get any additional options needed when delegating tests to a docker container.
+        :rtype: list[str]
+        """
+        network = get_docker_preferred_network_name(self.args)
+
+        if self.managed and not is_docker_user_defined_network(network):
+            return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+        return []
+
+    def cleanup(self):
+        """Clean up the cloud resource and any temporary configuration files after tests complete."""
+        if self.container_name:
+            docker_rm(self.args, self.container_name)
+
+        super(ACMEProvider, self).cleanup()
+
+    def _setup_dynamic(self):
+        """Create an ACME test container using docker."""
+        container_id = get_docker_container_id()
+
+        self.container_name = self.DOCKER_SIMULATOR_NAME
+
+        results = docker_inspect(self.args, self.container_name)
+
+        if results and not results[0].get('State', {}).get('Running'):
+            docker_rm(self.args, self.container_name)
+            results = []
+
+        if results:
+            display.info('Using the existing ACME docker test container.', verbosity=1)
+        else:
+            display.info('Starting a new ACME docker test container.', verbosity=1)
+
+            if not container_id:
+                # publish the simulator ports when not running inside docker
+                publish_ports = [
+                    '-p', '5000:5000',  # control port for flask app in container
+                    '-p', '14000:14000',  # Pebble ACME CA
+                ]
+            else:
+                publish_ports = []
+
+            if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
+                docker_pull(self.args, self.image)
+
+            docker_run(
+                self.args,
+                self.image,
+                ['-d', '--name', self.container_name] + publish_ports,
+            )
+
+        if self.args.docker:
+            acme_host = self.DOCKER_SIMULATOR_NAME
+        elif container_id:
+            acme_host = self._get_simulator_address()
+            display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
+        else:
+            acme_host = get_docker_hostname()
+
+        if container_id:
+            acme_host_ip = self._get_simulator_address()
+        else:
+            acme_host_ip = get_docker_hostname()
+
+        self._set_cloud_config('acme_host', acme_host)
+
+        self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
+        self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
+
+    def _get_simulator_address(self):
+        return get_docker_container_ip(self.args, self.container_name)
+
+    def _setup_static(self):
+        raise NotImplementedError()
+
+
+class ACMEEnvironment(CloudEnvironment):
+    """ACME environment plugin.
Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + ansible_vars = dict( + acme_host=self._get_cloud_config('acme_host'), + ) + + return CloudEnvironmentConfig( + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/aws.py b/test/lib/ansible_test/_internal/cloud/aws.py new file mode 100644 index 00000000..190ef488 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/aws.py @@ -0,0 +1,124 @@ +"""AWS plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..util import ( + ApplicationError, + display, + ConfigParser, +) + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..core_ci import ( + AnsibleCoreCI, +) + + +class AwsCloudProvider(CloudProvider): + """AWS cloud provider plugin. Sets up cloud resources before delegation.""" + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + aci = self._create_ansible_core_ci() + + if aci.available: + return + + super(AwsCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(AwsCloudProvider, self).setup() + + aws_config_path = os.path.expanduser('~/.aws') + + if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote: + raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path) + + if not self._use_static_config(): + self._setup_dynamic() + + def _setup_dynamic(self): + """Request AWS credentials through the Ansible Core CI service.""" + display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1) + + config = self._read_config_template() + + aci = self._create_ansible_core_ci() + + response = aci.start() + + if not self.args.explain: + credentials = response['aws']['credentials'] + + values = dict( + ACCESS_KEY=credentials['access_key'], + SECRET_KEY=credentials['secret_key'], + SECURITY_TOKEN=credentials['session_token'], + REGION='us-east-1', + ) + + display.sensitive.add(values['SECRET_KEY']) + display.sensitive.add(values['SECURITY_TOKEN']) + + config = self._populate_config_template(config, values) + + self._write_config(config) + + def _create_ansible_core_ci(self): + """ + :rtype: AnsibleCoreCI + """ + return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider) + + +class AwsCloudEnvironment(CloudEnvironment): + """AWS cloud environment plugin. 
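+
+    A rough sketch of the data flow (values hypothetical): the ``[default]``
+    section of the written config becomes Ansible variables::
+
+        # aws_secret_key = ...     ->  ansible_vars['aws_secret_key']
+        # security_token = ...     ->  ansible_vars['security_token']
+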
Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + ansible_vars = dict( + resource_prefix=self.resource_prefix, + ) + + ansible_vars.update(dict(parser.items('default'))) + + display.sensitive.add(ansible_vars.get('aws_secret_key')) + display.sensitive.add(ansible_vars.get('security_token')) + + if 'aws_cleanup' not in ansible_vars: + ansible_vars['aws_cleanup'] = not self.managed + + env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'} + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + callback_plugins=['aws_resource_actions'], + ) + + def on_failure(self, target, tries): + """ + :type target: TestTarget + :type tries: int + """ + if not tries and self.managed: + display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. ' + 'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.' + % target.name) diff --git a/test/lib/ansible_test/_internal/cloud/azure.py b/test/lib/ansible_test/_internal/cloud/azure.py new file mode 100644 index 00000000..02465eed --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/azure.py @@ -0,0 +1,213 @@ +"""Azure plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..io import ( + read_text_file, +) + +from ..util import ( + ApplicationError, + display, + ConfigParser, +) + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..http import ( + HttpClient, + urlparse, + urlunparse, + parse_qs, +) + +from ..core_ci import ( + AnsibleCoreCI, +) + + +class AzureCloudProvider(CloudProvider): + """Azure cloud provider plugin. Sets up cloud resources before delegation.""" + SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg') + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(AzureCloudProvider, self).__init__(args) + + self.aci = None + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + aci = self._create_ansible_core_ci() + + if aci.available: + return + + if os.path.isfile(self.SHERLOCK_CONFIG_PATH): + return + + super(AzureCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(AzureCloudProvider, self).setup() + + if not self._use_static_config(): + self._setup_dynamic() + + get_config(self.config_path) # check required variables + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + if self.aci: + self.aci.stop() + + super(AzureCloudProvider, self).cleanup() + + def _setup_dynamic(self): + """Request Azure credentials through Sherlock.""" + display.info('Provisioning %s cloud environment.' 
% self.platform, verbosity=1) + + config = self._read_config_template() + response = {} + + if os.path.isfile(self.SHERLOCK_CONFIG_PATH): + sherlock_uri = read_text_file(self.SHERLOCK_CONFIG_PATH).splitlines()[0].strip() + '&rgcount=2' + + parts = urlparse(sherlock_uri) + query_string = parse_qs(parts.query) + base_uri = urlunparse(parts[:4] + ('', '')) + + if 'code' not in query_string: + example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning' + raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri) + + display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1) + + http = HttpClient(self.args) + result = http.get(sherlock_uri) + + display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1) + + if not self.args.explain: + response = result.json() + else: + aci = self._create_ansible_core_ci() + + aci_result = aci.start() + + if not self.args.explain: + response = aci_result['azure'] + self.aci = aci + + if not self.args.explain: + values = dict( + AZURE_CLIENT_ID=response['clientId'], + AZURE_SECRET=response['clientSecret'], + AZURE_SUBSCRIPTION_ID=response['subscriptionId'], + AZURE_TENANT=response['tenantId'], + RESOURCE_GROUP=response['resourceGroupNames'][0], + RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1], + ) + + display.sensitive.add(values['AZURE_SECRET']) + + config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values)) + + config = '[default]\n' + config + + self._write_config(config) + + def _create_ansible_core_ci(self): + """ + :rtype: AnsibleCoreCI + """ + return AnsibleCoreCI(self.args, 'azure', 'azure', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider) + + +class AzureCloudEnvironment(CloudEnvironment): + """Azure cloud environment plugin. Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + env_vars = get_config(self.config_path) + + display.sensitive.add(env_vars.get('AZURE_SECRET')) + display.sensitive.add(env_vars.get('AZURE_PASSWORD')) + + ansible_vars = dict( + resource_prefix=self.resource_prefix, + ) + + ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items())) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) + + def on_failure(self, target, tries): + """ + :type target: TestTarget + :type tries: int + """ + if not tries and self.managed: + display.notice('If %s failed due to permissions, the test policy may need to be updated. ' + 'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' 
% target.name) + + +def get_config(config_path): + """ + :type config_path: str + :rtype: dict[str, str] + """ + parser = ConfigParser() + parser.read(config_path) + + config = dict((key.upper(), value) for key, value in parser.items('default')) + + rg_vars = ( + 'RESOURCE_GROUP', + 'RESOURCE_GROUP_SECONDARY', + ) + + sp_vars = ( + 'AZURE_CLIENT_ID', + 'AZURE_SECRET', + 'AZURE_SUBSCRIPTION_ID', + 'AZURE_TENANT', + ) + + ad_vars = ( + 'AZURE_AD_USER', + 'AZURE_PASSWORD', + 'AZURE_SUBSCRIPTION_ID', + ) + + rg_ok = all(var in config for var in rg_vars) + sp_ok = all(var in config for var in sp_vars) + ad_ok = all(var in config for var in ad_vars) + + if not rg_ok: + raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars))) + + if not sp_ok and not ad_ok: + raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % ( + ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars)))) + + return config diff --git a/test/lib/ansible_test/_internal/cloud/cloudscale.py b/test/lib/ansible_test/_internal/cloud/cloudscale.py new file mode 100644 index 00000000..8e5885b2 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/cloudscale.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# +# (c) 2018, Gaudenz Steinlin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Cloudscale plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ConfigParser, display + + +class CloudscaleCloudProvider(CloudProvider): + """Cloudscale cloud provider plugin. Sets up cloud resources before + delegation. + """ + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(CloudscaleCloudProvider, self).__init__(args) + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + super(CloudscaleCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(CloudscaleCloudProvider, self).setup() + + if os.path.isfile(self.config_static_path): + display.info('Using existing %s cloud config: %s' + % (self.platform, self.config_static_path), + verbosity=1) + self.config_path = self.config_static_path + self.managed = False + + +class CloudscaleCloudEnvironment(CloudEnvironment): + """Cloudscale cloud environment plugin. Updates integration test environment + after delegation. 
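+
+    A sketch of the resulting mapping (token value hypothetical)::
+
+        # [default] cloudscale_api_token  ->  CLOUDSCALE_API_TOKEN env var
+        #                                 ->  cloudscale_api_token ansible var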
+ """ + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + env_vars = dict( + CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'), + ) + + display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN']) + + ansible_vars = dict( + cloudscale_resource_prefix=self.resource_prefix, + ) + + ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items())) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/cs.py b/test/lib/ansible_test/_internal/cloud/cs.py new file mode 100644 index 00000000..d028d9c4 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/cs.py @@ -0,0 +1,300 @@ +"""CloudStack plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import re +import time + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + find_executable, + ApplicationError, + display, + SubprocessError, + ConfigParser, +) + +from ..http import ( + HttpClient, + HttpError, + urlparse, +) + +from ..docker_util import ( + docker_run, + docker_rm, + docker_inspect, + docker_pull, + docker_network_inspect, + docker_exec, + get_docker_container_id, + get_docker_preferred_network_name, + get_docker_hostname, + is_docker_user_defined_network, +) + + +class CsCloudProvider(CloudProvider): + """CloudStack cloud provider plugin. Sets up cloud resources before delegation.""" + DOCKER_SIMULATOR_NAME = 'cloudstack-sim' + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(CsCloudProvider, self).__init__(args) + + # The simulator must be pinned to a specific version to guarantee CI passes with the version used. + self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0' + self.container_name = '' + self.endpoint = '' + self.host = '' + self.port = 0 + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + docker = find_executable('docker', required=False) + + if docker: + return + + skip = 'cloud/%s/' % self.platform + skipped = [target.name for target in targets if skip in target.aliases] + + if skipped: + exclude.append(skip) + display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s' + % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped))) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(CsCloudProvider, self).setup() + + if self._use_static_config(): + self._setup_static() + else: + self._setup_dynamic() + + def get_remote_ssh_options(self): + """Get any additional options needed when delegating tests to a remote instance via SSH. + :rtype: list[str] + """ + if self.managed: + return ['-R', '8888:%s:8888' % get_docker_hostname()] + + return [] + + def get_docker_run_options(self): + """Get any additional options needed when delegating tests to a docker container. 
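+
+        For the managed simulator on the default bridge network this is
+        roughly (a sketch, container name per DOCKER_SIMULATOR_NAME)::
+
+            ['--link', 'cloudstack-sim']
+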
+ :rtype: list[str] + """ + network = get_docker_preferred_network_name(self.args) + + if self.managed and not is_docker_user_defined_network(network): + return ['--link', self.DOCKER_SIMULATOR_NAME] + + return [] + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + if self.container_name: + if self.ci_provider.code: + docker_rm(self.args, self.container_name) + elif not self.args.explain: + display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name) + + super(CsCloudProvider, self).cleanup() + + def _setup_static(self): + """Configure CloudStack tests for use with static configuration.""" + parser = ConfigParser() + parser.read(self.config_static_path) + + self.endpoint = parser.get('cloudstack', 'endpoint') + + parts = urlparse(self.endpoint) + + self.host = parts.hostname + + if not self.host: + raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint) + + if parts.port: + self.port = parts.port + elif parts.scheme == 'http': + self.port = 80 + elif parts.scheme == 'https': + self.port = 443 + else: + raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint) + + display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1) + + self._wait_for_service() + + def _setup_dynamic(self): + """Create a CloudStack simulator using docker.""" + config = self._read_config_template() + + self.container_name = self.DOCKER_SIMULATOR_NAME + + results = docker_inspect(self.args, self.container_name) + + if results and not results[0]['State']['Running']: + docker_rm(self.args, self.container_name) + results = [] + + if results: + display.info('Using the existing CloudStack simulator docker container.', verbosity=1) + else: + display.info('Starting a new CloudStack simulator docker container.', verbosity=1) + docker_pull(self.args, self.image) + docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name]) + + # apply work-around for OverlayFS issue + # https://github.com/docker/for-linux/issues/72#issuecomment-319904698 + docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';']) + + if not self.args.explain: + display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.') + + container_id = get_docker_container_id() + + if container_id: + self.host = self._get_simulator_address() + display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1) + else: + self.host = get_docker_hostname() + + self.port = 8888 + self.endpoint = 'http://%s:%d' % (self.host, self.port) + + self._wait_for_service() + + if self.args.explain: + values = dict( + HOST=self.host, + PORT=str(self.port), + ) + else: + credentials = self._get_credentials() + + if self.args.docker: + host = self.DOCKER_SIMULATOR_NAME + elif self.args.remote: + host = 'localhost' + else: + host = self.host + + values = dict( + HOST=host, + PORT=str(self.port), + KEY=credentials['apikey'], + SECRET=credentials['secretkey'], + ) + + display.sensitive.add(values['SECRET']) + + config = self._populate_config_template(config, values) + + self._write_config(config) + + def _get_simulator_address(self): + current_network = get_docker_preferred_network_name(self.args) + networks = docker_network_inspect(self.args, current_network) + + try: + network = [network for network in networks if network['Name'] == 
current_network][0] + containers = network['Containers'] + container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0] + return re.sub(r'/[0-9]+$', '', container['IPv4Address']) + except Exception: + display.error('Failed to process the following docker network inspect output:\n%s' % + json.dumps(networks, indent=4, sort_keys=True)) + raise + + def _wait_for_service(self): + """Wait for the CloudStack service endpoint to accept connections.""" + if self.args.explain: + return + + client = HttpClient(self.args, always=True) + endpoint = self.endpoint + + for _iteration in range(1, 30): + display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1) + + try: + client.get(endpoint) + return + except SubprocessError: + pass + + time.sleep(10) + + raise ApplicationError('Timeout waiting for CloudStack service.') + + def _get_credentials(self): + """Wait for the CloudStack simulator to return credentials. + :rtype: dict[str, str] + """ + client = HttpClient(self.args, always=True) + endpoint = '%s/admin.json' % self.endpoint + + for _iteration in range(1, 30): + display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1) + + response = client.get(endpoint) + + if response.status_code == 200: + try: + return response.json() + except HttpError as ex: + display.error(ex) + + time.sleep(10) + + raise ApplicationError('Timeout waiting for CloudStack credentials.') + + +class CsCloudEnvironment(CloudEnvironment): + """CloudStack cloud environment plugin. Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + config = dict(parser.items('default')) + + env_vars = dict( + CLOUDSTACK_ENDPOINT=config['endpoint'], + CLOUDSTACK_KEY=config['key'], + CLOUDSTACK_SECRET=config['secret'], + CLOUDSTACK_TIMEOUT=config['timeout'], + ) + + display.sensitive.add(env_vars['CLOUDSTACK_SECRET']) + + ansible_vars = dict( + cs_resource_prefix=self.resource_prefix, + ) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/fallaxy.py b/test/lib/ansible_test/_internal/cloud/fallaxy.py new file mode 100644 index 00000000..504094bd --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/fallaxy.py @@ -0,0 +1,177 @@ +"""Fallaxy (ansible-galaxy) plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import uuid + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + find_executable, + display, +) + +from ..docker_util import ( + docker_run, + docker_rm, + docker_inspect, + docker_pull, + get_docker_container_id, +) + + +class FallaxyProvider(CloudProvider): + """Fallaxy plugin. + + Sets up Fallaxy (ansible-galaxy) stub server for tests. 
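+
+    The image can be overridden for local development via an environment
+    variable (an illustrative invocation, not part of this change)::
+
+        ANSIBLE_FALLAXY_CONTAINER=quay.io/example/fallaxy:dev ansible-test integration ...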
+
+    Its source resides at: https://github.com/ansible/fallaxy-test-container
+    """
+
+    DOCKER_SIMULATOR_NAME = 'fallaxy-stub'
+
+    def __init__(self, args):
+        """
+        :type args: TestConfig
+        """
+        super(FallaxyProvider, self).__init__(args)
+
+        if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+            self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
+        else:
+            self.image = 'quay.io/ansible/fallaxy-test-container:2.0.1'
+        self.container_name = ''
+
+    def filter(self, targets, exclude):
+        """Filter out the tests when the necessary config and resources are unavailable.
+
+        :type targets: tuple[TestTarget]
+        :type exclude: list[str]
+        """
+        docker_cmd = 'docker'
+        docker = find_executable(docker_cmd, required=False)
+
+        if docker:
+            return
+
+        skip = 'cloud/%s/' % self.platform
+        skipped = [target.name for target in targets if skip in target.aliases]
+
+        if skipped:
+            exclude.append(skip)
+            display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
+                            % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
+
+    def setup(self):
+        """Setup cloud resource before delegation and register a cleanup callback."""
+        super(FallaxyProvider, self).setup()
+
+        if self._use_static_config():
+            self._setup_static()
+        else:
+            self._setup_dynamic()
+
+    def get_docker_run_options(self):
+        """Get additional options needed when delegating tests to a container.
+
+        :rtype: list[str]
+        """
+        return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
+
+    def cleanup(self):
+        """Clean up the resource and temporary config files after tests."""
+        if self.container_name:
+            docker_rm(self.args, self.container_name)
+
+        super(FallaxyProvider, self).cleanup()
+
+    def _setup_dynamic(self):
+        container_id = get_docker_container_id()
+
+        if container_id:
+            display.info('Running in docker container: %s' % container_id, verbosity=1)
+
+        self.container_name = self.DOCKER_SIMULATOR_NAME
+
+        results = docker_inspect(self.args, self.container_name)
+
+        if results and not results[0].get('State', {}).get('Running'):
+            docker_rm(self.args, self.container_name)
+            results = []
+
+        display.info('%s Fallaxy simulator docker container.'
+                     % ('Using the existing' if results else 'Starting a new'),
+                     verbosity=1)
+
+        fallaxy_port = 8080
+        fallaxy_token = str(uuid.uuid4()).replace('-', '')
+
+        if not results:
+            if self.args.docker or container_id:
+                publish_ports = []
+            else:
+                # publish the simulator ports when not running inside docker
+                publish_ports = [
+                    '-p', ':'.join((str(fallaxy_port),) * 2),
+                ]
+
+            if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+                docker_pull(self.args, self.image)
+
+            docker_run(
+                self.args,
+                self.image,
+                ['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
+            )
+
+        if self.args.docker:
+            fallaxy_host = self.DOCKER_SIMULATOR_NAME
+        elif container_id:
+            fallaxy_host = self._get_simulator_address()
+            display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
+        else:
+            fallaxy_host = 'localhost'
+
+        self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
+        self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
+        self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)
+
+    def _get_simulator_address(self):
+        results = docker_inspect(self.args, self.container_name)
+        ipaddress = results[0]['NetworkSettings']['IPAddress']
+        return ipaddress
+
+    def _setup_static(self):
+        raise NotImplementedError()
+
+
+class FallaxyEnvironment(CloudEnvironment):
+    """Fallaxy environment plugin.
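+
+    A sketch of the server URLs handed to the tests (host and port are read
+    back from the cloud config written during setup)::
+
+        fallaxy_galaxy_server = 'http://%s:%s/api/' % (fallaxy_host, fallaxy_port)
+        fallaxy_ah_server = 'http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port)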
+
+    Updates integration test environment after delegation.
+    """
+    def get_environment_config(self):
+        """
+        :rtype: CloudEnvironmentConfig
+        """
+        fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
+        fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
+        fallaxy_port = self._get_cloud_config('FALLAXY_PORT')
+
+        return CloudEnvironmentConfig(
+            ansible_vars=dict(
+                fallaxy_token=fallaxy_token,
+                fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+                fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+            ),
+            env_vars=dict(
+                FALLAXY_TOKEN=fallaxy_token,
+                FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+                FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+            ),
+        )
diff --git a/test/lib/ansible_test/_internal/cloud/foreman.py b/test/lib/ansible_test/_internal/cloud/foreman.py
new file mode 100644
index 00000000..7517f1f6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/foreman.py
@@ -0,0 +1,191 @@
+"""Foreman plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+    CloudProvider,
+    CloudEnvironment,
+    CloudEnvironmentConfig,
+)
+
+from ..util import (
+    find_executable,
+    display,
+)
+
+from ..docker_util import (
+    docker_run,
+    docker_rm,
+    docker_inspect,
+    docker_pull,
+    get_docker_container_id,
+    get_docker_hostname,
+    get_docker_container_ip,
+    get_docker_preferred_network_name,
+    is_docker_user_defined_network,
+)
+
+
+class ForemanProvider(CloudProvider):
+    """Foreman plugin.
+
+    Sets up Foreman stub server for tests.
+    """
+
+    DOCKER_SIMULATOR_NAME = 'foreman-stub'
+
+    DOCKER_IMAGE = 'quay.io/ansible/foreman-test-container:1.4.0'
+    """Default image to run Foreman stub from.
+
+    The simulator must be pinned to a specific version
+    to guarantee CI passes with the version used.
+
+    Its source resides at:
+    https://github.com/ansible/foreman-test-container
+    """
+
+    def __init__(self, args):
+        """Set up container references for provider.
+
+        :type args: TestConfig
+        """
+        super(ForemanProvider, self).__init__(args)
+
+        self.__container_from_env = os.environ.get('ANSIBLE_FRMNSIM_CONTAINER')
+        """Overrides target container, might be used for development.
+
+        Use ANSIBLE_FRMNSIM_CONTAINER=whatever_you_want if you want
+        to use another image. Omit/empty otherwise.
+        """
+
+        self.image = self.__container_from_env or self.DOCKER_IMAGE
+        self.container_name = ''
+
+    def filter(self, targets, exclude):
+        """Filter out the tests when the necessary config and resources are unavailable.
+
+        :type targets: tuple[TestTarget]
+        :type exclude: list[str]
+        """
+        docker_cmd = 'docker'
+        docker = find_executable(docker_cmd, required=False)
+
+        if docker:
+            return
+
+        skip = 'cloud/%s/' % self.platform
+        skipped = [target.name for target in targets if skip in target.aliases]
+
+        if skipped:
+            exclude.append(skip)
+            display.warning(
+                'Excluding tests marked "%s" '
+                'which require the "%s" command: %s'
+                % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+            )
+
+    def setup(self):
+        """Setup cloud resource before delegation and register a cleanup callback."""
+        super(ForemanProvider, self).setup()
+
+        if self._use_static_config():
+            self._setup_static()
+        else:
+            self._setup_dynamic()
+
+    def get_docker_run_options(self):
+        """Get additional options needed when delegating tests to a container.
+ + :rtype: list[str] + """ + network = get_docker_preferred_network_name(self.args) + + if self.managed and not is_docker_user_defined_network(network): + return ['--link', self.DOCKER_SIMULATOR_NAME] + + return [] + + def cleanup(self): + """Clean up the resource and temporary configs files after tests.""" + if self.container_name: + docker_rm(self.args, self.container_name) + + super(ForemanProvider, self).cleanup() + + def _setup_dynamic(self): + """Spawn a Foreman stub within docker container.""" + foreman_port = 8080 + container_id = get_docker_container_id() + + self.container_name = self.DOCKER_SIMULATOR_NAME + + results = docker_inspect(self.args, self.container_name) + + if results and not results[0].get('State', {}).get('Running'): + docker_rm(self.args, self.container_name) + results = [] + + display.info( + '%s Foreman simulator docker container.' + % ('Using the existing' if results else 'Starting a new'), + verbosity=1, + ) + + if not results: + if self.args.docker or container_id: + publish_ports = [] + else: + # publish the simulator ports when not running inside docker + publish_ports = [ + '-p', ':'.join((str(foreman_port), ) * 2), + ] + + if not self.__container_from_env: + docker_pull(self.args, self.image) + + docker_run( + self.args, + self.image, + ['-d', '--name', self.container_name] + publish_ports, + ) + + if self.args.docker: + foreman_host = self.DOCKER_SIMULATOR_NAME + elif container_id: + foreman_host = self._get_simulator_address() + display.info( + 'Found Foreman simulator container address: %s' + % foreman_host, verbosity=1 + ) + else: + foreman_host = get_docker_hostname() + + self._set_cloud_config('FOREMAN_HOST', foreman_host) + self._set_cloud_config('FOREMAN_PORT', str(foreman_port)) + + def _get_simulator_address(self): + return get_docker_container_ip(self.args, self.container_name) + + def _setup_static(self): + raise NotImplementedError + + +class ForemanEnvironment(CloudEnvironment): + """Foreman environment plugin. + + Updates integration test environment after delegation. + """ + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + env_vars = dict( + FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'), + FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'), + ) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/gcp.py b/test/lib/ansible_test/_internal/cloud/gcp.py new file mode 100644 index 00000000..c8de1835 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/gcp.py @@ -0,0 +1,62 @@ +# Copyright: (c) 2018, Google Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""GCP plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..util import ( + display, + ConfigParser, +) + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + + +class GcpCloudProvider(CloudProvider): + """GCP cloud provider plugin. Sets up cloud resources before delegation.""" + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. 
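+
+        The GCP tests stay enabled whenever a static configuration file is
+        present; a sketch of the check this method relies on (file name
+        follows the usual cloud-config convention)::
+
+            os.path.isfile(self.config_static_path)  # e.g. cloud-config-gcp.ini
+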
+ :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + + if os.path.isfile(self.config_static_path): + return + + super(GcpCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(GcpCloudProvider, self).setup() + + if not self._use_static_config(): + display.notice( + 'static configuration could not be used. are you missing a template file?' + ) + + +class GcpCloudEnvironment(CloudEnvironment): + """GCP cloud environment plugin. Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + ansible_vars = dict( + resource_prefix=self.resource_prefix, + ) + + ansible_vars.update(dict(parser.items('default'))) + + return CloudEnvironmentConfig( + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/hcloud.py b/test/lib/ansible_test/_internal/cloud/hcloud.py new file mode 100644 index 00000000..5902b566 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/hcloud.py @@ -0,0 +1,116 @@ +"""Hetzner Cloud plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..util import ( + display, + ConfigParser, +) + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..core_ci import ( + AnsibleCoreCI, +) + + +class HcloudCloudProvider(CloudProvider): + """Hetzner Cloud provider plugin. Sets up cloud resources before + delegation. + """ + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(HcloudCloudProvider, self).__init__(args) + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + aci = self._create_ansible_core_ci() + + if aci.available: + return + + super(HcloudCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(HcloudCloudProvider, self).setup() + + if not self._use_static_config(): + self._setup_dynamic() + + def _setup_dynamic(self): + """Request Hetzner credentials through the Ansible Core CI service.""" + display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1) + + config = self._read_config_template() + + aci = self._create_ansible_core_ci() + + response = aci.start() + + if not self.args.explain: + token = response['hetzner']['token'] + + display.sensitive.add(token) + display.info('Hetzner Cloud Token: %s' % token, verbosity=1) + + values = dict( + TOKEN=token, + ) + + display.sensitive.add(values['TOKEN']) + + config = self._populate_config_template(config, values) + + self._write_config(config) + + def _create_ansible_core_ci(self): + """ + :rtype: AnsibleCoreCI + """ + return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider) + + +class HcloudCloudEnvironment(CloudEnvironment): + """Hetzner Cloud cloud environment plugin. Updates integration test environment + after delegation. 
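+
+    A sketch of the resulting mapping (token value hypothetical)::
+
+        # [default] hcloud_api_token  ->  HCLOUD_TOKEN env var
+        #                             ->  hcloud_token ansible var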
+    """
+
+    def get_environment_config(self):
+        """
+        :rtype: CloudEnvironmentConfig
+        """
+        parser = ConfigParser()
+        parser.read(self.config_path)
+
+        env_vars = dict(
+            HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
+        )
+
+        display.sensitive.add(env_vars['HCLOUD_TOKEN'])
+
+        ansible_vars = dict(
+            hcloud_prefix=self.resource_prefix,
+        )
+
+        ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+        return CloudEnvironmentConfig(
+            env_vars=env_vars,
+            ansible_vars=ansible_vars,
+        )
diff --git a/test/lib/ansible_test/_internal/cloud/nios.py b/test/lib/ansible_test/_internal/cloud/nios.py
new file mode 100644
index 00000000..b9a1a4e4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/nios.py
@@ -0,0 +1,193 @@
+"""NIOS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+    CloudProvider,
+    CloudEnvironment,
+    CloudEnvironmentConfig,
+)
+
+from ..util import (
+    find_executable,
+    display,
+)
+
+from ..docker_util import (
+    docker_run,
+    docker_rm,
+    docker_inspect,
+    docker_pull,
+    get_docker_container_id,
+    get_docker_hostname,
+    get_docker_container_ip,
+    get_docker_preferred_network_name,
+    is_docker_user_defined_network,
+)
+
+
+class NiosProvider(CloudProvider):
+    """Nios plugin.
+
+    Sets up NIOS mock server for tests.
+    """
+
+    DOCKER_SIMULATOR_NAME = 'nios-simulator'
+
+    DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.3.0'
+    """Default image to run the nios simulator.
+
+    The simulator must be pinned to a specific version
+    to guarantee CI passes with the version used.
+
+    Its source resides at:
+    https://github.com/ansible/nios-test-container
+    """
+
+    def __init__(self, args):
+        """Set up container references for provider.
+
+        :type args: TestConfig
+        """
+        super(NiosProvider, self).__init__(args)
+
+        self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
+        """Overrides target container, might be used for development.
+
+        Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
+        to use another image. Omit/empty otherwise.
+        """
+
+        self.image = self.__container_from_env or self.DOCKER_IMAGE
+        self.container_name = ''
+
+    def filter(self, targets, exclude):
+        """Filter out the tests when the necessary config and resources are unavailable.
+
+        :type targets: tuple[TestTarget]
+        :type exclude: list[str]
+        """
+        docker_cmd = 'docker'
+        docker = find_executable(docker_cmd, required=False)
+
+        if docker:
+            return
+
+        skip = 'cloud/%s/' % self.platform
+        skipped = [target.name for target in targets if skip in target.aliases]
+
+        if skipped:
+            exclude.append(skip)
+            display.warning(
+                'Excluding tests marked "%s" '
+                'which require the "%s" command: %s'
+                % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+            )
+
+    def setup(self):
+        """Setup cloud resource before delegation and register a cleanup callback."""
+        super(NiosProvider, self).setup()
+
+        if self._use_static_config():
+            self._setup_static()
+        else:
+            self._setup_dynamic()
+
+    def get_docker_run_options(self):
+        """Get additional options needed when delegating tests to a container.
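+
+        When the simulator is managed and the current docker network is the
+        default bridge (not user-defined), this is roughly (a sketch)::
+
+            ['--link', 'nios-simulator']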
+ + :rtype: list[str] + """ + network = get_docker_preferred_network_name(self.args) + + if self.managed and not is_docker_user_defined_network(network): + return ['--link', self.DOCKER_SIMULATOR_NAME] + + return [] + + def cleanup(self): + """Clean up the resource and temporary configs files after tests.""" + if self.container_name: + docker_rm(self.args, self.container_name) + + super(NiosProvider, self).cleanup() + + def _setup_dynamic(self): + """Spawn a NIOS simulator within docker container.""" + nios_port = 443 + container_id = get_docker_container_id() + + self.container_name = self.DOCKER_SIMULATOR_NAME + + results = docker_inspect(self.args, self.container_name) + + if results and not results[0].get('State', {}).get('Running'): + docker_rm(self.args, self.container_name) + results = [] + + display.info( + '%s NIOS simulator docker container.' + % ('Using the existing' if results else 'Starting a new'), + verbosity=1, + ) + + if not results: + if self.args.docker or container_id: + publish_ports = [] + else: + # publish the simulator ports when not running inside docker + publish_ports = [ + '-p', ':'.join((str(nios_port), ) * 2), + ] + + if not self.__container_from_env: + docker_pull(self.args, self.image) + + docker_run( + self.args, + self.image, + ['-d', '--name', self.container_name] + publish_ports, + ) + + if self.args.docker: + nios_host = self.DOCKER_SIMULATOR_NAME + elif container_id: + nios_host = self._get_simulator_address() + display.info( + 'Found NIOS simulator container address: %s' + % nios_host, verbosity=1 + ) + else: + nios_host = get_docker_hostname() + + self._set_cloud_config('NIOS_HOST', nios_host) + + def _get_simulator_address(self): + return get_docker_container_ip(self.args, self.container_name) + + def _setup_static(self): + raise NotImplementedError + + +class NiosEnvironment(CloudEnvironment): + """NIOS environment plugin. + + Updates integration test environment after delegation. + """ + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + ansible_vars = dict( + nios_provider=dict( + host=self._get_cloud_config('NIOS_HOST'), + username='admin', + password='infoblox', + ), + ) + + return CloudEnvironmentConfig( + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/opennebula.py b/test/lib/ansible_test/_internal/cloud/opennebula.py new file mode 100644 index 00000000..559093e3 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/opennebula.py @@ -0,0 +1,66 @@ +"""OpenNebula plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from . 
import (
+    CloudProvider,
+    CloudEnvironment,
+    CloudEnvironmentConfig,
+)
+
+from ..util import (
+    display,
+    ConfigParser,
+)
+
+
+class OpenNebulaCloudProvider(CloudProvider):
+    """Checks if a configuration file has been passed or fixtures are going to be used for testing."""
+
+    def filter(self, targets, exclude):
+        """No need to filter modules; they can run either from a config file or from fixtures."""
+
+    def setup(self):
+        """Setup the cloud resource before delegation and register a cleanup callback."""
+        super(OpenNebulaCloudProvider, self).setup()
+
+        if not self._use_static_config():
+            self._setup_dynamic()
+
+    def _setup_dynamic(self):
+        display.info('No config file provided, will run tests from fixtures')
+
+        config = self._read_config_template()
+        values = dict(
+            URL="http://localhost/RPC2",
+            USERNAME='oneadmin',
+            PASSWORD='onepass',
+            FIXTURES='true',
+            REPLAY='true',
+        )
+        config = self._populate_config_template(config, values)
+        self._write_config(config)
+
+
+class OpenNebulaCloudEnvironment(CloudEnvironment):
+    """
+    Updates integration test environment after delegation. Will use the config file passed as a parameter.
+    """
+    def get_environment_config(self):
+        """
+        :rtype: CloudEnvironmentConfig
+        """
+        parser = ConfigParser()
+        parser.read(self.config_path)

+        ansible_vars = dict(
+            resource_prefix=self.resource_prefix,
+        )
+
+        ansible_vars.update(dict(parser.items('default')))
+
+        display.sensitive.add(ansible_vars.get('opennebula_password'))
+
+        return CloudEnvironmentConfig(
+            ansible_vars=ansible_vars,
+        )
diff --git a/test/lib/ansible_test/_internal/cloud/openshift.py b/test/lib/ansible_test/_internal/cloud/openshift.py
new file mode 100644
index 00000000..450816bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/openshift.py
@@ -0,0 +1,236 @@
+"""OpenShift plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+    CloudProvider,
+    CloudEnvironment,
+    CloudEnvironmentConfig,
+)
+
+from ..io import (
+    read_text_file,
+)
+
+from ..util import (
+    find_executable,
+    ApplicationError,
+    display,
+    SubprocessError,
+)
+
+from ..http import (
+    HttpClient,
+)
+
+from ..docker_util import (
+    docker_exec,
+    docker_run,
+    docker_rm,
+    docker_inspect,
+    docker_pull,
+    docker_network_inspect,
+    get_docker_container_id,
+    get_docker_preferred_network_name,
+    get_docker_hostname,
+    is_docker_user_defined_network,
+)
+
+
+class OpenShiftCloudProvider(CloudProvider):
+    """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
+    DOCKER_CONTAINER_NAME = 'openshift-origin'
+
+    def __init__(self, args):
+        """
+        :type args: TestConfig
+        """
+        super(OpenShiftCloudProvider, self).__init__(args, config_extension='.kubeconfig')
+
+        # The image must be pinned to a specific version to guarantee CI passes with the version used.
+        self.image = 'openshift/origin:v3.9.0'
+        self.container_name = ''
+
+    def filter(self, targets, exclude):
+        """Filter out the cloud tests when the necessary config and resources are not available.
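+
+        A sketch of the short-circuit used by the docker-backed providers
+        (names as imported above)::
+
+            if os.path.isfile(self.config_static_path) or find_executable('docker', required=False):
+                return  # nothing to exclude
+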
+ :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + docker = find_executable('docker', required=False) + + if docker: + return + + skip = 'cloud/%s/' % self.platform + skipped = [target.name for target in targets if skip in target.aliases] + + if skipped: + exclude.append(skip) + display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s' + % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped))) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(OpenShiftCloudProvider, self).setup() + + if self._use_static_config(): + self._setup_static() + else: + self._setup_dynamic() + + def get_remote_ssh_options(self): + """Get any additional options needed when delegating tests to a remote instance via SSH. + :rtype: list[str] + """ + if self.managed: + return ['-R', '8443:%s:8443' % get_docker_hostname()] + + return [] + + def get_docker_run_options(self): + """Get any additional options needed when delegating tests to a docker container. + :rtype: list[str] + """ + network = get_docker_preferred_network_name(self.args) + + if self.managed and not is_docker_user_defined_network(network): + return ['--link', self.DOCKER_CONTAINER_NAME] + + return [] + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + if self.container_name: + docker_rm(self.args, self.container_name) + + super(OpenShiftCloudProvider, self).cleanup() + + def _setup_static(self): + """Configure OpenShift tests for use with static configuration.""" + config = read_text_file(self.config_static_path) + + match = re.search(r'^ *server: (?P.*)$', config, flags=re.MULTILINE) + + if match: + endpoint = match.group('server') + self._wait_for_service(endpoint) + else: + display.warning('Could not find OpenShift endpoint in kubeconfig. 
Skipping check for OpenShift service availability.')
+
+    def _setup_dynamic(self):
+        """Create an OpenShift container using docker."""
+        self.container_name = self.DOCKER_CONTAINER_NAME
+
+        results = docker_inspect(self.args, self.container_name)
+
+        if results and not results[0]['State']['Running']:
+            docker_rm(self.args, self.container_name)
+            results = []
+
+        if results:
+            display.info('Using the existing OpenShift docker container.', verbosity=1)
+        else:
+            display.info('Starting a new OpenShift docker container.', verbosity=1)
+            docker_pull(self.args, self.image)
+            cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
+            docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
+
+        container_id = get_docker_container_id()
+
+        if container_id:
+            host = self._get_container_address()
+            display.info('Found OpenShift container address: %s' % host, verbosity=1)
+        else:
+            host = get_docker_hostname()
+
+        port = 8443
+        endpoint = 'https://%s:%s/' % (host, port)
+
+        self._wait_for_service(endpoint)
+
+        if self.args.explain:
+            config = '# Unknown'
+        else:
+            if self.args.docker:
+                host = self.DOCKER_CONTAINER_NAME
+            elif self.args.remote:
+                host = 'localhost'
+
+            server = 'https://%s:%s' % (host, port)
+            config = self._get_config(server)
+
+        self._write_config(config)
+
+    def _get_container_address(self):
+        current_network = get_docker_preferred_network_name(self.args)
+        networks = docker_network_inspect(self.args, current_network)
+
+        try:
+            network = [network for network in networks if network['Name'] == current_network][0]
+            containers = network['Containers']
+            container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
+            return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+        except Exception:
+            display.error('Failed to process the following docker network inspect output:\n%s' %
+                          json.dumps(networks, indent=4, sort_keys=True))
+            raise
+
+    def _wait_for_service(self, endpoint):
+        """Wait for the OpenShift service endpoint to accept connections.
+        :type endpoint: str
+        """
+        if self.args.explain:
+            return
+
+        client = HttpClient(self.args, always=True, insecure=True)
+
+        for dummy in range(1, 30):
+            display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
+
+            try:
+                client.get(endpoint)
+                return
+            except SubprocessError:
+                pass
+
+            time.sleep(10)
+
+        raise ApplicationError('Timeout waiting for OpenShift service.')
+
+    def _get_config(self, server):
+        """Get OpenShift config from container.
+        :type server: str
+        :rtype: dict[str, str]
+        """
+        cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
+
+        stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
+
+        config = stdout
+        config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
+        config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
+
+        return config
+
+
+class OpenShiftCloudEnvironment(CloudEnvironment):
+    """OpenShift cloud environment plugin.
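+
+    A sketch of what the tests receive (path hypothetical)::
+
+        # K8S_AUTH_KUBECONFIG=<path to the generated .kubeconfig>
+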
Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + env_vars = dict( + K8S_AUTH_KUBECONFIG=self.config_path, + ) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/scaleway.py b/test/lib/ansible_test/_internal/cloud/scaleway.py new file mode 100644 index 00000000..22abe197 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/scaleway.py @@ -0,0 +1,72 @@ +"""Scaleway plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + ConfigParser, + display, +) + + +class ScalewayCloudProvider(CloudProvider): + """Checks if a configuration file has been passed or fixtures are going to be used for testing""" + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(ScalewayCloudProvider, self).__init__(args) + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + super(ScalewayCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(ScalewayCloudProvider, self).setup() + + if os.path.isfile(self.config_static_path): + self.config_path = self.config_static_path + self.managed = False + + +class ScalewayCloudEnvironment(CloudEnvironment): + """ + Updates integration test environment after delegation. Will setup the config file as parameter. + """ + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + env_vars = dict( + SCW_API_KEY=parser.get('default', 'key'), + SCW_ORG=parser.get('default', 'org') + ) + + display.sensitive.add(env_vars['SCW_API_KEY']) + + ansible_vars = dict( + scw_org=parser.get('default', 'org'), + ) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/cloud/tower.py b/test/lib/ansible_test/_internal/cloud/tower.py new file mode 100644 index 00000000..227d170c --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/tower.py @@ -0,0 +1,255 @@ +"""Tower plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import time + +from ..util import ( + display, + ApplicationError, + SubprocessError, + ConfigParser, +) + +from ..util_common import ( + run_command, +) + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..core_ci import ( + AnsibleCoreCI, +) + + +class TowerCloudProvider(CloudProvider): + """Tower cloud provider plugin. Sets up cloud resources before delegation.""" + def __init__(self, args): + """ + :type args: TestConfig + """ + super(TowerCloudProvider, self).__init__(args) + + self.aci = None + self.version = '' + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. 
+ :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + aci = get_tower_aci(self.args) + + if aci.available: + return + + super(TowerCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(TowerCloudProvider, self).setup() + + if self._use_static_config(): + self._setup_static() + else: + self._setup_dynamic() + + def check_tower_version(self, fallback=None): + """Check the Tower version being tested and determine the correct CLI version to use. + :type fallback: str | None + """ + tower_cli_version_map = { + '3.1.5': '3.1.8', + '3.2.3': '3.3.0', + '3.3.5': '3.3.3', + '3.4.3': '3.3.3', + '3.6.3': '3.3.8', + } + + cli_version = tower_cli_version_map.get(self.version, fallback) + + if not cli_version: + raise ApplicationError('Mapping to ansible-tower-cli version required for Tower version: %s' % self.version) + + self._set_cloud_config('tower_cli_version', cli_version) + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + # cleanup on success or failure is not yet supported due to how cleanup is called + if self.aci and self.args.remote_terminate == 'always': + self.aci.stop() + + super(TowerCloudProvider, self).cleanup() + + def _setup_static(self): + config = TowerConfig.parse(self.config_static_path) + + self.version = config.version + self.check_tower_version() + + def _setup_dynamic(self): + """Request Tower credentials through the Ansible Core CI service.""" + display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1) + + # temporary solution to allow version selection + self.version = os.environ.get('TOWER_VERSION', '3.6.3') + self.check_tower_version(os.environ.get('TOWER_CLI_VERSION')) + + aci = get_tower_aci(self.args, self.version) + aci.start() + aci.wait() + + connection = aci.get() + + config = self._read_config_template() + + if not self.args.explain: + self.aci = aci + + values = dict( + VERSION=self.version, + HOST=connection.hostname, + USERNAME=connection.username, + PASSWORD=connection.password, + ) + + display.sensitive.add(values['PASSWORD']) + + config = self._populate_config_template(config, values) + + self._write_config(config) + + +class TowerCloudEnvironment(CloudEnvironment): + """Tower cloud environment plugin. 
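+
+    A sketch of the environment handed to the tests (values hypothetical)::
+
+        # TOWER_HOST=tower.example.com   TOWER_USERNAME=admin
+        # TOWER_PASSWORD=...             TOWER_VERSION=3.6.3
+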
Updates integration test environment after delegation.""" + def setup(self): + """Setup which should be done once per environment instead of once per test target.""" + self.setup_cli() + self.disable_pendo() + + def setup_cli(self): + """Install the correct Tower CLI for the version of Tower being tested.""" + tower_cli_version = self._get_cloud_config('tower_cli_version') + + display.info('Installing Tower CLI version: %s' % tower_cli_version) + + cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version] + + run_command(self.args, cmd) + + cmd = ['tower-cli', 'config', 'verify_ssl', 'false'] + run_command(self.args, cmd, capture=True) + + def disable_pendo(self): + """Disable Pendo tracking.""" + display.info('Disable Pendo tracking') + + config = TowerConfig.parse(self.config_path) + + # tower-cli does not recognize TOWER_ environment variables + cmd = ['tower-cli', 'setting', 'modify', 'PENDO_TRACKING_STATE', 'off', + '-h', config.host, '-u', config.username, '-p', config.password] + + attempts = 60 + + while True: + attempts -= 1 + + try: + run_command(self.args, cmd, capture=True) + return + except SubprocessError as ex: + if not attempts: + raise ApplicationError('Timed out trying to disable Pendo tracking:\n%s' % ex) + + time.sleep(5) + + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + config = TowerConfig.parse(self.config_path) + + env_vars = config.environment + + ansible_vars = dict((key.lower(), value) for key, value in env_vars.items()) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) + + +class TowerConfig: + """Tower settings.""" + def __init__(self, values): + self.version = values.get('version') + self.host = values.get('host') + self.username = values.get('username') + self.password = values.get('password') + + if self.password: + display.sensitive.add(self.password) + + @property + def environment(self): + """Tower settings as environment variables. + :rtype: dict[str, str] + """ + env = dict( + TOWER_VERSION=self.version, + TOWER_HOST=self.host, + TOWER_USERNAME=self.username, + TOWER_PASSWORD=self.password, + ) + + return env + + @staticmethod + def parse(path): + """ + :type path: str + :rtype: TowerConfig + """ + parser = ConfigParser() + parser.read(path) + + keys = ( + 'version', + 'host', + 'username', + 'password', + ) + + values = dict((k, parser.get('default', k)) for k in keys) + config = TowerConfig(values) + + missing = [k for k in keys if not values.get(k)] + + if missing: + raise ApplicationError('Missing or empty Tower configuration value(s): %s' % ', '.join(missing)) + + return config + + +def get_tower_aci(args, version=None): + """ + :type args: EnvironmentConfig + :type version: str | None + :rtype: AnsibleCoreCI + """ + if version: + persist = True + else: + version = '' + persist = False + + return AnsibleCoreCI(args, 'tower', version, persist=persist, stage=args.remote_stage, provider=args.remote_provider) diff --git a/test/lib/ansible_test/_internal/cloud/vcenter.py b/test/lib/ansible_test/_internal/cloud/vcenter.py new file mode 100644 index 00000000..3b38a19e --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/vcenter.py @@ -0,0 +1,232 @@ +"""VMware vCenter plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . 
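The disable_pendo() method above retries a command a fixed number of times with a sleep between attempts. The same loop extracted into a reusable sketch; RuntimeError stands in for ApplicationError and the flaky callable is purely illustrative:

import time

def retry(func, attempts=60, delay=5):
    """Call func until it succeeds or attempts run out, like the disable_pendo() loop."""
    while True:
        attempts -= 1
        try:
            return func()
        except Exception as ex:  # the original narrows this to SubprocessError
            if not attempts:
                raise RuntimeError('Timed out retrying: %s' % ex)
        time.sleep(delay)

calls = dict(count=0)

def flaky():
    """Illustrative stand-in for the tower-cli invocation: fails twice, then works."""
    calls['count'] += 1
    if calls['count'] < 3:
        raise ValueError('not ready yet')
    return 'ok'

assert retry(flaky, attempts=5, delay=0) == 'ok'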
import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + find_executable, + display, + ConfigParser, + ApplicationError, +) + +from ..docker_util import ( + docker_run, + docker_rm, + docker_inspect, + docker_pull, + get_docker_container_id, + get_docker_hostname, + get_docker_container_ip, + get_docker_preferred_network_name, + is_docker_user_defined_network, +) + + +class VcenterProvider(CloudProvider): + """VMware vcenter/esx plugin. Sets up cloud resources for tests.""" + DOCKER_SIMULATOR_NAME = 'vcenter-simulator' + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(VcenterProvider, self).__init__(args) + + # The simulator must be pinned to a specific version to guarantee CI passes with the version used. + if os.environ.get('ANSIBLE_VCSIM_CONTAINER'): + self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER') + else: + self.image = 'quay.io/ansible/vcenter-test-container:1.7.0' + self.container_name = '' + + # VMware tests can be run on govcsim or BYO with a static config file. + # The simulator is the default if no config is provided. + self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim') + self.insecure = False + self.proxy = None + self.platform = 'vcenter' + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if self.vmware_test_platform == 'govcsim' or (self.vmware_test_platform == '' and not os.path.isfile(self.config_static_path)): + docker = find_executable('docker', required=False) + + if docker: + return + + skip = 'cloud/%s/' % self.platform + skipped = [target.name for target in targets if skip in target.aliases] + + if skipped: + exclude.append(skip) + display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s' + % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped))) + elif self.vmware_test_platform == 'static': + if os.path.isfile(self.config_static_path): + return + + super(VcenterProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(VcenterProvider, self).setup() + + self._set_cloud_config('vmware_test_platform', self.vmware_test_platform) + if self.vmware_test_platform == 'govcsim': + self._setup_dynamic_simulator() + self.managed = True + elif self.vmware_test_platform == 'static': + self._use_static_config() + self._setup_static() + else: + raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform) + + def get_docker_run_options(self): + """Get any additional options needed when delegating tests to a docker container. 
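VcenterProvider.filter() above excludes targets by alias prefix when the docker command is missing. A toy version of that selection; the Target class and alias strings are simplified stand-ins for ansible-test's real test target objects:

class Target:
    def __init__(self, name, aliases):
        self.name = name
        self.aliases = aliases

def filter_cloud_targets(platform, targets, exclude):
    """Collect targets aliased to a cloud platform and record the exclusion prefix."""
    skip = 'cloud/%s/' % platform
    skipped = [target.name for target in targets if skip in target.aliases]
    if skipped:
        exclude.append(skip)
    return skipped

targets = [
    Target('vmware_guest', ['cloud/vcenter/']),
    Target('ping', ['shippable/posix/']),
]
exclude = []
assert filter_cloud_targets('vcenter', targets, exclude) == ['vmware_guest']
assert exclude == ['cloud/vcenter/']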
+ :rtype: list[str] + """ + network = get_docker_preferred_network_name(self.args) + + if self.managed and not is_docker_user_defined_network(network): + return ['--link', self.DOCKER_SIMULATOR_NAME] + + return [] + + def cleanup(self): + """Clean up the cloud resource and any temporary configuration files after tests complete.""" + if self.container_name: + docker_rm(self.args, self.container_name) + + super(VcenterProvider, self).cleanup() + + def _setup_dynamic_simulator(self): + """Create a vcenter simulator using docker.""" + container_id = get_docker_container_id() + + self.container_name = self.DOCKER_SIMULATOR_NAME + + results = docker_inspect(self.args, self.container_name) + + if results and not results[0].get('State', {}).get('Running'): + docker_rm(self.args, self.container_name) + results = [] + + if results: + display.info('Using the existing vCenter simulator docker container.', verbosity=1) + else: + display.info('Starting a new vCenter simulator docker container.', verbosity=1) + + if not self.args.docker and not container_id: + # publish the simulator ports when not running inside docker + publish_ports = [ + '-p', '1443:443', + '-p', '8080:8080', + '-p', '8989:8989', + '-p', '5000:5000', # control port for flask app in simulator + ] + else: + publish_ports = [] + + if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'): + docker_pull(self.args, self.image) + + docker_run( + self.args, + self.image, + ['-d', '--name', self.container_name] + publish_ports, + ) + + if self.args.docker: + vcenter_hostname = self.DOCKER_SIMULATOR_NAME + elif container_id: + vcenter_hostname = self._get_simulator_address() + display.info('Found vCenter simulator container address: %s' % vcenter_hostname, verbosity=1) + else: + vcenter_hostname = get_docker_hostname() + + self._set_cloud_config('vcenter_hostname', vcenter_hostname) + + def _get_simulator_address(self): + return get_docker_container_ip(self.args, self.container_name) + + def _setup_static(self): + if not os.path.exists(self.config_static_path): + raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path) + + parser = ConfigParser({ + 'vcenter_port': '443', + 'vmware_proxy_host': '', + 'vmware_proxy_port': '8080'}) + parser.read(self.config_static_path) + + if parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false'): + self.insecure = True + proxy_host = parser.get('DEFAULT', 'vmware_proxy_host') + proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port')) + if proxy_host and proxy_port: + self.proxy = 'http://%s:%d' % (proxy_host, proxy_port) + + +class VcenterEnvironment(CloudEnvironment): + """VMware vcenter/esx environment plugin. 
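_setup_static() above relies on ConfigParser constructor defaults so optional keys such as vcenter_port can be omitted from the static config. A runnable sketch of that pattern with invented values, using the stdlib configparser:

from configparser import ConfigParser

# Illustrative static config; vcenter_port and vmware_proxy_port are deliberately
# omitted so the constructor defaults fill them in, as in _setup_static().
SAMPLE = """
[DEFAULT]
vcenter_hostname = vcenter.example.com
vmware_validate_certs = no
vmware_proxy_host = proxy.example.com
"""

parser = ConfigParser({
    'vcenter_port': '443',
    'vmware_proxy_host': '',
    'vmware_proxy_port': '8080'})
parser.read_string(SAMPLE)

insecure = parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false')
proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))
proxy = 'http://%s:%d' % (proxy_host, proxy_port) if proxy_host and proxy_port else None

assert insecure and proxy == 'http://proxy.example.com:8080'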
Updates integration test environment after delegation.""" + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + try: + # We may be in a container, so we cannot just reach VMWARE_TEST_PLATFORM; + # we do a try/except instead. + parser = ConfigParser() + parser.read(self.config_path) # static + + env_vars = dict() + ansible_vars = dict( + resource_prefix=self.resource_prefix, + ) + ansible_vars.update(dict(parser.items('DEFAULT', raw=True))) + except KeyError: # govcsim + env_vars = dict( + VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'), + VCENTER_USERNAME='user', + VCENTER_PASSWORD='pass', + ) + + ansible_vars = dict( + vcsim=self._get_cloud_config('vcenter_hostname'), + vcenter_hostname=self._get_cloud_config('vcenter_hostname'), + vcenter_username='user', + vcenter_password='pass', + ) + # Shippable starts ansible-test from within an existing container, + # and in this case, we don't have to change the vcenter port. + if not self.args.docker and not get_docker_container_id(): + ansible_vars['vcenter_port'] = '1443' + + for key, value in ansible_vars.items(): + if key.endswith('_password'): + display.sensitive.add(value) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + module_defaults={ + 'group/vmware': { + 'hostname': ansible_vars['vcenter_hostname'], + 'username': ansible_vars['vcenter_username'], + 'password': ansible_vars['vcenter_password'], + 'port': ansible_vars.get('vcenter_port', '443'), + 'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'), + }, + }, + ) diff --git a/test/lib/ansible_test/_internal/cloud/vultr.py b/test/lib/ansible_test/_internal/cloud/vultr.py new file mode 100644 index 00000000..ce6184f7 --- /dev/null +++ b/test/lib/ansible_test/_internal/cloud/vultr.py @@ -0,0 +1,71 @@ +"""Vultr plugin for integration tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . import ( + CloudProvider, + CloudEnvironment, + CloudEnvironmentConfig, +) + +from ..util import ( + ConfigParser, + display, +) + + +class VultrCloudProvider(CloudProvider): + """Checks if a configuration file has been passed or fixtures are going to be used for testing.""" + + def __init__(self, args): + """ + :type args: TestConfig + """ + super(VultrCloudProvider, self).__init__(args) + + def filter(self, targets, exclude): + """Filter out the cloud tests when the necessary config and resources are not available. + :type targets: tuple[TestTarget] + :type exclude: list[str] + """ + if os.path.isfile(self.config_static_path): + return + + super(VultrCloudProvider, self).filter(targets, exclude) + + def setup(self): + """Setup the cloud resource before delegation and register a cleanup callback.""" + super(VultrCloudProvider, self).setup() + + if os.path.isfile(self.config_static_path): + self.config_path = self.config_static_path + self.managed = False + + +class VultrCloudEnvironment(CloudEnvironment): + """ + Updates integration test environment after delegation. Will set up the config file as a parameter.
+ """ + def get_environment_config(self): + """ + :rtype: CloudEnvironmentConfig + """ + parser = ConfigParser() + parser.read(self.config_path) + + env_vars = dict( + VULTR_API_KEY=parser.get('default', 'key'), + ) + + display.sensitive.add(env_vars['VULTR_API_KEY']) + + ansible_vars = dict( + vultr_resource_prefix=self.resource_prefix, + ) + + return CloudEnvironmentConfig( + env_vars=env_vars, + ansible_vars=ansible_vars, + ) diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py new file mode 100644 index 00000000..a3c31959 --- /dev/null +++ b/test/lib/ansible_test/_internal/config.py @@ -0,0 +1,356 @@ +"""Configuration classes.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + +from . import types as t + +from .util import ( + find_python, + generate_pip_command, + ApplicationError, +) + +from .util_common import ( + docker_qualify_image, + get_docker_completion, + get_remote_completion, + CommonConfig, +) + +from .metadata import ( + Metadata, +) + +from .data import ( + data_context, +) + +try: + TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig') +except AttributeError: + TIntegrationConfig = None # pylint: disable=invalid-name + + +class ParsedRemote: + """A parsed version of a "remote" string.""" + def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None + self.arch = arch + self.platform = platform + self.version = version + + @staticmethod + def parse(value): # type: (str) -> t.Optional['ParsedRemote'] + """Return a ParsedRemote from the given value or None if the syntax is invalid.""" + parts = value.split('/') + + if len(parts) == 2: + arch = None + platform, version = parts + elif len(parts) == 3: + arch, platform, version = parts + else: + return None + + return ParsedRemote(arch, platform, version) + + +class EnvironmentConfig(CommonConfig): + """Configuration common to all commands which execute in an environment.""" + def __init__(self, args, command): + """ + :type args: any + :type command: str + """ + super(EnvironmentConfig, self).__init__(args, command) + + self.local = args.local is True + self.venv = args.venv + self.venv_system_site_packages = args.venv_system_site_packages + + self.python = args.python if 'python' in args else None # type: str + + self.docker = docker_qualify_image(args.docker) # type: str + self.docker_raw = args.docker # type: str + self.remote = args.remote # type: str + + if self.remote: + self.parsed_remote = ParsedRemote.parse(self.remote) + + if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version: + raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' 
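ParsedRemote.parse() above accepts either a two-part or a three-part remote name. The same splitting logic as a standalone function; the dict return shape and sample platform names are illustrative:

def parse_remote(value):
    """Split 'platform/version' or 'arch/platform/version'; None if the syntax is invalid."""
    parts = value.split('/')
    if len(parts) == 2:
        arch = None
        platform, version = parts
    elif len(parts) == 3:
        arch, platform, version = parts
    else:
        return None
    return dict(arch=arch, platform=platform, version=version)

assert parse_remote('freebsd/12.1') == dict(arch=None, platform='freebsd', version='12.1')
assert parse_remote('power/centos/7')['arch'] == 'power'
assert parse_remote('freebsd') is None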
% self.remote) + else: + self.parsed_remote = None + + self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool + self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool + self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool + self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str + self.docker_memory = args.docker_memory if 'docker_memory' in args else None + self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str + self.docker_network = args.docker_network if 'docker_network' in args else None # type: str + + if self.docker_seccomp is None: + self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default') + + self.remote_stage = args.remote_stage # type: str + self.remote_provider = args.remote_provider # type: str + self.remote_endpoint = args.remote_endpoint # type: t.Optional[str] + self.remote_aws_region = args.remote_aws_region # type: str + self.remote_terminate = args.remote_terminate # type: str + + if self.remote_provider == 'default': + self.remote_provider = None + + self.requirements = args.requirements # type: bool + + if self.python == 'default': + self.python = None + + actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2]) + + self.python_version = self.python or actual_major_minor + self.python_interpreter = args.python_interpreter + + self.pip_check = args.pip_check + + self.delegate = self.docker or self.remote or self.venv + self.delegate_args = [] # type: t.List[str] + + if self.delegate: + self.requirements = True + + self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool + self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str + + if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled': + self.httptester = False + + if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled': + self.pip_check = False + + if args.check_python and args.check_python != actual_major_minor: + raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python)) + + if self.docker_keep_git: + def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None + """Add files from the content root .git directory to the payload file list.""" + for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')): + paths = [os.path.join(dirpath, filename) for filename in filenames] + files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths) + + data_context().register_payload_callback(git_callback) + + @property + def python_executable(self): + """ + :rtype: str + """ + return find_python(self.python_version) + + @property + def pip_command(self): + """ + :rtype: list[str] + """ + return generate_pip_command(self.python_executable) + + def get_delegated_completion(self): + """Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary. 
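The git_callback above walks the content root's .git directory and records (absolute, relative) path pairs for the delegation payload. The walk in isolation, runnable against whatever the current working directory provides:

import os

def collect_git_files(content_root):
    """Collect (path, relative path) pairs for everything under .git, the same
    shape git_callback() appends to the payload file list."""
    files = []
    for dirpath, _dirnames, filenames in os.walk(os.path.join(content_root, '.git')):
        paths = [os.path.join(dirpath, filename) for filename in filenames]
        files.extend((path, os.path.relpath(path, content_root)) for path in paths)
    return files

# Harmless anywhere: os.walk() simply yields nothing if ./.git does not exist.
print('%d files under .git' % len(collect_git_files('.')))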
+ :rtype: dict[str, str] + """ + if self.docker: + return get_docker_completion().get(self.docker_raw, {}) + + if self.remote: + return get_remote_completion().get(self.remote, {}) + + return {} + + +class TestConfig(EnvironmentConfig): + """Configuration common to all test commands.""" + def __init__(self, args, command): + """ + :type args: any + :type command: str + """ + super(TestConfig, self).__init__(args, command) + + self.coverage = args.coverage # type: bool + self.coverage_label = args.coverage_label # type: str + self.coverage_check = args.coverage_check # type: bool + self.coverage_config_base_path = None # type: t.Optional[str] + self.include = args.include or [] # type: t.List[str] + self.exclude = args.exclude or [] # type: t.List[str] + self.require = args.require or [] # type: t.List[str] + + self.changed = args.changed # type: bool + self.tracked = args.tracked # type: bool + self.untracked = args.untracked # type: bool + self.committed = args.committed # type: bool + self.staged = args.staged # type: bool + self.unstaged = args.unstaged # type: bool + self.changed_from = args.changed_from # type: str + self.changed_path = args.changed_path # type: t.List[str] + self.base_branch = args.base_branch # type: str + + self.lint = args.lint if 'lint' in args else False # type: bool + self.junit = args.junit if 'junit' in args else False # type: bool + self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool + + self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata() + self.metadata_path = None + + if self.coverage_check: + self.coverage = True + + def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None + """Add the metadata file to the payload file list.""" + config = self + + if self.metadata_path: + files.append((os.path.abspath(config.metadata_path), config.metadata_path)) + + data_context().register_payload_callback(metadata_callback) + + +class ShellConfig(EnvironmentConfig): + """Configuration for the shell command.""" + def __init__(self, args): + """ + :type args: any + """ + super(ShellConfig, self).__init__(args, 'shell') + + self.raw = args.raw # type: bool + + if self.raw: + self.httptester = False + + +class SanityConfig(TestConfig): + """Configuration for the sanity command.""" + def __init__(self, args): + """ + :type args: any + """ + super(SanityConfig, self).__init__(args, 'sanity') + + self.test = args.test # type: t.List[str] + self.skip_test = args.skip_test # type: t.List[str] + self.list_tests = args.list_tests # type: bool + self.allow_disabled = args.allow_disabled # type: bool + self.enable_optional_errors = args.enable_optional_errors # type: bool + self.info_stderr = self.lint + + +class IntegrationConfig(TestConfig): + """Configuration for the integration command.""" + def __init__(self, args, command): + """ + :type args: any + :type command: str + """ + super(IntegrationConfig, self).__init__(args, command) + + self.start_at = args.start_at # type: str + self.start_at_task = args.start_at_task # type: str + self.allow_destructive = args.allow_destructive # type: bool + self.allow_root = args.allow_root # type: bool + self.allow_disabled = args.allow_disabled # type: bool + self.allow_unstable = args.allow_unstable # type: bool + self.allow_unstable_changed = args.allow_unstable_changed # type: bool + self.allow_unsupported = args.allow_unsupported # type: bool + self.retry_on_error = args.retry_on_error # type: bool + self.continue_on_error = args.continue_on_error # type: bool 
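get_delegated_completion() above returns per-delegation settings that drive feature toggles such as httptester and pip-check. A sketch of that lookup-with-default pattern; the table contents are hypothetical, not real completion data:

# Hypothetical completion data keyed by docker image name.
DOCKER_COMPLETION = {
    'default': {'seccomp': 'default'},
    'centos6': {'seccomp': 'unconfined', 'httptester': 'disabled'},
}

def get_delegated_completion(docker_raw):
    """Return per-image settings, or an empty dict when nothing matches."""
    return DOCKER_COMPLETION.get(docker_raw, {})

# Toggles default to 'enabled' unless the completion data says otherwise.
assert get_delegated_completion('centos6').get('httptester', 'enabled') == 'disabled'
assert get_delegated_completion('unknown').get('pip-check', 'enabled') == 'enabled'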
+ self.debug_strategy = args.debug_strategy # type: bool + self.changed_all_target = args.changed_all_target # type: str + self.changed_all_mode = args.changed_all_mode # type: str + self.list_targets = args.list_targets # type: bool + self.tags = args.tags + self.skip_tags = args.skip_tags + self.diff = args.diff + self.no_temp_workdir = args.no_temp_workdir + self.no_temp_unicode = args.no_temp_unicode + + if self.get_delegated_completion().get('temp-unicode', 'enabled') == 'disabled': + self.no_temp_unicode = True + + if self.list_targets: + self.explain = True + self.info_stderr = True + + def get_ansible_config(self): # type: () -> str + """Return the path to the Ansible config for the given config.""" + ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command) + ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path) + + if not os.path.exists(ansible_config_path): + # use the default empty configuration unless one has been provided + ansible_config_path = super(IntegrationConfig, self).get_ansible_config() + + return ansible_config_path + + +class PosixIntegrationConfig(IntegrationConfig): + """Configuration for the posix integration command.""" + + def __init__(self, args): + """ + :type args: any + """ + super(PosixIntegrationConfig, self).__init__(args, 'integration') + + +class WindowsIntegrationConfig(IntegrationConfig): + """Configuration for the windows integration command.""" + + def __init__(self, args): + """ + :type args: any + """ + super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration') + + self.windows = args.windows # type: t.List[str] + self.inventory = args.inventory # type: str + + if self.windows: + self.allow_destructive = True + + +class NetworkIntegrationConfig(IntegrationConfig): + """Configuration for the network integration command.""" + + def __init__(self, args): + """ + :type args: any + """ + super(NetworkIntegrationConfig, self).__init__(args, 'network-integration') + + self.platform = args.platform # type: t.List[str] + self.platform_collection = dict(args.platform_collection or []) # type: t.Dict[str, str] + self.platform_connection = dict(args.platform_connection or []) # type: t.Dict[str, str] + self.inventory = args.inventory # type: str + self.testcase = args.testcase # type: str + + +class UnitsConfig(TestConfig): + """Configuration for the units command.""" + def __init__(self, args): + """ + :type args: any + """ + super(UnitsConfig, self).__init__(args, 'units') + + self.collect_only = args.collect_only # type: bool + self.num_workers = args.num_workers # type: int + + self.requirements_mode = args.requirements_mode if 'requirements_mode' in args else '' + + if self.requirements_mode == 'only': + self.requirements = True + elif self.requirements_mode == 'skip': + self.requirements = False diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py new file mode 100644 index 00000000..f4307822 --- /dev/null +++ b/test/lib/ansible_test/_internal/constants.py @@ -0,0 +1,10 @@ +"""Constants used by ansible-test. Imports should not be used in this file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True. +# This will affect all Python subprocesses. 
It will also affect the current Python process if set before subprocess is imported for the first time. +SOFT_RLIMIT_NOFILE = 1024 + +# File used to track the ansible-test test execution timeout. +TIMEOUT_PATH = '.ansible-test-timeout.json' diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py new file mode 100644 index 00000000..c984f4fe --- /dev/null +++ b/test/lib/ansible_test/_internal/core_ci.py @@ -0,0 +1,680 @@ +"""Access Ansible Core CI remote services.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import re +import traceback +import uuid +import errno +import time + +from . import types as t + +from .http import ( + HttpClient, + HttpResponse, + HttpError, +) + +from .io import ( + make_dirs, + read_text_file, + write_json_file, + write_text_file, +) + +from .util import ( + ApplicationError, + display, + ANSIBLE_TEST_DATA_ROOT, +) + +from .util_common import ( + run_command, + ResultType, +) + +from .config import ( + EnvironmentConfig, +) + +from .ci import ( + AuthContext, + get_ci_provider, +) + +from .data import ( + data_context, +) + +AWS_ENDPOINTS = { + 'us-east-1': 'https://ansible-core-ci.testing.ansible.com', +} + + +class AnsibleCoreCI: + """Client for Ansible Core CI services.""" + def __init__(self, args, platform, version, stage='prod', persist=True, load=True, provider=None, arch=None): + """ + :type args: EnvironmentConfig + :type platform: str + :type version: str + :type stage: str + :type persist: bool + :type load: bool + :type provider: str | None + :type arch: str | None + """ + self.args = args + self.arch = arch + self.platform = platform + self.version = version + self.stage = stage + self.client = HttpClient(args) + self.connection = None + self.instance_id = None + self.endpoint = None + self.max_threshold = 1 + self.retries = 3 + self.ci_provider = get_ci_provider() + self.auth_context = AuthContext() + + if self.arch: + self.name = '%s-%s-%s' % (self.arch, self.platform, self.version) + else: + self.name = '%s-%s' % (self.platform, self.version) + + # Assign each supported platform to one provider. + # This is used to determine the provider from the platform when no provider is specified. + providers = dict( + aws=( + 'aws', + 'windows', + 'freebsd', + 'vyos', + 'junos', + 'ios', + 'tower', + 'rhel', + 'hetzner', + ), + azure=( + 'azure', + ), + ibmps=( + 'aix', + 'ibmi', + ), + ibmvpc=( + 'centos arch=power', # avoid ibmvpc as default for no-arch centos to avoid making centos default to power + ), + parallels=( + 'macos', + 'osx', + ), + ) + + # Currently ansible-core-ci has no concept of arch selection. This effectively means each provider only supports one arch. + # The list below identifies which platforms accept an arch, and which one. These platforms can only be used with the specified arch. + provider_arches = dict( + ibmvpc='power', + ) + + if provider: + # override default provider selection (not all combinations are valid) + self.provider = provider + else: + self.provider = None + + for candidate in providers: + choices = [ + platform, + '%s arch=%s' % (platform, arch), + ] + + if any(choice in providers[candidate] for choice in choices): + # assign default provider based on platform + self.provider = candidate + break + + # If a provider has been selected, make sure the correct arch (or none) has been selected. 
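The providers table above assigns a default provider per platform, optionally keyed by arch. A condensed, runnable version of that selection loop; the table is trimmed to a few entries for illustration:

# Trimmed copy of the providers table; values are the platform "choices" matched below.
PROVIDERS = dict(
    aws=('aws', 'windows', 'freebsd', 'rhel'),
    parallels=('macos', 'osx'),
    ibmvpc=('centos arch=power',),
)

def pick_provider(platform, arch=None):
    """Assign a default provider by matching the platform, optionally with an arch."""
    choices = [platform, '%s arch=%s' % (platform, arch)]
    for candidate, platforms in PROVIDERS.items():
        if any(choice in platforms for choice in choices):
            return candidate
    return None

assert pick_provider('freebsd') == 'aws'
assert pick_provider('centos', 'power') == 'ibmvpc'
assert pick_provider('centos') is None  # plain centos deliberately has no default here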
+ if self.provider: + required_arch = provider_arches.get(self.provider) + + if self.arch != required_arch: + if required_arch: + if self.arch: + raise ApplicationError('Provider "%s" requires the "%s" arch instead of "%s".' % (self.provider, required_arch, self.arch)) + + raise ApplicationError('Provider "%s" requires the "%s" arch.' % (self.provider, required_arch)) + + raise ApplicationError('Provider "%s" does not support specification of an arch.' % self.provider) + + self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage)) + + if self.provider in ('aws', 'azure', 'ibmps', 'ibmvpc'): + if args.remote_aws_region: + display.warning('The --remote-aws-region option is obsolete and will be removed in a future version of ansible-test.') + # permit command-line override of region selection + region = args.remote_aws_region + # use a dedicated CI key when overriding the region selection + self.auth_context.region = args.remote_aws_region + else: + region = 'us-east-1' + + self.path = "%s-%s" % (self.path, region) + + if self.args.remote_endpoint: + self.endpoints = (self.args.remote_endpoint,) + else: + self.endpoints = (AWS_ENDPOINTS[region],) + + self.ssh_key = SshKey(args) + + if self.platform == 'windows': + self.port = 5986 + else: + self.port = 22 + + if self.provider == 'ibmps': + # Additional retries are needed to accommodate images transitioning + # to the active state in the IBM cloud. This operation can take up to + # 90 seconds. + self.retries = 7 + elif self.provider == 'parallels': + if self.args.remote_endpoint: + self.endpoints = (self.args.remote_endpoint,) + else: + self.endpoints = (AWS_ENDPOINTS['us-east-1'],) + + self.ssh_key = SshKey(args) + self.port = None + else: + if self.arch: + raise ApplicationError('Provider not detected for platform "%s" on arch "%s".' % (self.platform, self.arch)) + + raise ApplicationError('Provider not detected for platform "%s" with no arch specified.' % self.platform) + + if persist and load and self._load(): + try: + display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + + self.connection = self.get(always_raise_on=[404]) + + display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + except HttpError as ex: + if ex.status != 404: + raise + + self._clear() + + display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + + self.instance_id = None + self.endpoint = None + elif not persist: + self.instance_id = None + self.endpoint = None + self._clear() + + if self.instance_id: + self.started = True + else: + self.started = False + self.instance_id = str(uuid.uuid4()) + self.endpoint = None + + display.sensitive.add(self.instance_id) + + def _get_parallels_endpoints(self): + """ + :rtype: tuple[str] + """ + client = HttpClient(self.args, always=True) + display.info('Getting available endpoints...', verbosity=1) + sleep = 3 + + for _iteration in range(1, 10): + response = client.get('https://ansible-ci-files.s3.amazonaws.com/ansible-test/parallels-endpoints.txt') + + if response.status_code == 200: + endpoints = tuple(response.response.splitlines()) + display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1) + return endpoints + + display.warning('HTTP %d error getting endpoints, trying again in %d seconds.'
% (response.status_code, sleep)) + time.sleep(sleep) + + raise ApplicationError('Unable to get available endpoints.') + + @property + def available(self): + """Return True if Ansible Core CI is supported.""" + return self.ci_provider.supports_core_ci_auth(self.auth_context) + + def start(self): + """Start instance.""" + if self.started: + display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + return None + + return self._start(self.ci_provider.prepare_core_ci_auth(self.auth_context)) + + def stop(self): + """Stop instance.""" + if not self.started: + display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + return + + response = self.client.delete(self._uri) + + if response.status_code == 404: + self._clear() + display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + return + + if response.status_code == 200: + self._clear() + display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + return + + raise self._create_http_error(response) + + def get(self, tries=3, sleep=15, always_raise_on=None): + """ + Get instance connection information. + :type tries: int + :type sleep: int + :type always_raise_on: list[int] | None + :rtype: InstanceConnection + """ + if not self.started: + display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), + verbosity=1) + return None + + if not always_raise_on: + always_raise_on = [] + + if self.connection and self.connection.running: + return self.connection + + while True: + tries -= 1 + response = self.client.get(self._uri) + + if response.status_code == 200: + break + + error = self._create_http_error(response) + + if not tries or response.status_code in always_raise_on: + raise error + + display.warning('%s. Trying again after %d seconds.' % (error, sleep)) + time.sleep(sleep) + + if self.args.explain: + self.connection = InstanceConnection( + running=True, + hostname='cloud.example.com', + port=self.port or 12345, + username='username', + password='password' if self.platform == 'windows' else None, + ) + else: + response_json = response.json() + status = response_json['status'] + con = response_json.get('connection') + + if con: + self.connection = InstanceConnection( + running=status == 'running', + hostname=con['hostname'], + port=int(con.get('port', self.port)), + username=con['username'], + password=con.get('password'), + response_json=response_json, + ) + else: + self.connection = InstanceConnection( + running=status == 'running', + response_json=response_json, + ) + + if self.connection.password: + display.sensitive.add(str(self.connection.password)) + + status = 'running' if self.connection.running else 'starting' + + display.info('Status update: %s/%s on instance %s is %s.' % + (self.platform, self.version, self.instance_id, status), + verbosity=1) + + return self.connection + + def wait(self, iterations=90): # type: (t.Optional[int]) -> None + """Wait for the instance to become ready.""" + for _iteration in range(1, iterations): + if self.get().running: + return + time.sleep(10) + + raise ApplicationError('Timeout waiting for %s/%s instance %s.' 
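wait() above polls the instance status a bounded number of times before giving up. The same poll loop in isolation; note that range(1, iterations) performs iterations - 1 checks, matching the original, and RuntimeError stands in for ApplicationError:

import time

def wait_until_running(get_running, iterations=90, delay=10):
    """Poll like AnsibleCoreCI.wait(): check up to iterations - 1 times, then fail."""
    for _iteration in range(1, iterations):
        if get_running():
            return
        time.sleep(delay)
    raise RuntimeError('Timeout waiting for instance.')

states = iter([False, False, True])
wait_until_running(lambda: next(states), iterations=5, delay=0)  # returns on the third poll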
% + (self.platform, self.version, self.instance_id)) + + @property + def _uri(self): + return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id) + + def _start(self, auth): + """Start instance.""" + display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1) + + if self.platform == 'windows': + winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1')) + else: + winrm_config = None + + data = dict( + config=dict( + platform=self.platform, + version=self.version, + public_key=self.ssh_key.pub_contents if self.ssh_key else None, + query=False, + winrm_config=winrm_config, + ) + ) + + data.update(dict(auth=auth)) + + headers = { + 'Content-Type': 'application/json', + } + + response = self._start_try_endpoints(data, headers) + + self.started = True + self._save() + + display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + + if self.args.explain: + return {} + + return response.json() + + def _start_try_endpoints(self, data, headers): + """ + :type data: dict[str, any] + :type headers: dict[str, str] + :rtype: HttpResponse + """ + threshold = 1 + + while threshold <= self.max_threshold: + for self.endpoint in self.endpoints: + try: + return self._start_at_threshold(data, headers, threshold) + except CoreHttpError as ex: + if ex.status == 503: + display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1) + continue + display.error(ex.remote_message) + except HttpError as ex: + display.error(u'%s' % ex) + + time.sleep(3) + + threshold += 1 + + raise ApplicationError('Maximum threshold reached and all endpoints exhausted.') + + def _start_at_threshold(self, data, headers, threshold): + """ + :type data: dict[str, any] + :type headers: dict[str, str] + :type threshold: int + :rtype: HttpResponse | None + """ + tries = self.retries + sleep = 15 + + data['threshold'] = threshold + + display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1) + + while True: + tries -= 1 + response = self.client.put(self._uri, data=json.dumps(data), headers=headers) + + if response.status_code == 200: + return response + + error = self._create_http_error(response) + + if response.status_code == 503: + raise error + + if not tries: + raise error + + display.warning('%s. Trying again after %d seconds.' 
% (error, sleep)) + time.sleep(sleep) + + def _clear(self): + """Clear instance information.""" + try: + self.connection = None + os.remove(self.path) + except OSError as ex: + if ex.errno != errno.ENOENT: + raise + + def _load(self): + """Load instance information.""" + try: + data = read_text_file(self.path) + except IOError as ex: + if ex.errno != errno.ENOENT: + raise + + return False + + if not data.startswith('{'): + return False # legacy format + + config = json.loads(data) + + return self.load(config) + + def load(self, config): + """ + :type config: dict[str, str] + :rtype: bool + """ + self.instance_id = str(config['instance_id']) + self.endpoint = config['endpoint'] + self.started = True + + display.sensitive.add(self.instance_id) + + return True + + def _save(self): + """Save instance information.""" + if self.args.explain: + return + + config = self.save() + + write_json_file(self.path, config, create_directories=True) + + def save(self): + """ + :rtype: dict[str, str] + """ + return dict( + platform_version='%s/%s' % (self.platform, self.version), + instance_id=self.instance_id, + endpoint=self.endpoint, + ) + + @staticmethod + def _create_http_error(response): + """ + :type response: HttpResponse + :rtype: ApplicationError + """ + response_json = response.json() + stack_trace = '' + + if 'message' in response_json: + message = response_json['message'] + elif 'errorMessage' in response_json: + message = response_json['errorMessage'].strip() + if 'stackTrace' in response_json: + traceback_lines = response_json['stackTrace'] + + # AWS Lambda on Python 2.7 returns a list of tuples + # AWS Lambda on Python 3.7 returns a list of strings + if traceback_lines and isinstance(traceback_lines[0], list): + traceback_lines = traceback.format_list(traceback_lines) + + trace = '\n'.join([x.rstrip() for x in traceback_lines]) + stack_trace = ('\nTraceback (from remote server):\n%s' % trace) + else: + message = str(response_json) + + return CoreHttpError(response.status_code, message, stack_trace) + + +class CoreHttpError(HttpError): + """HTTP response as an error.""" + def __init__(self, status, remote_message, remote_stack_trace): + """ + :type status: int + :type remote_message: str + :type remote_stack_trace: str + """ + super(CoreHttpError, self).__init__(status, '%s%s' % (remote_message, remote_stack_trace)) + + self.remote_message = remote_message + self.remote_stack_trace = remote_stack_trace + + +class SshKey: + """Container for SSH key used to connect to remote instances.""" + KEY_NAME = 'id_rsa' + PUB_NAME = 'id_rsa.pub' + + def __init__(self, args): + """ + :type args: EnvironmentConfig + """ + key_pair = self.get_key_pair() + + if not key_pair: + key_pair = self.generate_key_pair(args) + + key, pub = key_pair + key_dst, pub_dst = self.get_in_tree_key_pair_paths() + + def ssh_key_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None + """ + Add the SSH keys to the payload file list. + They are either outside the source tree or in the cache dir which is ignored by default. 
+ """ + files.append((key, os.path.relpath(key_dst, data_context().content.root))) + files.append((pub, os.path.relpath(pub_dst, data_context().content.root))) + + data_context().register_payload_callback(ssh_key_callback) + + self.key, self.pub = key, pub + + if args.explain: + self.pub_contents = None + else: + self.pub_contents = read_text_file(self.pub).strip() + + def get_in_tree_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]] + """Return the ansible-test SSH key pair paths from the content tree.""" + temp_dir = ResultType.TMP.path + + key = os.path.join(temp_dir, self.KEY_NAME) + pub = os.path.join(temp_dir, self.PUB_NAME) + + return key, pub + + def get_source_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]] + """Return the ansible-test SSH key pair paths for the current user.""" + base_dir = os.path.expanduser('~/.ansible/test/') + + key = os.path.join(base_dir, self.KEY_NAME) + pub = os.path.join(base_dir, self.PUB_NAME) + + return key, pub + + def get_key_pair(self): # type: () -> t.Optional[t.Tuple[str, str]] + """Return the ansible-test SSH key pair paths if present, otherwise return None.""" + key, pub = self.get_in_tree_key_pair_paths() + + if os.path.isfile(key) and os.path.isfile(pub): + return key, pub + + key, pub = self.get_source_key_pair_paths() + + if os.path.isfile(key) and os.path.isfile(pub): + return key, pub + + return None + + def generate_key_pair(self, args): # type: (EnvironmentConfig) -> t.Tuple[str, str] + """Generate an SSH key pair for use by all ansible-test invocations for the current user.""" + key, pub = self.get_source_key_pair_paths() + + if not args.explain: + make_dirs(os.path.dirname(key)) + + if not os.path.isfile(key) or not os.path.isfile(pub): + run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 'rsa', '-N', '', '-f', key]) + + # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko + key_contents = read_text_file(key) + key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents) + + write_text_file(key, key_contents) + + return key, pub + + +class InstanceConnection: + """Container for remote instance status and connection details.""" + def __init__(self, + running, # type: bool + hostname=None, # type: t.Optional[str] + port=None, # type: t.Optional[int] + username=None, # type: t.Optional[str] + password=None, # type: t.Optional[str] + response_json=None, # type: t.Optional[t.Dict[str, t.Any]] + ): # type: (...) -> None + self.running = running + self.hostname = hostname + self.port = port + self.username = username + self.password = password + self.response_json = response_json or {} + + def __str__(self): + if self.password: + return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password) + + return '%s:%s [%s]' % (self.hostname, self.port, self.username) diff --git a/test/lib/ansible_test/_internal/coverage/__init__.py b/test/lib/ansible_test/_internal/coverage/__init__.py new file mode 100644 index 00000000..ebb1ca22 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/__init__.py @@ -0,0 +1,325 @@ +"""Common logic for the coverage subcommand.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re + +from .. 
import types as t + +from ..encoding import ( + to_bytes, +) + +from ..io import ( + open_binary_file, + read_json_file, +) + +from ..util import ( + ApplicationError, + common_environment, + display, + ANSIBLE_TEST_DATA_ROOT, +) + +from ..util_common import ( + intercept_command, + ResultType, +) + +from ..config import ( + EnvironmentConfig, +) + +from ..executor import ( + Delegate, + install_command_requirements, +) + +from .. target import ( + walk_module_targets, +) + +from ..data import ( + data_context, +) + +if t.TYPE_CHECKING: + import coverage as coverage_module + +COVERAGE_GROUPS = ('command', 'target', 'environment', 'version') +COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc') +COVERAGE_OUTPUT_FILE_NAME = 'coverage' + + +class CoverageConfig(EnvironmentConfig): + """Configuration for the coverage command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageConfig, self).__init__(args, 'coverage') + + self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: t.FrozenSet[str] + self.all = args.all if 'all' in args else False # type: bool + self.stub = args.stub if 'stub' in args else False # type: bool + self.export = args.export if 'export' in args else None # type: str + self.coverage = False # temporary work-around to support intercept_command in cover.py + + +def initialize_coverage(args): # type: (CoverageConfig) -> coverage_module + """Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available.""" + if args.delegate: + raise Delegate() + + if args.requirements: + install_command_requirements(args) + + try: + import coverage + except ImportError: + coverage = None + + if not coverage: + raise ApplicationError('You must install the "coverage" python module to use this command.') + + coverage_version_string = coverage.__version__ + coverage_version = tuple(int(v) for v in coverage_version_string.split('.')) + + min_version = (4, 2) + max_version = (5, 0) + + supported_version = True + recommended_version = '4.5.4' + + if coverage_version < min_version or coverage_version >= max_version: + supported_version = False + + if not supported_version: + raise ApplicationError('Version %s of "coverage" is not supported. Version %s is known to work and is recommended.' 
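initialize_coverage() above compares the installed coverage version against an inclusive lower bound and an exclusive upper bound using integer tuples. The comparison in isolation:

def coverage_version_supported(version_string, min_version=(4, 2), max_version=(5, 0)):
    """Tuple-compare a version string against [min, max), as initialize_coverage() does."""
    version = tuple(int(v) for v in version_string.split('.'))
    return min_version <= version < max_version

assert coverage_version_supported('4.5.4')        # the recommended release
assert not coverage_version_supported('5.0.1')    # 5.0+ writes SQLite data files
assert not coverage_version_supported('3.7.1')    # predates the supported range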
% ( + coverage_version_string, recommended_version)) + + return coverage + + +def run_coverage(args, output_file, command, cmd): # type: (CoverageConfig, str, str, t.List[str]) -> None + """Run the coverage cli tool with the specified options.""" + env = common_environment() + env.update(dict(COVERAGE_FILE=output_file)) + + cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd + + intercept_command(args, target_name='coverage', env=env, cmd=cmd, disable_coverage=True) + + +def get_python_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str] + """Return the list of Python coverage file paths.""" + return get_coverage_files('python', path) + + +def get_powershell_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str] + """Return the list of PowerShell coverage file paths.""" + return get_coverage_files('powershell', path) + + +def get_coverage_files(language, path=None): # type: (str, t.Optional[str]) -> t.List[str] + """Return the list of coverage file paths for the given language.""" + coverage_dir = path or ResultType.COVERAGE.path + coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir) + if '=coverage.' in f and '=%s' % language in f] + + return coverage_files + + +def get_collection_path_regexes(): # type: () -> t.Tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]] + """Return a pair of regexes used for identifying and manipulating collection paths.""" + if data_context().content.collection: + collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory) + collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory) + else: + collection_search_re = None + collection_sub_re = None + + return collection_search_re, collection_sub_re + + +def get_python_modules(): # type: () -> t.Dict[str, str] + """Return a dictionary of Ansible module names and their paths.""" + return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py')) + + +def enumerate_python_arcs( + path, # type: str + coverage, # type: coverage_module + modules, # type: t.Dict[str, str] + collection_search_re, # type: t.Optional[t.Pattern] + collection_sub_re, # type: t.Optional[t.Pattern] +): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]] + """Enumerate Python code coverage arcs in the given file.""" + if os.path.getsize(path) == 0: + display.warning('Empty coverage file: %s' % path, verbosity=2) + return + + original = coverage.CoverageData() + + try: + original.read_file(path) + except Exception as ex: # pylint: disable=locally-disabled, broad-except + with open_binary_file(path) as file: + header = file.read(6) + + if header == b'SQLite': + display.error('File created by "coverage" 5.0+: %s' % os.path.relpath(path)) + else: + display.error(u'%s' % ex) + + return + + for filename in original.measured_files(): + arcs = original.arcs(filename) + + if not arcs: + # This is most likely due to using an unsupported version of coverage. + display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path)) + continue + + filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re) + + if not filename: + continue + + yield filename, set(arcs) + + +def enumerate_powershell_lines( + path, # type: str + collection_search_re, # type: t.Optional[t.Pattern] + collection_sub_re, # type: t.Optional[t.Pattern] +): # type: (...) 
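get_coverage_files() above selects files purely by substring markers in the name rather than parsing a structured format. A sketch of the same filter; the sample names are invented to satisfy the two substring tests, not copied from real output:

def list_coverage_files(names, language):
    """Filter names the way get_coverage_files() scans the coverage results directory."""
    return [name for name in names if '=coverage.' in name and '=%s' % language in name]

names = [
    'integration=ping=python-3.8=coverage.ci.1234',      # invented, matches both markers
    'integration=win_ping=powershell=coverage.ci.5678',  # invented
    'report.txt',
]
assert list_coverage_files(names, 'python') == [names[0]]
assert list_coverage_files(names, 'powershell') == [names[1]]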
-> t.Generator[t.Tuple[str, t.Dict[int, int]]] + """Enumerate PowerShell code coverage lines in the given file.""" + if os.path.getsize(path) == 0: + display.warning('Empty coverage file: %s' % path, verbosity=2) + return + + try: + coverage_run = read_json_file(path) + except Exception as ex: # pylint: disable=locally-disabled, broad-except + display.error(u'%s' % ex) + return + + for filename, hits in coverage_run.items(): + filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re) + + if not filename: + continue + + if isinstance(hits, dict) and not hits.get('Line'): + # Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage. + # This format differs from the more verbose format of raw coverage data from the remote Windows hosts. + hits = dict((int(key), value) for key, value in hits.items()) + + yield filename, hits + continue + + # PowerShell unpacks arrays if there's only a single entry, so this is a defensive check for that. + if not isinstance(hits, list): + hits = [hits] + + hits = dict((hit['Line'], hit['HitCount']) for hit in hits if hit) + + yield filename, hits + + +def sanitize_filename( + filename, # type: str + modules=None, # type: t.Optional[t.Dict[str, str]] + collection_search_re=None, # type: t.Optional[t.Pattern] + collection_sub_re=None, # type: t.Optional[t.Pattern] +): # type: (...) -> t.Optional[str] + """Convert the given code coverage path to a local absolute path and return it, or None if the path is not valid.""" + ansible_path = os.path.abspath('lib/ansible/') + '/' + root_path = data_context().content.root + '/' + integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep + + if modules is None: + modules = {} + + if '/ansible_modlib.zip/ansible/' in filename: + # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier. + new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif collection_search_re and collection_search_re.search(filename): + new_name = os.path.abspath(collection_sub_re.sub('', filename)) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename): + # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later. + new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif '/ansible_module_' in filename: + # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier. + module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename) + if module_name not in modules: + display.warning('Skipping coverage of unknown module: %s' % module_name) + return None + new_name = os.path.abspath(modules[module_name]) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename): + # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later. + # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex. + # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
+ module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$', + '\\g<module>', filename).rstrip('_') + if module_name not in modules: + display.warning('Skipping coverage of unknown module: %s' % module_name) + return None + new_name = os.path.abspath(modules[module_name]) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif re.search('^(/.*?)?/root/ansible/', filename): + # Rewrite the path of code running on a remote host or in a docker container as root. + new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + elif integration_temp_path in filename: + # Rewrite the path of code running from an integration test temporary directory. + new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename) + display.info('%s -> %s' % (filename, new_name), verbosity=3) + filename = new_name + + filename = os.path.abspath(filename) # make sure path is absolute (will be relative if previously exported) + + return filename + + +class PathChecker: + """Checks code coverage paths to verify they are valid and reports on the findings.""" + def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None + self.args = args + self.collection_search_re = collection_search_re + self.invalid_paths = [] + self.invalid_path_chars = 0 + + def check_path(self, path): # type: (str) -> bool + """Return True if the given coverage path is valid, otherwise display a warning and return False.""" + if os.path.isfile(to_bytes(path)): + return True + + if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py': + # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk + # coverage is still reported for these non-existent files, but warnings are not needed + return False + + self.invalid_paths.append(path) + self.invalid_path_chars += len(path) + + if self.args.verbosity > 1: + display.warning('Invalid coverage path: %s' % path) + + return False + + def report(self): # type: () -> None + """Display a warning regarding invalid paths if any were found.""" + if self.invalid_paths: + display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths))) diff --git a/test/lib/ansible_test/_internal/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py new file mode 100644 index 00000000..45770373 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py @@ -0,0 +1,19 @@ +"""Common logic for the `coverage analyze` subcommand.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ... import types as t + +from ..
import ( + CoverageConfig, +) + + +class CoverageAnalyzeConfig(CoverageConfig): + """Configuration for the `coverage analyze` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeConfig, self).__init__(args) + + # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands + # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used + self.info_stderr = True diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py new file mode 100644 index 00000000..8fe571b8 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py @@ -0,0 +1,154 @@ +"""Analyze integration test target code coverage.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .... import types as t + +from ....io import ( + read_json_file, + write_json_file, +) + +from ....util import ( + ApplicationError, + display, +) + +from .. import ( + CoverageAnalyzeConfig, +) + +if t.TYPE_CHECKING: + TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int]) + NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]] + IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]] + Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]] + Lines = t.Dict[str, t.Dict[int, t.Set[int]]] + TargetIndexes = t.Dict[str, int] + TargetSetIndexes = t.Dict[t.FrozenSet[int], int] + + +class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig): + """Configuration for the `coverage analyze targets` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsConfig, self).__init__(args) + + self.info_stderr = True + + +def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any] + """Condense target indexes, arcs and lines into a compact report.""" + set_indexes = {} + arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items()) + line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items()) + + report = dict( + targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])], + target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])], + arcs=arc_refs, + lines=line_refs, + ) + + return report + + +def load_report(report): # type: (t.Dict[str, t.Any]) -> t.Tuple[t.List[str], Arcs, Lines] + """Extract target indexes, arcs and lines from an existing report.""" + try: + target_indexes = report['targets'] # type: t.List[str] + target_sets = report['target_sets'] # type: t.List[t.List[int]] + arc_data = report['arcs'] # type: t.Dict[str, t.Dict[str, int]] + line_data = report['lines'] # type: t.Dict[str, t.Dict[int, int]] + except KeyError as ex: + raise ApplicationError('Document is missing key "%s".' % ex.args) + except TypeError: + raise ApplicationError('Document is type "%s" instead of "dict".' 
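make_report() above deduplicates target names and target sets by storing each once and referencing them by index. A toy round trip over the same encoding; the two targets and their sets are invented:

# Targets and target sets are stored once and referenced by index to keep reports small.
target_indexes = {'target_a': 0, 'target_b': 1}
set_indexes = {frozenset([0]): 0, frozenset([0, 1]): 1}

report = dict(
    targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
    target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
)

assert report == dict(targets=['target_a', 'target_b'], target_sets=[[0], [0, 1]])

# Decoding reverses the lookup: set index 1 -> target indexes [0, 1] -> both names.
names = [report['targets'][i] for i in report['target_sets'][1]]
assert names == ['target_a', 'target_b']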
% type(report).__name__) + + arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items()) + lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items()) + + return target_indexes, arcs, lines + + +def read_report(path): # type: (str) -> t.Tuple[t.List[str], Arcs, Lines] + """Read a JSON report from disk.""" + try: + report = read_json_file(path) + except Exception as ex: + raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex)) + + try: + return load_report(report) + except ApplicationError as ex: + raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex)) + + +def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t.Dict[str, t.Any], str) -> None + """Write a JSON report to disk.""" + if args.explain: + return + + write_json_file(path, report, formatted=False) + + display.info('Generated %d byte report with %d targets covering %d files.' % ( + os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())), + ), verbosity=1) + + +def format_arc(value): # type: (t.Tuple[int, int]) -> str + """Format an arc tuple as a string.""" + return '%d:%d' % value + + +def parse_arc(value): # type: (str) -> t.Tuple[int, int] + """Parse an arc string into a tuple.""" + first, last = tuple(map(int, value.split(':'))) + return first, last + + +def get_target_set_index(data, target_set_indexes): # type: (t.Set[int], TargetSetIndexes) -> int + """Find or add the target set in the result set and return the target set index.""" + return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes)) + + +def get_target_index(name, target_indexes): # type: (str, TargetIndexes) -> int + """Find or add the target in the result set and return the target index.""" + return target_indexes.setdefault(name, len(target_indexes)) + + +def expand_indexes( + source_data, # type: IndexedPoints + source_index, # type: t.List[str] + format_func, # type: t.Callable[t.Tuple[t.Any], str] +): # type: (...) 
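+
+# --- Editor's note (illustrative sketch, not part of the upstream patch): the
+# --- compact report produced by make_report() above, with assumed example data.
+# --- One target covering one Python arc and one PowerShell line serializes as:
+#
+#   {
+#       "targets": ["shippable/posix/group1"],   # target name for index 0
+#       "target_sets": [[0]],                    # set index 0 -> target indexes {0}
+#       "arcs": {"lib/foo.py": {"6:7": 0}},      # "6:7" == format_arc((6, 7))
+#       "lines": {"plugins/bar.ps1": {"12": 0}}  # values are target set indexes
+#   }
+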
-> NamedPoints + """Expand indexes from the source into target names for easier processing of the data (arcs or lines).""" + combined_data = {} # type: t.Dict[str, t.Dict[t.Any, t.Set[str]]] + + for covered_path, covered_points in source_data.items(): + combined_points = combined_data.setdefault(covered_path, {}) + + for covered_point, covered_target_indexes in covered_points.items(): + combined_point = combined_points.setdefault(format_func(covered_point), set()) + + for covered_target_index in covered_target_indexes: + combined_point.add(source_index[covered_target_index]) + + return combined_data + + +def generate_indexes(target_indexes, data): # type: (TargetIndexes, NamedPoints) -> IndexedPoints + """Return an indexed version of the given data (arcs or points).""" + results = {} # type: IndexedPoints + + for path, points in data.items(): + result_points = results[path] = {} + + for point, target_names in points.items(): + result_point = result_points[point] = set() + + for target_name in target_names: + result_point.add(get_target_index(target_name, target_indexes)) + + return results diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py new file mode 100644 index 00000000..35148ff6 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py @@ -0,0 +1,64 @@ +"""Combine integration test target code coverage reports.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from .... import types as t + +from . import ( + CoverageAnalyzeTargetsConfig, + get_target_index, + make_report, + read_report, + write_report, +) + +if t.TYPE_CHECKING: + from . import ( + Arcs, + IndexedPoints, + Lines, + TargetIndexes, + ) + + +class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig): + """Configuration for the `coverage analyze targets combine` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args) + + self.input_files = args.input_file # type: t.List[str] + self.output_file = args.output_file # type: str + + +def command_coverage_analyze_targets_combine(args): # type: (CoverageAnalyzeTargetsCombineConfig) -> None + """Combine integration test target code coverage reports.""" + combined_target_indexes = {} # type: TargetIndexes + combined_path_arcs = {} # type: Arcs + combined_path_lines = {} # type: Lines + + for report_path in args.input_files: + covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path) + + merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes) + merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes) + + report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines) + + write_report(args, report, args.output_file) + + +def merge_indexes( + source_data, # type: IndexedPoints + source_index, # type: t.List[str] + combined_data, # type: IndexedPoints + combined_index, # type: TargetIndexes +): # type: (...) 
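+
+# --- Editor's sketch (assumed data, not part of the upstream patch): combining
+# --- a report whose target list is ['target_a'] with one listing ['target_b']
+# --- re-indexes both through get_target_index() into one shared mapping:
+#
+#   combined_index = {}
+#   get_target_index('target_a', combined_index)  # -> 0
+#   get_target_index('target_b', combined_index)  # -> 1
+#   # a point covered by both input reports ends up with the index set {0, 1}
+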
-> None + """Merge indexes from the source into the combined data set (arcs or lines).""" + for covered_path, covered_points in source_data.items(): + combined_points = combined_data.setdefault(covered_path, {}) + + for covered_point, covered_target_indexes in covered_points.items(): + combined_point = combined_points.setdefault(covered_point, set()) + + for covered_target_index in covered_target_indexes: + combined_point.add(get_target_index(source_index[covered_target_index], combined_index)) diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py new file mode 100644 index 00000000..388dd6cb --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py @@ -0,0 +1,39 @@ +"""Expand target names in an aggregated coverage file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from .... import types as t + +from ....io import ( + SortedSetEncoder, + write_json_file, +) + +from . import ( + CoverageAnalyzeTargetsConfig, + expand_indexes, + format_arc, + read_report, +) + + +class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig): + """Configuration for the `coverage analyze targets expand` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsExpandConfig, self).__init__(args) + + self.input_file = args.input_file # type: str + self.output_file = args.output_file # type: str + + +def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTargetsExpandConfig) -> None + """Expand target names in an aggregated coverage file.""" + covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file) + + report = dict( + arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc), + lines=expand_indexes(covered_path_lines, covered_targets, str), + ) + + if not args.explain: + write_json_file(args.output_file, report, encoder=SortedSetEncoder) diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py new file mode 100644 index 00000000..e90fb227 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py @@ -0,0 +1,104 @@ +"""Filter an aggregated coverage file, keeping only the specified targets.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from .... import types as t + +from . import ( + CoverageAnalyzeTargetsConfig, + expand_indexes, + generate_indexes, + make_report, + read_report, + write_report, +) + +if t.TYPE_CHECKING: + from . 
import ( + NamedPoints, + TargetIndexes, + ) + + +class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig): + """Configuration for the `coverage analyze targets filter` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsFilterConfig, self).__init__(args) + + self.input_file = args.input_file # type: str + self.output_file = args.output_file # type: str + self.include_targets = args.include_targets # type: t.List[str] + self.exclude_targets = args.exclude_targets # type: t.List[str] + self.include_path = args.include_path # type: t.Optional[str] + self.exclude_path = args.exclude_path # type: t.Optional[str] + + +def command_coverage_analyze_targets_filter(args): # type: (CoverageAnalyzeTargetsFilterConfig) -> None + """Filter target names in an aggregated coverage file.""" + covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file) + + filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, lambda v: v) + filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, lambda v: v) + + include_targets = set(args.include_targets) if args.include_targets else None + exclude_targets = set(args.exclude_targets) if args.exclude_targets else None + + include_path = re.compile(args.include_path) if args.include_path else None + exclude_path = re.compile(args.exclude_path) if args.exclude_path else None + + def path_filter_func(path): + if include_path and not re.search(include_path, path): + return False + + if exclude_path and re.search(exclude_path, path): + return False + + return True + + def target_filter_func(targets): + if include_targets: + targets &= include_targets + + if exclude_targets: + targets -= exclude_targets + + return targets + + filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func) + filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func) + + target_indexes = {} # type: TargetIndexes + indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs) + indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines) + + report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines) + + write_report(args, report, args.output_file) + + +def filter_data( + data, # type: NamedPoints + path_filter_func, # type: t.Callable[[str], bool] + target_filter_func, # type: t.Callable[[t.Set[str]], t.Set[str]] +): # type: (...) -> NamedPoints + """Filter the data set using the specified filter function.""" + result = {} # type: NamedPoints + + for src_path, src_points in data.items(): + if not path_filter_func(src_path): + continue + + dst_points = {} + + for src_point, src_targets in src_points.items(): + dst_targets = target_filter_func(src_targets) + + if dst_targets: + dst_points[src_point] = dst_targets + + if dst_points: + result[src_path] = dst_points + + return result diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py new file mode 100644 index 00000000..a14b6f55 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py @@ -0,0 +1,146 @@ +"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .... 
import types as t + +from ....encoding import ( + to_text, +) + +from ....data import ( + data_context, +) + +from ....util_common import ( + ResultType, +) + +from ... import ( + enumerate_powershell_lines, + enumerate_python_arcs, + get_collection_path_regexes, + get_powershell_coverage_files, + get_python_coverage_files, + get_python_modules, + initialize_coverage, + PathChecker, +) + +from . import ( + CoverageAnalyzeTargetsConfig, + get_target_index, + make_report, + write_report, +) + +if t.TYPE_CHECKING: + from . import ( + Arcs, + Lines, + TargetIndexes, + ) + + +class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig): + """Configuration for the `coverage analyze targets generate` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsGenerateConfig, self).__init__(args) + + self.input_dir = args.input_dir or ResultType.COVERAGE.path # type: str + self.output_file = args.output_file # type: str + + +def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTargetsGenerateConfig) -> None + """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line.""" + root = data_context().content.root + target_indexes = {} + arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, args.input_dir, target_indexes).items()) + lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items()) + report = make_report(target_indexes, arcs, lines) + write_report(args, report, args.output_file) + + +def analyze_python_coverage( + args, # type: CoverageAnalyzeTargetsGenerateConfig + path, # type: str + target_indexes, # type: TargetIndexes +): # type: (...) -> Arcs + """Analyze Python code coverage.""" + results = {} # type: Arcs + collection_search_re, collection_sub_re = get_collection_path_regexes() + modules = get_python_modules() + python_files = get_python_coverage_files(path) + coverage = initialize_coverage(args) + + for python_file in python_files: + if not is_integration_coverage_file(python_file): + continue + + target_name = get_target_name(python_file) + target_index = get_target_index(target_name, target_indexes) + + for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re): + arcs = results.setdefault(filename, {}) + + for covered_arc in covered_arcs: + arc = arcs.setdefault(covered_arc, set()) + arc.add(target_index) + + prune_invalid_filenames(args, results, collection_search_re=collection_search_re) + + return results + + +def analyze_powershell_coverage( + args, # type: CoverageAnalyzeTargetsGenerateConfig + path, # type: str + target_indexes, # type: TargetIndexes +): # type: (...) 
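+
+# --- Editor's note: the nested setdefault pattern in analyze_python_coverage()
+# --- above builds the Arcs mapping incrementally; a minimal sketch with assumed
+# --- values:
+#
+#   results = {}
+#   arcs = results.setdefault('lib/foo.py', {})  # per-file dict of arcs
+#   arcs.setdefault((6, 7), set()).add(0)        # target index 0 covers arc (6, 7)
+#   assert results == {'lib/foo.py': {(6, 7): {0}}}
+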
-> Lines + """Analyze PowerShell code coverage.""" + results = {} # type: Lines + collection_search_re, collection_sub_re = get_collection_path_regexes() + powershell_files = get_powershell_coverage_files(path) + + for powershell_file in powershell_files: + if not is_integration_coverage_file(powershell_file): + continue + + target_name = get_target_name(powershell_file) + target_index = get_target_index(target_name, target_indexes) + + for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re): + lines = results.setdefault(filename, {}) + + for covered_line in hits: + line = lines.setdefault(covered_line, set()) + line.add(target_index) + + prune_invalid_filenames(args, results) + + return results + + +def prune_invalid_filenames( + args, # type: CoverageAnalyzeTargetsGenerateConfig + results, # type: t.Dict[str, t.Any] + collection_search_re=None, # type: t.Optional[t.Pattern] +): # type: (...) -> None + """Remove invalid filenames from the given result set.""" + path_checker = PathChecker(args, collection_search_re) + + for path in list(results.keys()): + if not path_checker.check_path(path): + del results[path] + + +def get_target_name(path): # type: (str) -> str + """Extract the test target name from the given coverage path.""" + return to_text(os.path.basename(path).split('=')[1]) + + +def is_integration_coverage_file(path): # type: (str) -> bool + """Return True if the coverage file came from integration tests, otherwise False.""" + return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration') diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py new file mode 100644 index 00000000..613a0ef2 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py @@ -0,0 +1,109 @@ +"""Identify aggregated coverage in one file missing from another.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .... import types as t + +from ....encoding import ( + to_bytes, +) + +from . import ( + CoverageAnalyzeTargetsConfig, + get_target_index, + make_report, + read_report, + write_report, +) + +if t.TYPE_CHECKING: + from . 
import ( + TargetIndexes, + IndexedPoints, + ) + + +class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig): + """Configuration for the `coverage analyze targets missing` command.""" + def __init__(self, args): # type: (t.Any) -> None + super(CoverageAnalyzeTargetsMissingConfig, self).__init__(args) + + self.from_file = args.from_file # type: str + self.to_file = args.to_file # type: str + self.output_file = args.output_file # type: str + + self.only_gaps = args.only_gaps # type: bool + self.only_exists = args.only_exists # type: bool + + +def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTargetsMissingConfig) -> None + """Identify aggregated coverage in one file missing from another.""" + from_targets, from_path_arcs, from_path_lines = read_report(args.from_file) + to_targets, to_path_arcs, to_path_lines = read_report(args.to_file) + target_indexes = {} + + if args.only_gaps: + arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists) + lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists) + else: + arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists) + lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists) + + report = make_report(target_indexes, arcs, lines) + write_report(args, report, args.output_file) + + +def find_gaps( + from_data, # type: IndexedPoints + from_index, # type: t.List[str] + to_data, # type: IndexedPoints + target_indexes, # type: TargetIndexes + only_exists, # type: bool +): # type: (...) -> IndexedPoints + """Find gaps in coverage between the from and to data sets.""" + target_data = {} + + for from_path, from_points in from_data.items(): + if only_exists and not os.path.isfile(to_bytes(from_path)): + continue + + to_points = to_data.get(from_path, {}) + + gaps = set(from_points.keys()) - set(to_points.keys()) + + if gaps: + gap_points = dict((key, value) for key, value in from_points.items() if key in gaps) + target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items()) + + return target_data + + +def find_missing( + from_data, # type: IndexedPoints + from_index, # type: t.List[str] + to_data, # type: IndexedPoints + to_index, # type: t.List[str] + target_indexes, # type: TargetIndexes + only_exists, # type: bool +): # type: (...) 
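+
+# --- Editor's note: find_gaps() above compares covered points only, never the
+# --- targets behind them. With assumed data from_data = {'f.py': {(6, 7): {0}}},
+# --- no gap is reported as long as to_data covers (6, 7) at all, even via
+# --- different targets; the per-target comparison is done by find_missing() below.
+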
-> IndexedPoints + """Find coverage in from_data not present in to_data (arcs or lines).""" + target_data = {} + + for from_path, from_points in from_data.items(): + if only_exists and not os.path.isfile(to_bytes(from_path)): + continue + + to_points = to_data.get(from_path, {}) + + for from_point, from_target_indexes in from_points.items(): + to_target_indexes = to_points.get(from_point, set()) + + remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes) + + if remaining_targets: + target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set()) + target_index.update(get_target_index(name, target_indexes) for name in remaining_targets) + + return target_data diff --git a/test/lib/ansible_test/_internal/coverage/combine.py b/test/lib/ansible_test/_internal/coverage/combine.py new file mode 100644 index 00000000..7f726267 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/combine.py @@ -0,0 +1,303 @@ +"""Combine code coverage files.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..target import ( + walk_compile_targets, + walk_powershell_targets, +) + +from ..io import ( + read_text_file, +) + +from ..util import ( + display, +) + +from ..util_common import ( + ResultType, + write_json_file, + write_json_test_results, +) + +from . import ( + enumerate_python_arcs, + enumerate_powershell_lines, + get_collection_path_regexes, + get_python_coverage_files, + get_python_modules, + get_powershell_coverage_files, + initialize_coverage, + COVERAGE_OUTPUT_FILE_NAME, + COVERAGE_GROUPS, + CoverageConfig, + PathChecker, +) + + +def command_coverage_combine(args): + """Patch paths in coverage files and merge into a single file. 
+ :type args: CoverageConfig + :rtype: list[str] + """ + paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args) + + for path in paths: + display.info('Generated combined output: %s' % path, verbosity=1) + + return paths + + +def _command_coverage_combine_python(args): + """ + :type args: CoverageConfig + :rtype: list[str] + """ + coverage = initialize_coverage(args) + + modules = get_python_modules() + + coverage_files = get_python_coverage_files() + + counter = 0 + sources = _get_coverage_targets(args, walk_compile_targets) + groups = _build_stub_groups(args, sources, lambda line_count: set()) + + collection_search_re, collection_sub_re = get_collection_path_regexes() + + for coverage_file in coverage_files: + counter += 1 + display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2) + + group = get_coverage_group(args, coverage_file) + + if group is None: + display.warning('Unexpected name for coverage file: %s' % coverage_file) + continue + + for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re): + if args.export: + filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems + + if group not in groups: + groups[group] = {} + + arc_data = groups[group] + + if filename not in arc_data: + arc_data[filename] = set() + + arc_data[filename].update(arcs) + + output_files = [] + + if args.export: + coverage_file = os.path.join(args.export, '') + suffix = '=coverage.combined' + else: + coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME) + suffix = '' + + path_checker = PathChecker(args, collection_search_re) + + for group in sorted(groups): + arc_data = groups[group] + + updated = coverage.CoverageData() + + for filename in arc_data: + if not path_checker.check_path(filename): + continue + + updated.add_arcs({filename: list(arc_data[filename])}) + + if args.all: + updated.add_arcs(dict((source[0], []) for source in sources)) + + if not args.explain: + output_file = coverage_file + group + suffix + updated.write_file(output_file) # always write files to make sure stale files do not exist + + if updated: + # only report files which are non-empty to prevent coverage from reporting errors + output_files.append(output_file) + + path_checker.report() + + return sorted(output_files) + + +def _command_coverage_combine_powershell(args): + """ + :type args: CoverageConfig + :rtype: list[str] + """ + coverage_files = get_powershell_coverage_files() + + def _default_stub_value(lines): + val = {} + for line in range(lines): + val[line] = 0 + return val + + counter = 0 + sources = _get_coverage_targets(args, walk_powershell_targets) + groups = _build_stub_groups(args, sources, _default_stub_value) + + collection_search_re, collection_sub_re = get_collection_path_regexes() + + for coverage_file in coverage_files: + counter += 1 + display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2) + + group = get_coverage_group(args, coverage_file) + + if group is None: + display.warning('Unexpected name for coverage file: %s' % coverage_file) + continue + + for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re): + if args.export: + filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems + + if group not in groups: + groups[group] = {} + + coverage_data = 
groups[group] + + if filename not in coverage_data: + coverage_data[filename] = {} + + file_coverage = coverage_data[filename] + + for line_no, hit_count in hits.items(): + file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count + + output_files = [] + + path_checker = PathChecker(args) + + for group in sorted(groups): + coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename)) + + if args.all: + # Add 0 line entries for files not in coverage_data + for source, source_line_count in sources: + if source in coverage_data: + continue + + coverage_data[source] = _default_stub_value(source_line_count) + + if not args.explain: + if args.export: + output_file = os.path.join(args.export, group + '=coverage.combined') + write_json_file(output_file, coverage_data, formatted=False) + output_files.append(output_file) + continue + + output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell' + + write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False) + + output_files.append(os.path.join(ResultType.COVERAGE.path, output_file)) + + path_checker.report() + + return sorted(output_files) + + +def _get_coverage_targets(args, walk_func): + """ + :type args: CoverageConfig + :type walk_func: Func + :rtype: list[tuple[str, int]] + """ + sources = [] + + if args.all or args.stub: + # excludes symlinks of regular files to avoid reporting on the same file multiple times + # in the future it would be nice to merge any coverage for symlinks into the real files + for target in walk_func(include_symlinks=False): + target_path = os.path.abspath(target.path) + + target_lines = len(read_text_file(target_path).splitlines()) + + sources.append((target_path, target_lines)) + + sources.sort() + + return sources + + +def _build_stub_groups(args, sources, default_stub_value): + """ + :type args: CoverageConfig + :type sources: List[tuple[str, int]] + :type default_stub_value: Func[int] + :rtype: dict + """ + groups = {} + + if args.stub: + stub_group = [] + stub_groups = [stub_group] + stub_line_limit = 500000 + stub_line_count = 0 + + for source, source_line_count in sources: + stub_group.append((source, source_line_count)) + stub_line_count += source_line_count + + if stub_line_count > stub_line_limit: + stub_line_count = 0 + stub_group = [] + stub_groups.append(stub_group) + + for stub_index, stub_group in enumerate(stub_groups): + if not stub_group: + continue + + groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count)) + for source, line_count in stub_group) + + return groups + + +def get_coverage_group(args, coverage_file): + """ + :type args: CoverageConfig + :type coverage_file: str + :rtype: str + """ + parts = os.path.basename(coverage_file).split('=', 4) + + # noinspection PyTypeChecker + if len(parts) != 5 or not parts[4].startswith('coverage.'): + return None + + names = dict( + command=parts[0], + target=parts[1], + environment=parts[2], + version=parts[3], + ) + + export_names = dict( + version=parts[3], + ) + + group = '' + + for part in COVERAGE_GROUPS: + if part in args.group_by: + group += '=%s' % names[part] + elif args.export: + group += '=%s' % export_names.get(part, 'various') + + if args.export: + group = group.lstrip('=') + + return group diff --git a/test/lib/ansible_test/_internal/coverage/erase.py b/test/lib/ansible_test/_internal/coverage/erase.py new file mode 100644 index 00000000..92d241c7 --- /dev/null +++ 
b/test/lib/ansible_test/_internal/coverage/erase.py @@ -0,0 +1,27 @@ +"""Erase code coverage files.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..util_common import ( + ResultType, +) + +from . import ( + CoverageConfig, +) + + +def command_coverage_erase(args): # type: (CoverageConfig) -> None + """Erase code coverage data files collected during test runs.""" + coverage_dir = ResultType.COVERAGE.path + + for name in os.listdir(coverage_dir): + if not name.startswith('coverage') and '=coverage.' not in name: + continue + + path = os.path.join(coverage_dir, name) + + if not args.explain: + os.remove(path) diff --git a/test/lib/ansible_test/_internal/coverage/html.py b/test/lib/ansible_test/_internal/coverage/html.py new file mode 100644 index 00000000..63956a19 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/html.py @@ -0,0 +1,45 @@ +"""Generate HTML code coverage reports.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..io import ( + make_dirs, +) + +from ..util import ( + display, +) + +from ..util_common import ( + ResultType, +) + +from .combine import ( + command_coverage_combine, +) + +from . import ( + run_coverage, + CoverageConfig, +) + + +def command_coverage_html(args): + """ + :type args: CoverageConfig + """ + output_files = command_coverage_combine(args) + + for output_file in output_files: + if output_file.endswith('-powershell'): + # coverage.py does not support non-Python files so we just skip the local html report. + display.info("Skipping output file %s in html generation" % output_file, verbosity=3) + continue + + dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file)) + make_dirs(dir_name) + run_coverage(args, output_file, 'html', ['-i', '-d', dir_name]) + + display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html')) diff --git a/test/lib/ansible_test/_internal/coverage/report.py b/test/lib/ansible_test/_internal/coverage/report.py new file mode 100644 index 00000000..24efa637 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/report.py @@ -0,0 +1,156 @@ +"""Generate console code coverage reports.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..io import ( + read_json_file, +) + +from ..util import ( + display, +) + +from ..data import ( + data_context, +) + +from .combine import ( + command_coverage_combine, +) + +from . 
import ( + run_coverage, + CoverageConfig, +) + + +def command_coverage_report(args): + """ + :type args: CoverageReportConfig + """ + output_files = command_coverage_combine(args) + + for output_file in output_files: + if args.group_by or args.stub: + display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:])) + + if output_file.endswith('-powershell'): + display.info(_generate_powershell_output_report(args, output_file)) + else: + options = [] + + if args.show_missing: + options.append('--show-missing') + + if args.include: + options.extend(['--include', args.include]) + + if args.omit: + options.extend(['--omit', args.omit]) + + run_coverage(args, output_file, 'report', options) + + +def _generate_powershell_output_report(args, coverage_file): + """ + :type args: CoverageReportConfig + :type coverage_file: str + :rtype: str + """ + coverage_info = read_json_file(coverage_file) + + root_path = data_context().content.root + '/' + + name_padding = 7 + cover_padding = 8 + + file_report = [] + total_stmts = 0 + total_miss = 0 + + for filename in sorted(coverage_info.keys()): + hit_info = coverage_info[filename] + + if filename.startswith(root_path): + filename = filename[len(root_path):] + + if args.omit and filename in args.omit: + continue + if args.include and filename not in args.include: + continue + + stmts = len(hit_info) + miss = len([c for c in hit_info.values() if c == 0]) + + name_padding = max(name_padding, len(filename) + 3) + + total_stmts += stmts + total_miss += miss + + cover = "{0}%".format(int((stmts - miss) / stmts * 100)) + + missing = [] + current_missing = None + sorted_lines = sorted([int(x) for x in hit_info.keys()]) + for idx, line in enumerate(sorted_lines): + hit = hit_info[str(line)] + if hit == 0 and current_missing is None: + current_missing = line + elif hit != 0 and current_missing is not None: + end_line = sorted_lines[idx - 1] + if current_missing == end_line: + missing.append(str(current_missing)) + else: + missing.append('%s-%s' % (current_missing, end_line)) + current_missing = None + + if current_missing is not None: + end_line = sorted_lines[-1] + if current_missing == end_line: + missing.append(str(current_missing)) + else: + missing.append('%s-%s' % (current_missing, end_line)) + + file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing}) + + if total_stmts == 0: + return '' + + total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100)) + stmts_padding = max(8, len(str(total_stmts))) + miss_padding = max(7, len(str(total_miss))) + + line_length = name_padding + stmts_padding + miss_padding + cover_padding + + header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \ + 'Cover'.rjust(cover_padding) + + if args.show_missing: + header += 'Lines Missing'.rjust(16) + line_length += 16 + + line_break = '-' * line_length + lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding), + str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding), + ' ' + ', '.join(f['missing']) if args.show_missing else '') + for f in file_report] + totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \ + str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding) + + report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals) + return report + + +class CoverageReportConfig(CoverageConfig): + """Configuration for the 
coverage report command.""" + def __init__(self, args): + """ + :type args: any + """ + super(CoverageReportConfig, self).__init__(args) + + self.show_missing = args.show_missing # type: bool + self.include = args.include # type: str + self.omit = args.omit # type: str diff --git a/test/lib/ansible_test/_internal/coverage/xml.py b/test/lib/ansible_test/_internal/coverage/xml.py new file mode 100644 index 00000000..94b5abc5 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage/xml.py @@ -0,0 +1,191 @@ +"""Generate XML code coverage reports.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import time + +from xml.etree.ElementTree import ( + Comment, + Element, + SubElement, + tostring, +) + +from xml.dom import ( + minidom, +) + +from ..io import ( + make_dirs, + read_json_file, +) + +from ..util_common import ( + ResultType, + write_text_test_results, +) + +from ..env import ( + get_ansible_version, +) + +from ..data import ( + data_context, +) + +from .combine import ( + command_coverage_combine, +) + +from . import ( + run_coverage, + CoverageConfig, +) + + +def command_coverage_xml(args): + """ + :type args: CoverageConfig + """ + output_files = command_coverage_combine(args) + + for output_file in output_files: + xml_name = '%s.xml' % os.path.basename(output_file) + if output_file.endswith('-powershell'): + report = _generate_powershell_xml(output_file) + + rough_string = tostring(report, 'utf-8') + reparsed = minidom.parseString(rough_string) + pretty = reparsed.toprettyxml(indent=' ') + + write_text_test_results(ResultType.REPORTS, xml_name, pretty) + else: + xml_path = os.path.join(ResultType.REPORTS.path, xml_name) + make_dirs(ResultType.REPORTS.path) + run_coverage(args, output_file, 'xml', ['-i', '-o', xml_path]) + + +def _generate_powershell_xml(coverage_file): + """ + :type coverage_file: str + :rtype: Element + """ + coverage_info = read_json_file(coverage_file) + + content_root = data_context().content.root + is_ansible = data_context().content.is_ansible + + packages = {} + for path, results in coverage_info.items(): + filename = os.path.splitext(os.path.basename(path))[0] + + if filename.startswith('Ansible.ModuleUtils'): + package = 'ansible.module_utils' + elif is_ansible: + package = 'ansible.modules' + else: + rel_path = path[len(content_root) + 1:] + plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils" + package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type) + + if package not in packages: + packages[package] = {} + + packages[package][path] = results + + elem_coverage = Element('coverage') + elem_coverage.append( + Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ ')) + elem_coverage.append( + Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd ')) + + elem_sources = SubElement(elem_coverage, 'sources') + + elem_source = SubElement(elem_sources, 'source') + elem_source.text = data_context().content.root + + elem_packages = SubElement(elem_coverage, 'packages') + + total_lines_hit = 0 + total_line_count = 0 + + for package_name, package_data in packages.items(): + lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data) + + total_lines_hit += lines_hit + total_line_count += line_count + + elem_coverage.attrib.update({ + 'branch-rate': '0', + 'branches-covered': '0', + 'branches-valid': '0', + 'complexity': 
'0', + 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0", + 'lines-covered': str(total_lines_hit), + 'lines-valid': str(total_line_count), + 'timestamp': str(int(time.time())), + 'version': get_ansible_version(), + }) + + return elem_coverage + + +def _add_cobertura_package(packages, package_name, package_data): + """ + :type packages: SubElement + :type package_name: str + :type package_data: Dict[str, Dict[str, int]] + :rtype: Tuple[int, int] + """ + elem_package = SubElement(packages, 'package') + elem_classes = SubElement(elem_package, 'classes') + + total_lines_hit = 0 + total_line_count = 0 + + for path, results in package_data.items(): + lines_hit = len([True for hits in results.values() if hits]) + line_count = len(results) + + total_lines_hit += lines_hit + total_line_count += line_count + + elem_class = SubElement(elem_classes, 'class') + + class_name = os.path.splitext(os.path.basename(path))[0] + if class_name.startswith("Ansible.ModuleUtils"): + class_name = class_name[20:] + + content_root = data_context().content.root + filename = path + if filename.startswith(content_root): + filename = filename[len(content_root) + 1:] + + elem_class.attrib.update({ + 'branch-rate': '0', + 'complexity': '0', + 'filename': filename, + 'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0", + 'name': class_name, + }) + + SubElement(elem_class, 'methods') + + elem_lines = SubElement(elem_class, 'lines') + + for number, hits in results.items(): + elem_line = SubElement(elem_lines, 'line') + elem_line.attrib.update( + hits=str(hits), + number=str(number), + ) + + elem_package.attrib.update({ + 'branch-rate': '0', + 'complexity': '0', + 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0", + 'name': package_name, + }) + + return total_lines_hit, total_line_count diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py new file mode 100644 index 00000000..e5434231 --- /dev/null +++ b/test/lib/ansible_test/_internal/coverage_util.py @@ -0,0 +1,125 @@ +"""Utility code for facilitating collection of code coverage when running tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import contextlib +import os +import tempfile + +from .config import ( + IntegrationConfig, + SanityConfig, + TestConfig, +) + +from .io import ( + write_text_file, +) + +from .util import ( + COVERAGE_CONFIG_NAME, + remove_tree, +) + +from .data import ( + data_context, +) + + +@contextlib.contextmanager +def coverage_context(args): # type: (TestConfig) -> None + """Context manager to set up and clean up code coverage configuration for tests.""" + coverage_setup(args) + + try: + yield + finally: + coverage_cleanup(args) + + +def coverage_setup(args): # type: (TestConfig) -> None + """Set up code coverage configuration before running tests.""" + if not args.coverage: + return + + coverage_config = generate_coverage_config(args) + + if args.explain: + args.coverage_config_base_path = '/tmp/coverage-temp-dir' + else: + args.coverage_config_base_path = tempfile.mkdtemp() + + write_text_file(os.path.join(args.coverage_config_base_path, COVERAGE_CONFIG_NAME), coverage_config) + + +def coverage_cleanup(args): # type: (TestConfig) -> None + """Clean up code coverage configuration after tests have finished.""" + if args.coverage_config_base_path and not args.explain: + remove_tree(args.coverage_config_base_path) + 
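+
+# --- Editor's sketch of typical usage (hypothetical `args` and `cmd` from a
+# --- caller): coverage_context() above pairs setup and cleanup around a run:
+#
+#   with coverage_context(args):
+#       run_command(args, cmd, env=env)  # temporary .coveragerc exists only here
+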
args.coverage_config_base_path = None + + +def generate_coverage_config(args): # type: (TestConfig) -> str + """Generate code coverage configuration for tests.""" + if data_context().content.collection: + coverage_config = generate_collection_coverage_config(args) + else: + coverage_config = generate_ansible_coverage_config() + + return coverage_config + + +def generate_ansible_coverage_config(): # type: () -> str + """Generate code coverage configuration for Ansible tests.""" + coverage_config = ''' +[run] +branch = True +concurrency = multiprocessing +parallel = True + +omit = + */python*/dist-packages/* + */python*/site-packages/* + */python*/distutils/* + */pyshared/* + */pytest + */AnsiballZ_*.py + */test/results/* +''' + + return coverage_config + + +def generate_collection_coverage_config(args): # type: (TestConfig) -> str + """Generate code coverage configuration for Ansible Collection tests.""" + coverage_config = ''' +[run] +branch = True +concurrency = multiprocessing +parallel = True +disable_warnings = + no-data-collected +''' + + if isinstance(args, IntegrationConfig): + coverage_config += ''' +include = + %s/* + */%s/* +''' % (data_context().content.root, data_context().content.collection.directory) + elif isinstance(args, SanityConfig): + # temporary work-around for import sanity test + coverage_config += ''' +include = + %s/* + +omit = + %s/* +''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path)) + else: + coverage_config += ''' +include = + %s/* +''' % data_context().content.root + + return coverage_config diff --git a/test/lib/ansible_test/_internal/csharp_import_analysis.py b/test/lib/ansible_test/_internal/csharp_import_analysis.py new file mode 100644 index 00000000..daa8892c --- /dev/null +++ b/test/lib/ansible_test/_internal/csharp_import_analysis.py @@ -0,0 +1,106 @@ +"""Analyze C# import statements.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re + +from .io import ( + open_text_file, +) + +from .util import ( + display, +) + +from .util_common import ( + resolve_csharp_ps_util, +) + +from .data import ( + data_context, +) + + +def get_csharp_module_utils_imports(powershell_targets, csharp_targets): + """Return a dictionary of module_utils names mapped to sets of powershell file paths. + :type powershell_targets: list[TestTarget] - PS files + :type csharp_targets: list[TestTarget] - C# files + :rtype: dict[str, set[str]] + """ + + module_utils = enumerate_module_utils() + + imports_by_target_path = {} + + for target in powershell_targets: + imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False) + + for target in csharp_targets: + imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True) + + imports = dict([(module_util, set()) for module_util in module_utils]) + + for target_path in imports_by_target_path: + for module_util in imports_by_target_path[target_path]: + imports[module_util].add(target_path) + + for module_util in sorted(imports): + if not imports[module_util]: + display.warning('No imports found which use the "%s" module_util.' 
% module_util) + + return imports + + +def get_csharp_module_utils_name(path): # type: (str) -> str + """Return a namespace and name from the given module_utils path.""" + base_path = data_context().content.module_utils_csharp_path + + if data_context().content.collection: + prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.' + else: + prefix = '' + + name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.') + + return name + + +def enumerate_module_utils(): + """Return a list of available module_utils imports. + :rtype: set[str] + """ + return set(get_csharp_module_utils_name(p) + for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path) + if os.path.splitext(p)[1] == '.cs') + + +def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp): + """Return a list of module_utils imports found in the specified source file. + :type path: str + :type module_utils: set[str] + :type is_pure_csharp: bool + :rtype: set[str] + """ + imports = set() + if is_pure_csharp: + pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$') + else: + pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)') + + with open_text_file(path) as module_file: + for line_number, line in enumerate(module_file, 1): + match = re.search(pattern, line) + + if not match: + continue + + import_name = resolve_csharp_ps_util(match.group(1), path) + + if import_name in module_utils: + imports.add(import_name) + elif data_context().content.is_ansible or \ + import_name.startswith('ansible_collections.%s' % data_context().content.prefix): + display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name)) + + return imports diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py new file mode 100644 index 00000000..38ae6d21 --- /dev/null +++ b/test/lib/ansible_test/_internal/data.py @@ -0,0 +1,200 @@ +"""Context information for the current invocation of ansible-test.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . 
import types as t + +from .util import ( + ApplicationError, + import_plugins, + is_subdir, + ANSIBLE_LIB_ROOT, + ANSIBLE_TEST_ROOT, + ANSIBLE_SOURCE_ROOT, + display, +) + +from .provider import ( + find_path_provider, + get_path_provider_classes, + ProviderNotFoundForPath, +) + +from .provider.source import ( + SourceProvider, +) + +from .provider.source.unversioned import ( + UnversionedSource, +) + +from .provider.source.installed import ( + InstalledSource, +) + +from .provider.layout import ( + ContentLayout, + LayoutProvider, +) + + +class DataContext: + """Data context providing details about the current execution environment for ansible-test.""" + def __init__(self): + content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT') + current_path = os.getcwd() + + layout_providers = get_path_provider_classes(LayoutProvider) + source_providers = get_path_provider_classes(SourceProvider) + + self.__layout_providers = layout_providers + self.__source_providers = source_providers + self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]] + + self.payload_callbacks = [] # type: t.List[t.Callable[t.List[t.Tuple[str, str]], None]] + + if content_path: + content = self.__create_content_layout(layout_providers, source_providers, content_path, False) + elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT): + content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False) + else: + content = self.__create_content_layout(layout_providers, source_providers, current_path, True) + + self.content = content # type: ContentLayout + + def create_collection_layouts(self): # type: () -> t.List[ContentLayout] + """ + Return a list of collection layouts, one for each collection in the same collection root as the current collection layout. + An empty list is returned if the current content layout is not a collection layout. + """ + layout = self.content + collection = layout.collection + + if not collection: + return [] + + root_path = os.path.join(collection.root, 'ansible_collections') + display.info('Scanning collection root: %s' % root_path, verbosity=1) + namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name))) + collections = [] + + for namespace_name in namespace_names: + namespace_path = os.path.join(root_path, namespace_name) + collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name))) + + for collection_name in collection_names: + collection_path = os.path.join(namespace_path, collection_name) + + if collection_path == os.path.join(collection.root, collection.directory): + collection_layout = layout + else: + collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False) + + file_count = len(collection_layout.all_files()) + + if not file_count: + continue + + display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1) + collections.append(collection_layout) + + return collections + + @staticmethod + def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]] + source_providers, # type: t.List[t.Type[SourceProvider]] + root, # type: str + walk, # type: bool + ): # type: (...) 
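+
+# --- Editor's note, summarizing DataContext.__init__ above: the content root is
+# --- selected in this order:
+#   1. the ANSIBLE_TEST_CONTENT_ROOT environment variable, when set
+#   2. ANSIBLE_SOURCE_ROOT, when the current directory is inside the Ansible source
+#   3. otherwise the current working directory, with provider discovery walking up
+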
-> ContentLayout + """Create a content layout using the given providers and root path.""" + layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk) + + try: + # Begin the search for the source provider at the layout provider root. + # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error. + # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project. + # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control. + source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk) + except ProviderNotFoundForPath: + source_provider = UnversionedSource(layout_provider.root) + + layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root)) + + return layout + + def __create_ansible_source(self): + """Return a tuple of Ansible source files with both absolute and relative paths.""" + if not ANSIBLE_SOURCE_ROOT: + sources = [] + + source_provider = InstalledSource(ANSIBLE_LIB_ROOT) + sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path)) + for path in source_provider.get_paths(source_provider.root)) + + source_provider = InstalledSource(ANSIBLE_TEST_ROOT) + sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path)) + for path in source_provider.get_paths(source_provider.root)) + + return tuple(sources) + + if self.content.is_ansible: + return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files()) + + try: + source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False) + except ProviderNotFoundForPath: + source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT) + + return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root)) + + @property + def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...] 
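+
+# --- Editor's note: the ansible_source property defined here memoizes the result
+# --- of __create_ansible_source(), so the (absolute_path, relative_path) pairs
+# --- are computed at most once per process; an assumed example pair:
+#
+#   ('/usr/lib/python3/dist-packages/ansible/cli/__init__.py', 'lib/ansible/cli/__init__.py')
+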
+ """Return a tuple of Ansible source files with both absolute and relative paths.""" + if not self.__ansible_source: + self.__ansible_source = self.__create_ansible_source() + + return self.__ansible_source + + def register_payload_callback(self, callback): # type: (t.Callable[t.List[t.Tuple[str, str]], None]) -> None + """Register the given payload callback.""" + self.payload_callbacks.append(callback) + + +def data_init(): # type: () -> DataContext + """Initialize provider plugins.""" + provider_types = ( + 'layout', + 'source', + ) + + for provider_type in provider_types: + import_plugins('provider/%s' % provider_type) + + try: + context = DataContext() + except ProviderNotFoundForPath: + options = [ + ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/', + ] + + if ANSIBLE_SOURCE_ROOT: + options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT) + + raise ApplicationError('''The current working directory must be at or below: + +%s + +Current working directory: %s''' % ('\n'.join(options), os.getcwd())) + + return context + + +def data_context(): # type: () -> DataContext + """Return the current data context.""" + try: + return data_context.instance + except AttributeError: + data_context.instance = data_init() + return data_context.instance diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py new file mode 100644 index 00000000..3262dd51 --- /dev/null +++ b/test/lib/ansible_test/_internal/delegation.py @@ -0,0 +1,667 @@ +"""Delegate test execution to another environment.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import sys +import tempfile + +from . import types as t + +from .io import ( + make_dirs, +) + +from .executor import ( + SUPPORTED_PYTHON_VERSIONS, + HTTPTESTER_HOSTS, + create_shell_command, + run_httptester, + start_httptester, + get_python_interpreter, + get_python_version, +) + +from .config import ( + TestConfig, + EnvironmentConfig, + IntegrationConfig, + WindowsIntegrationConfig, + NetworkIntegrationConfig, + ShellConfig, + SanityConfig, + UnitsConfig, +) + +from .core_ci import ( + AnsibleCoreCI, +) + +from .manage_ci import ( + ManagePosixCI, + ManageWindowsCI, +) + +from .util import ( + ApplicationError, + common_environment, + display, + ANSIBLE_BIN_PATH, + ANSIBLE_TEST_DATA_ROOT, + ANSIBLE_LIB_ROOT, + ANSIBLE_TEST_ROOT, + tempdir, +) + +from .util_common import ( + run_command, + ResultType, + create_interpreter_wrapper, + get_docker_completion, + get_remote_completion, +) + +from .docker_util import ( + docker_exec, + docker_get, + docker_pull, + docker_put, + docker_rm, + docker_run, + docker_available, + docker_network_disconnect, + get_docker_networks, + get_docker_preferred_network_name, + get_docker_hostname, + is_docker_user_defined_network, +) + +from .cloud import ( + get_cloud_providers, +) + +from .target import ( + IntegrationTarget, +) + +from .data import ( + data_context, +) + +from .payload import ( + create_payload, +) + +from .venv import ( + create_virtual_environment, +) + +from .ci import ( + get_ci_provider, +) + + +def check_delegation_args(args): + """ + :type args: CommonConfig + """ + if not isinstance(args, EnvironmentConfig): + return + + if args.docker: + get_python_version(args, get_docker_completion(), args.docker_raw) + elif args.remote: + get_python_version(args, get_remote_completion(), args.remote) + + +def delegate(args, exclude, require, integration_targets): + 
""" + :type args: EnvironmentConfig + :type exclude: list[str] + :type require: list[str] + :type integration_targets: tuple[IntegrationTarget] + :rtype: bool + """ + if isinstance(args, TestConfig): + args.metadata.ci_provider = get_ci_provider().code + + make_dirs(ResultType.TMP.path) + + with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd: + args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name)) + args.metadata.to_file(args.metadata_path) + + try: + return delegate_command(args, exclude, require, integration_targets) + finally: + args.metadata_path = None + else: + return delegate_command(args, exclude, require, integration_targets) + + +def delegate_command(args, exclude, require, integration_targets): + """ + :type args: EnvironmentConfig + :type exclude: list[str] + :type require: list[str] + :type integration_targets: tuple[IntegrationTarget] + :rtype: bool + """ + if args.venv: + delegate_venv(args, exclude, require, integration_targets) + return True + + if args.docker: + delegate_docker(args, exclude, require, integration_targets) + return True + + if args.remote: + delegate_remote(args, exclude, require, integration_targets) + return True + + return False + + +def delegate_venv(args, # type: EnvironmentConfig + exclude, # type: t.List[str] + require, # type: t.List[str] + integration_targets, # type: t.Tuple[IntegrationTarget, ...] + ): # type: (...) -> None + """Delegate ansible-test execution to a virtual environment using venv or virtualenv.""" + if args.python: + versions = (args.python_version,) + else: + versions = SUPPORTED_PYTHON_VERSIONS + + if args.httptester: + needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases) + + if needs_httptester: + display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester)) + + if args.venv_system_site_packages: + suffix = '-ssp' + else: + suffix = '' + + venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s%s' % (version, suffix))) for version in versions) + venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path, args.venv_system_site_packages)) + + if not venvs: + raise ApplicationError('No usable virtual environment support found.') + + options = { + '--venv': 0, + '--venv-system-site-packages': 0, + } + + with tempdir() as inject_path: + for version, path in venvs.items(): + create_interpreter_wrapper(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version)) + + python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version) + + cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require) + + if isinstance(args, TestConfig): + if args.coverage and not args.coverage_label: + cmd += ['--coverage-label', 'venv'] + + env = common_environment() + + with tempdir() as library_path: + # expose ansible and ansible_test to the virtual environment (only required when running from an install) + os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible')) + os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test')) + + env.update( + PATH=inject_path + os.path.pathsep + env['PATH'], + PYTHONPATH=library_path, + ) + + run_command(args, cmd, env=env) + + +def delegate_docker(args, exclude, require, 
integration_targets): + """ + :type args: EnvironmentConfig + :type exclude: list[str] + :type require: list[str] + :type integration_targets: tuple[IntegrationTarget] + """ + test_image = args.docker + privileged = args.docker_privileged + + if isinstance(args, ShellConfig): + use_httptester = args.httptester + else: + use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) + + if use_httptester: + docker_pull(args, args.httptester) + + docker_pull(args, test_image) + + httptester_id = None + test_id = None + success = False + + options = { + '--docker': 1, + '--docker-privileged': 0, + '--docker-util': 1, + } + + python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw) + + pwd = '/root' + ansible_root = os.path.join(pwd, 'ansible') + + if data_context().content.collection: + content_root = os.path.join(pwd, data_context().content.collection.directory) + else: + content_root = ansible_root + + remote_results_root = os.path.join(content_root, data_context().content.results_path) + + cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require) + + if isinstance(args, TestConfig): + if args.coverage and not args.coverage_label: + image_label = args.docker_raw + image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label) + cmd += ['--coverage-label', 'docker-%s' % image_label] + + if isinstance(args, IntegrationConfig): + if not args.allow_destructive: + cmd.append('--allow-destructive') + + cmd_options = [] + + if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy): + cmd_options.append('-it') + + with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd: + try: + create_payload(args, local_source_fd.name) + + if use_httptester: + httptester_id = run_httptester(args) + else: + httptester_id = None + + test_options = [ + '--detach', + '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro', + '--privileged=%s' % str(privileged).lower(), + ] + + if args.docker_memory: + test_options.extend([ + '--memory=%d' % args.docker_memory, + '--memory-swap=%d' % args.docker_memory, + ]) + + docker_socket = '/var/run/docker.sock' + + if args.docker_seccomp != 'default': + test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp] + + if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket): + test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)] + + if httptester_id: + test_options += ['--env', 'HTTPTESTER=1'] + + network = get_docker_preferred_network_name(args) + + if not is_docker_user_defined_network(network): + # legacy links are required when using the default bridge network instead of user-defined networks + for host in HTTPTESTER_HOSTS: + test_options += ['--link', '%s:%s' % (httptester_id, host)] + + if isinstance(args, IntegrationConfig): + cloud_platforms = get_cloud_providers(args) + + for cloud_platform in cloud_platforms: + test_options += cloud_platform.get_docker_run_options() + + test_id = docker_run(args, test_image, options=test_options)[0] + + if args.explain: + test_id = 'test_id' + else: + test_id = test_id.strip() + + # write temporary files to /root since /tmp isn't ready immediately on container start + docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh') + docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh']) + docker_put(args, test_id, local_source_fd.name, 
'/root/test.tgz') + docker_exec(args, test_id, ['tar', 'oxzf', '/root/test.tgz', '-C', '/root']) + + # docker images are only expected to have a single python version available + if isinstance(args, UnitsConfig) and not args.python: + cmd += ['--python', 'default'] + + # run unit tests unprivileged to prevent stray writes to the source tree + # also disconnect from the network once requirements have been installed + if isinstance(args, UnitsConfig): + writable_dirs = [ + os.path.join(content_root, ResultType.JUNIT.relative_path), + os.path.join(content_root, ResultType.COVERAGE.relative_path), + ] + + docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs) + docker_exec(args, test_id, ['chmod', '777'] + writable_dirs) + docker_exec(args, test_id, ['chmod', '755', '/root']) + docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)]) + + docker_exec(args, test_id, ['useradd', 'pytest', '--create-home']) + + docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options) + + networks = get_docker_networks(args, test_id) + + if networks is not None: + for network in networks: + docker_network_disconnect(args, test_id, network) + else: + display.warning('Network disconnection is not supported (this is normal under podman). ' + 'Tests will not be isolated from the network. Network-related tests may misbehave.') + + cmd += ['--requirements-mode', 'skip'] + + cmd_options += ['--user', 'pytest'] + + try: + docker_exec(args, test_id, cmd, options=cmd_options) + # docker_exec will throw SubprocessError if not successful + # If we make it here, all the prep work earlier and the docker_exec line above were all successful. + success = True + finally: + local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) + + remote_test_root = os.path.dirname(remote_results_root) + remote_results_name = os.path.basename(remote_results_root) + remote_temp_file = os.path.join('/root', remote_results_name + '.tgz') + + make_dirs(local_test_root) # make sure directory exists for collections which have no tests + + with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd: + docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root, remote_results_name]) + docker_get(args, test_id, remote_temp_file, local_result_fd.name) + run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root]) + finally: + if httptester_id: + docker_rm(args, httptester_id) + + if test_id: + if args.docker_terminate == 'always' or (args.docker_terminate == 'success' and success): + docker_rm(args, test_id) + + +def delegate_remote(args, exclude, require, integration_targets): + """ + :type args: EnvironmentConfig + :type exclude: list[str] + :type require: list[str] + :type integration_targets: tuple[IntegrationTarget] + """ + remote = args.parsed_remote + + core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch) + success = False + raw = False + + if isinstance(args, ShellConfig): + use_httptester = args.httptester + raw = args.raw + else: + use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) + + if use_httptester and not docker_available(): + display.warning('Assuming --disable-httptester since `docker` is not available.') + use_httptester = False + + httptester_id = None + 
ssh_options = [] + content_root = None + + try: + core_ci.start() + + if use_httptester: + httptester_id, ssh_options = start_httptester(args) + + core_ci.wait() + + python_version = get_python_version(args, get_remote_completion(), args.remote) + + if remote.platform == 'windows': + # Windows doesn't need the ansible-test fluff, just run the SSH command + manage = ManageWindowsCI(core_ci) + manage.setup(python_version) + + cmd = ['powershell.exe'] + elif raw: + manage = ManagePosixCI(core_ci) + manage.setup(python_version) + + cmd = create_shell_command(['bash']) + else: + manage = ManagePosixCI(core_ci) + pwd = manage.setup(python_version) + + options = { + '--remote': 1, + } + + python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote) + + ansible_root = os.path.join(pwd, 'ansible') + + if data_context().content.collection: + content_root = os.path.join(pwd, data_context().content.collection.directory) + else: + content_root = ansible_root + + cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require) + + if httptester_id: + cmd += ['--inject-httptester'] + + if isinstance(args, TestConfig): + if args.coverage and not args.coverage_label: + cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)] + + if isinstance(args, IntegrationConfig): + if not args.allow_destructive: + cmd.append('--allow-destructive') + + # remote instances are only expected to have a single python version available + if isinstance(args, UnitsConfig) and not args.python: + cmd += ['--python', 'default'] + + if isinstance(args, IntegrationConfig): + cloud_platforms = get_cloud_providers(args) + + for cloud_platform in cloud_platforms: + ssh_options += cloud_platform.get_remote_ssh_options() + + try: + manage.ssh(cmd, ssh_options) + success = True + finally: + download = False + + if remote.platform != 'windows': + download = True + + if isinstance(args, ShellConfig): + if args.raw: + download = False + + if download and content_root: + local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) + + remote_results_root = os.path.join(content_root, data_context().content.results_path) + remote_results_name = os.path.basename(remote_results_root) + remote_temp_path = os.path.join('/tmp', remote_results_name) + + # AIX cp and GNU cp provide different options, no way could be found to have a common + # pattern and achieve the same goal + cp_opts = '-hr' if remote.platform in ['aix', 'ibmi'] else '-a' + + manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root)) + manage.download(remote_temp_path, local_test_root) + finally: + if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success): + core_ci.stop() + + if httptester_id: + docker_rm(args, httptester_id) + + +def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require): + """ + :type args: EnvironmentConfig + :type python_interpreter: str | None + :type ansible_bin_path: str + :type content_root: str + :type options: dict[str, int] + :type exclude: list[str] + :type require: list[str] + :rtype: list[str] + """ + options['--color'] = 1 + + cmd = [os.path.join(ansible_bin_path, 'ansible-test')] + + if python_interpreter: + cmd = [python_interpreter] + cmd + + # Force the encoding used during delegation. 
+ # This is only needed because ansible-test relies on Python's file system encoding. + # Environments that do not have the locale configured are thus unable to work with unicode file paths. + # Examples include FreeBSD and some Linux containers. + env_vars = dict( + LC_ALL='en_US.UTF-8', + ANSIBLE_TEST_CONTENT_ROOT=content_root, + ) + + env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)] + + cmd = ['/usr/bin/env'] + env_args + cmd + + cmd += list(filter_options(args, sys.argv[1:], options, exclude, require)) + cmd += ['--color', 'yes' if args.color else 'no'] + + if args.requirements: + cmd += ['--requirements'] + + if isinstance(args, ShellConfig): + cmd = create_shell_command(cmd) + elif isinstance(args, SanityConfig): + base_branch = args.base_branch or get_ci_provider().get_base_branch() + + if base_branch: + cmd += ['--base-branch', base_branch] + + return cmd + + +def filter_options(args, argv, options, exclude, require): + """ + :type args: EnvironmentConfig + :type argv: list[str] + :type options: dict[str, int] + :type exclude: list[str] + :type require: list[str] + :rtype: collections.Iterable[str] + """ + options = options.copy() + + options['--requirements'] = 0 + options['--truncate'] = 1 + options['--redact'] = 0 + options['--no-redact'] = 0 + + if isinstance(args, TestConfig): + options.update({ + '--changed': 0, + '--tracked': 0, + '--untracked': 0, + '--ignore-committed': 0, + '--ignore-staged': 0, + '--ignore-unstaged': 0, + '--changed-from': 1, + '--changed-path': 1, + '--metadata': 1, + '--exclude': 1, + '--require': 1, + }) + elif isinstance(args, SanityConfig): + options.update({ + '--base-branch': 1, + }) + + if isinstance(args, IntegrationConfig): + options.update({ + '--no-temp-unicode': 0, + '--no-pip-check': 0, + }) + + if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)): + options.update({ + '--inventory': 1, + }) + + remaining = 0 + + for arg in argv: + if not arg.startswith('-') and remaining: + remaining -= 1 + continue + + remaining = 0 + + parts = arg.split('=', 1) + key = parts[0] + + if key in options: + remaining = options[key] - len(parts) + 1 + continue + + yield arg + + for arg in args.delegate_args: + yield arg + + for target in exclude: + yield '--exclude' + yield target + + for target in require: + yield '--require' + yield target + + if isinstance(args, TestConfig): + if args.metadata_path: + yield '--metadata' + yield args.metadata_path + + yield '--truncate' + yield '%d' % args.truncate + + if args.redact: + yield '--redact' + else: + yield '--no-redact' + + if isinstance(args, IntegrationConfig): + if args.no_temp_unicode: + yield '--no-temp-unicode' + + if not args.pip_check: + yield '--no-pip-check' diff --git a/test/lib/ansible_test/_internal/diff.py b/test/lib/ansible_test/_internal/diff.py new file mode 100644 index 00000000..1e2038b9 --- /dev/null +++ b/test/lib/ansible_test/_internal/diff.py @@ -0,0 +1,256 @@ +"""Diff parsing functions and classes.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import textwrap +import traceback + +from . 
import types as t + + from .util import ( + ApplicationError, + ) + + + def parse_diff(lines): + """ + :type lines: list[str] + :rtype: list[FileDiff] + """ + return DiffParser(lines).files + + + class FileDiff: + """Parsed diff for a single file.""" + def __init__(self, old_path, new_path): + """ + :type old_path: str + :type new_path: str + """ + self.old = DiffSide(old_path, new=False) + self.new = DiffSide(new_path, new=True) + self.headers = [] # type: t.List[str] + self.binary = False + + def append_header(self, line): + """ + :type line: str + """ + self.headers.append(line) + + @property + def is_complete(self): + """ + :rtype: bool + """ + return self.old.is_complete and self.new.is_complete + + + class DiffSide: + """Parsed diff for a single 'side' of a single file.""" + def __init__(self, path, new): + """ + :type path: str + :type new: bool + """ + self.path = path + self.new = new + self.prefix = '+' if self.new else '-' + self.eof_newline = True + self.exists = True + + self.lines = [] # type: t.List[t.Tuple[int, str]] + self.lines_and_context = [] # type: t.List[t.Tuple[int, str]] + self.ranges = [] # type: t.List[t.Tuple[int, int]] + + self._next_line_number = 0 + self._lines_remaining = 0 + self._range_start = 0 + + def set_start(self, line_start, line_count): + """ + :type line_start: int + :type line_count: int + """ + self._next_line_number = line_start + self._lines_remaining = line_count + self._range_start = 0 + + def append(self, line): + """ + :type line: str + """ + if self._lines_remaining <= 0: + raise Exception('Diff range overflow.') + + entry = self._next_line_number, line + + if line.startswith(' '): + pass + elif line.startswith(self.prefix): + self.lines.append(entry) + + if not self._range_start: + self._range_start = self._next_line_number + else: + raise Exception('Unexpected diff content prefix.') + + self.lines_and_context.append(entry) + + self._lines_remaining -= 1 + + if self._range_start: + if self.is_complete: + range_end = self._next_line_number + elif line.startswith(' '): + range_end = self._next_line_number - 1 + else: + range_end = 0 + + if range_end: + self.ranges.append((self._range_start, range_end)) + self._range_start = 0 + + self._next_line_number += 1 + + @property + def is_complete(self): + """ + :rtype: bool + """ + return self._lines_remaining == 0 + + def format_lines(self, context=True): + """ + :type context: bool + :rtype: list[str] + """ + if context: + lines = self.lines_and_context + else: + lines = self.lines + + return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines] + + + class DiffParser: + """Parse diff lines.""" + def __init__(self, lines): + """ + :type lines: list[str] + """ + self.lines = lines + self.files = [] # type: t.List[FileDiff] + + self.action = self.process_start + self.line_number = 0 + self.previous_line = None # type: t.Optional[str] + self.line = None # type: t.Optional[str] + self.file = None # type: t.Optional[FileDiff] + + for self.line in self.lines: + self.line_number += 1 + + try: + self.action() + except Exception as ex: + message = textwrap.dedent(''' + %s + + Line: %d + Previous: %s + Current: %s + %s + ''').strip() % ( + ex, + self.line_number, + self.previous_line or '', + self.line or '', + traceback.format_exc(), + ) + + raise ApplicationError(message.strip()) + + self.previous_line = self.line + + self.complete_file() + + def process_start(self): + """Process a diff start line.""" + self.complete_file() + + match = re.search(r'^diff --git "?a/(?P<old_path>.*)"?
"?b/(?P<new_path>.*)"?$', self.line) + + if not match: + raise Exception('Unexpected diff start line.') + + self.file = FileDiff(match.group('old_path'), match.group('new_path')) + self.action = self.process_continue + + def process_range(self): + """Process a diff range line.""" + match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line) + + if not match: + raise Exception('Unexpected diff range line.') + + self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count'))) + self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count'))) + self.action = self.process_content + + def process_continue(self): + """Process a diff start, range or header line.""" + if self.line.startswith('diff '): + self.process_start() + elif self.line.startswith('@@ '): + self.process_range() + else: + self.process_header() + + def process_header(self): + """Process a diff header line.""" + if self.line.startswith('Binary files '): + self.file.binary = True + elif self.line == '--- /dev/null': + self.file.old.exists = False + elif self.line == '+++ /dev/null': + self.file.new.exists = False + else: + self.file.append_header(self.line) + + def process_content(self): + """Process a diff content line.""" + if self.line == r'\ No newline at end of file': + if self.previous_line.startswith(' '): + self.file.old.eof_newline = False + self.file.new.eof_newline = False + elif self.previous_line.startswith('-'): + self.file.old.eof_newline = False + elif self.previous_line.startswith('+'): + self.file.new.eof_newline = False + else: + raise Exception('Unexpected previous diff content line.') + + return + + if self.file.is_complete: + self.process_continue() + return + + if self.line.startswith(' '): + self.file.old.append(self.line) + self.file.new.append(self.line) + elif self.line.startswith('-'): + self.file.old.append(self.line) + elif self.line.startswith('+'): + self.file.new.append(self.line) + else: + raise Exception('Unexpected diff content line.') + + def complete_file(self): + """Complete processing of the current file, if any.""" + if not self.file: + return + + self.files.append(self.file) diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py new file mode 100644 index 00000000..1b47364d --- /dev/null +++ b/test/lib/ansible_test/_internal/docker_util.py @@ -0,0 +1,409 @@ +"""Functions for accessing docker via the docker cli.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import time + +from .io import ( + open_binary_file, + read_text_file, +) + +from .util import ( + ApplicationError, + common_environment, + display, + find_executable, + SubprocessError, +) + +from .http import ( + urlparse, +) + +from .util_common import ( + run_command, +) + +from .config import ( + EnvironmentConfig, +) + +BUFFER_SIZE = 256 * 256 + + +def docker_available(): + """ + :rtype: bool + """ + return find_executable('docker', required=False) + + +def get_docker_hostname(): # type: () -> str + """Return the hostname of the Docker service.""" + try: + return get_docker_hostname.hostname + except AttributeError: + pass + + docker_host = os.environ.get('DOCKER_HOST') + + if docker_host and docker_host.startswith('tcp://'): + try: + hostname = urlparse(docker_host)[1].split(':')[0] + display.info('Detected Docker host: %s' % hostname, verbosity=1) + except ValueError: + hostname = 'localhost' + display.warning('Could not parse
DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host) + else: + hostname = 'localhost' + display.info('Assuming Docker is available on localhost.', verbosity=1) + + get_docker_hostname.hostname = hostname + + return hostname + + +def get_docker_container_id(): + """ + :rtype: str | None + """ + try: + return get_docker_container_id.container_id + except AttributeError: + pass + + path = '/proc/self/cpuset' + container_id = None + + if os.path.exists(path): + # File content varies based on the environment: + # No Container: / + # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507 + # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891 + # Podman: /../../../../../.. + contents = read_text_file(path) + + cgroup_path, cgroup_name = os.path.split(contents.strip()) + + if cgroup_path in ('/docker', '/azpl_job'): + container_id = cgroup_name + + get_docker_container_id.container_id = container_id + + if container_id: + display.info('Detected execution in Docker container: %s' % container_id, verbosity=1) + + return container_id + + +def get_docker_container_ip(args, container_id): + """ + :type args: EnvironmentConfig + :type container_id: str + :rtype: str + """ + results = docker_inspect(args, container_id) + network_settings = results[0]['NetworkSettings'] + networks = network_settings.get('Networks') + + if networks: + network_name = get_docker_preferred_network_name(args) + ipaddress = networks[network_name]['IPAddress'] + else: + # podman doesn't provide Networks, fall back to using IPAddress + ipaddress = network_settings['IPAddress'] + + if not ipaddress: + raise ApplicationError('Cannot retrieve IP address for container: %s' % container_id) + + return ipaddress + + +def get_docker_network_name(args, container_id): # type: (EnvironmentConfig, str) -> str + """ + Return the network name of the specified container. + Raises an exception if zero or more than one network is found. + """ + networks = get_docker_networks(args, container_id) + + if not networks: + raise ApplicationError('No network found for Docker container: %s.' % container_id) + + if len(networks) > 1: + raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (container_id, ', '.join(networks))) + + return networks[0] + + +def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str + """ + Return the preferred network name for use with Docker. The selection logic is: + - the network selected by the user with `--docker-network` + - the network of the currently running docker container (if any) + - the default docker network (returns None) + """ + network = None + + if args.docker_network: + network = args.docker_network + else: + current_container_id = get_docker_container_id() + + if current_container_id: + # Make sure any additional containers we launch use the same network as the current container we're running in. + # This is needed when ansible-test is running in a container that is not connected to Docker's default network. 
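+ # Note: get_docker_network_name() raises an ApplicationError unless exactly one network is attached to the current container.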
+ network = get_docker_network_name(args, current_container_id) + + return network + + +def is_docker_user_defined_network(network): # type: (str) -> bool + """Return True if the network being used is a user-defined network.""" + return network and network != 'bridge' + + +def get_docker_networks(args, container_id): + """ + :param args: EnvironmentConfig + :param container_id: str + :rtype: list[str] + """ + results = docker_inspect(args, container_id) + # podman doesn't return Networks- just silently return None if it's missing... + networks = results[0]['NetworkSettings'].get('Networks') + if networks is None: + return None + return sorted(networks) + + +def docker_pull(args, image): + """ + :type args: EnvironmentConfig + :type image: str + """ + if ('@' in image or ':' in image) and docker_images(args, image): + display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2) + return + + if not args.docker_pull: + display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image) + return + + for _iteration in range(1, 10): + try: + docker_command(args, ['pull', image]) + return + except SubprocessError: + display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image) + time.sleep(3) + + raise ApplicationError('Failed to pull docker image "%s".' % image) + + +def docker_put(args, container_id, src, dst): + """ + :type args: EnvironmentConfig + :type container_id: str + :type src: str + :type dst: str + """ + # avoid 'docker cp' due to a bug which causes 'docker rm' to fail + with open_binary_file(src) as src_fd: + docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE], + options=['-i'], stdin=src_fd, capture=True) + + +def docker_get(args, container_id, src, dst): + """ + :type args: EnvironmentConfig + :type container_id: str + :type src: str + :type dst: str + """ + # avoid 'docker cp' due to a bug which causes 'docker rm' to fail + with open_binary_file(dst, 'wb') as dst_fd: + docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE], + options=['-i'], stdout=dst_fd, capture=True) + + +def docker_run(args, image, options, cmd=None): + """ + :type args: EnvironmentConfig + :type image: str + :type options: list[str] | None + :type cmd: list[str] | None + :rtype: str | None, str | None + """ + if not options: + options = [] + + if not cmd: + cmd = [] + + network = get_docker_preferred_network_name(args) + + if is_docker_user_defined_network(network): + # Only when the network is not the default bridge network. + # Using this with the default bridge network results in an error when using --link: links are only supported for user-defined networks + options.extend(['--network', network]) + + for _iteration in range(1, 3): + try: + return docker_command(args, ['run'] + options + [image] + cmd, capture=True) + except SubprocessError as ex: + display.error(ex) + display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image) + time.sleep(3) + + raise ApplicationError('Failed to run docker image "%s".' 
% image) + + +def docker_images(args, image): + """ + :param args: CommonConfig + :param image: str + :rtype: list[dict[str, any]] + """ + try: + stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True) + except SubprocessError as ex: + if 'no such image' in ex.stderr: + return [] # podman does not handle this gracefully, exits 125 + + if 'function "json" not defined' in ex.stderr: + # podman > 2 && < 2.2.0 breaks with --format {{json .}}, and requires --format json + # So we try this as a fallback. If it fails again, we just raise the exception and bail. + stdout, _dummy = docker_command(args, ['images', image, '--format', 'json'], capture=True, always=True) + else: + raise ex + + if stdout.startswith('['): + # modern podman outputs a pretty-printed json list. Just load the whole thing. + return json.loads(stdout) + + # docker outputs one json object per line (jsonl) + return [json.loads(line) for line in stdout.splitlines()] + + +def docker_rm(args, container_id): + """ + :type args: EnvironmentConfig + :type container_id: str + """ + try: + docker_command(args, ['rm', '-f', container_id], capture=True) + except SubprocessError as ex: + if 'no such container' in ex.stderr: + pass # podman does not handle this gracefully, exits 1 + else: + raise ex + + +def docker_inspect(args, container_id): + """ + :type args: EnvironmentConfig + :type container_id: str + :rtype: list[dict] + """ + if args.explain: + return [] + + try: + stdout = docker_command(args, ['inspect', container_id], capture=True)[0] + return json.loads(stdout) + except SubprocessError as ex: + if 'no such image' in ex.stderr: + return [] # podman does not handle this gracefully, exits 125 + try: + return json.loads(ex.stdout) + except Exception: + raise ex + + +def docker_network_disconnect(args, container_id, network): + """ + :param args: EnvironmentConfig + :param container_id: str + :param network: str + """ + docker_command(args, ['network', 'disconnect', network, container_id], capture=True) + + +def docker_network_inspect(args, network): + """ + :type args: EnvironmentConfig + :type network: str + :rtype: list[dict] + """ + if args.explain: + return [] + + try: + stdout = docker_command(args, ['network', 'inspect', network], capture=True)[0] + return json.loads(stdout) + except SubprocessError as ex: + try: + return json.loads(ex.stdout) + except Exception: + raise ex + + +def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None): + """ + :type args: EnvironmentConfig + :type container_id: str + :type cmd: list[str] + :type options: list[str] | None + :type capture: bool + :type stdin: BinaryIO | None + :type stdout: BinaryIO | None + :rtype: str | None, str | None + """ + if not options: + options = [] + + return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout) + + +def docker_info(args): + """ + :param args: CommonConfig + :rtype: dict[str, any] + """ + stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True) + return json.loads(stdout) + + +def docker_version(args): + """ + :param args: CommonConfig + :rtype: dict[str, any] + """ + stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True) + return json.loads(stdout) + + +def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False): + """ + :type args: CommonConfig + :type cmd: list[str] + :type capture: bool 
+ :type stdin: file | None + :type stdout: file | None + :type always: bool + :rtype: str | None, str | None + """ + env = docker_environment() + return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always) + + +def docker_environment(): + """ + :rtype: dict[str, str] + """ + env = common_environment() + env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_'))) + return env diff --git a/test/lib/ansible_test/_internal/encoding.py b/test/lib/ansible_test/_internal/encoding.py new file mode 100644 index 00000000..8e014794 --- /dev/null +++ b/test/lib/ansible_test/_internal/encoding.py @@ -0,0 +1,41 @@ +"""Functions for encoding and decoding strings.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from . import types as t + +ENCODING = 'utf-8' + +Text = type(u'') + + +def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes] + """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None.""" + return None if value is None else to_bytes(value, errors) + + +def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text] + """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None.""" + return None if value is None else to_text(value, errors) + + +def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes + """Return the given value as bytes encoded using UTF-8 if not already bytes.""" + if isinstance(value, bytes): + return value + + if isinstance(value, Text): + return value.encode(ENCODING, errors) + + raise Exception('value is not bytes or text: %s' % type(value)) + + +def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text + """Return the given value as text decoded using UTF-8 if not already text.""" + if isinstance(value, bytes): + return value.decode(ENCODING, errors) + + if isinstance(value, Text): + return value + + raise Exception('value is not bytes or text: %s' % type(value)) diff --git a/test/lib/ansible_test/_internal/env.py b/test/lib/ansible_test/_internal/env.py new file mode 100644 index 00000000..60c0245e --- /dev/null +++ b/test/lib/ansible_test/_internal/env.py @@ -0,0 +1,293 @@ +"""Show information about the test environment.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import functools +import os +import platform +import signal +import sys +import time + +from .config import ( + CommonConfig, + TestConfig, +) + +from .io import ( + write_json_file, + read_json_file, +) + +from .util import ( + display, + find_executable, + SubprocessError, + ApplicationError, + get_ansible_version, + get_available_python_versions, +) + +from .util_common import ( + data_context, + write_json_test_results, + ResultType, +) + +from .docker_util import ( + docker_info, + docker_version +) + +from .thread import ( + WrappedThread, +) + +from .constants import ( + TIMEOUT_PATH, +) + +from .test import ( + TestTimeout, +) + +from .executor import ( + SUPPORTED_PYTHON_VERSIONS, +) + +from .ci import ( + get_ci_provider, +) + + +class EnvConfig(CommonConfig): + """Configuration for the tools command.""" + def __init__(self, args): + """ + :type args: any + """ + super(EnvConfig, self).__init__(args, 'env') + + self.show = args.show + self.dump = args.dump + self.timeout = args.timeout + 
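# a timeout of 0 clears any existing timeout, while None leaves it unchanged (see set_timeout below) +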
self.list_files = args.list_files + + if not self.show and not self.dump and self.timeout is None and not self.list_files: + # default to --show if no options were given + self.show = True + + +def command_env(args): + """ + :type args: EnvConfig + """ + show_dump_env(args) + list_files_env(args) + set_timeout(args) + + +def show_dump_env(args): + """ + :type args: EnvConfig + """ + if not args.show and not args.dump: + return + + data = dict( + ansible=dict( + version=get_ansible_version(), + ), + docker=get_docker_details(args), + environ=os.environ.copy(), + location=dict( + pwd=os.environ.get('PWD', None), + cwd=os.getcwd(), + ), + git=get_ci_provider().get_git_details(args), + platform=dict( + datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), + platform=platform.platform(), + uname=platform.uname(), + ), + python=dict( + executable=sys.executable, + version=platform.python_version(), + ), + interpreters=get_available_python_versions(SUPPORTED_PYTHON_VERSIONS), + ) + + if args.show: + verbose = { + 'docker': 3, + 'docker.executable': 0, + 'environ': 2, + 'platform.uname': 1, + } + + show_dict(data, verbose) + + if args.dump and not args.explain: + write_json_test_results(ResultType.BOT, 'data-environment.json', data) + + +def list_files_env(args): # type: (EnvConfig) -> None + """List files on stdout.""" + if not args.list_files: + return + + for path in data_context().content.all_files(): + display.info(path) + + +def set_timeout(args): + """ + :type args: EnvConfig + """ + if args.timeout is None: + return + + if args.timeout: + deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ') + + display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1) + else: + deadline = None + + display.info('Clearing existing test timeout.', verbosity=1) + + if args.explain: + return + + if deadline: + data = dict( + duration=args.timeout, + deadline=deadline, + ) + + write_json_file(TIMEOUT_PATH, data) + elif os.path.exists(TIMEOUT_PATH): + os.remove(TIMEOUT_PATH) + + +def get_timeout(): + """ + :rtype: dict[str, any] | None + """ + if not os.path.exists(TIMEOUT_PATH): + return None + + data = read_json_file(TIMEOUT_PATH) + data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ') + + return data + + +def configure_timeout(args): + """ + :type args: CommonConfig + """ + if isinstance(args, TestConfig): + configure_test_timeout(args) # only tests are subject to the timeout + + +def configure_test_timeout(args): + """ + :type args: TestConfig + """ + timeout = get_timeout() + + if not timeout: + return + + timeout_start = datetime.datetime.utcnow() + timeout_duration = timeout['duration'] + timeout_deadline = timeout['deadline'] + timeout_remaining = timeout_deadline - timeout_start + + test_timeout = TestTimeout(timeout_duration) + + if timeout_remaining <= datetime.timedelta(): + test_timeout.write(args) + + raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % ( + timeout_duration, timeout_remaining * -1, timeout_deadline)) + + display.info('The %d minute test timeout expires in %s at %s.' % ( + timeout_duration, timeout_remaining, timeout_deadline), verbosity=1) + + def timeout_handler(_dummy1, _dummy2): + """Runs when SIGUSR1 is received.""" + test_timeout.write(args) + + raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' 
% timeout_duration) + + def timeout_waiter(timeout_seconds): + """ + :type timeout_seconds: int + """ + time.sleep(timeout_seconds) + os.kill(os.getpid(), signal.SIGUSR1) + + signal.signal(signal.SIGUSR1, timeout_handler) + + instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.seconds)) + instance.daemon = True + instance.start() + + +def show_dict(data, verbose, root_verbosity=0, path=None): + """ + :type data: dict[str, any] + :type verbose: dict[str, int] + :type root_verbosity: int + :type path: list[str] | None + """ + path = path if path else [] + + for key, value in sorted(data.items()): + indent = ' ' * len(path) + key_path = path + [key] + key_name = '.'.join(key_path) + verbosity = verbose.get(key_name, root_verbosity) + + if isinstance(value, (tuple, list)): + display.info(indent + '%s:' % key, verbosity=verbosity) + for item in value: + display.info(indent + ' - %s' % item, verbosity=verbosity) + elif isinstance(value, dict): + min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)]) + display.info(indent + '%s:' % key, verbosity=min_verbosity) + show_dict(value, verbose, verbosity, key_path) + else: + display.info(indent + '%s: %s' % (key, value), verbosity=verbosity) + + +def get_docker_details(args): + """ + :type args: CommonConfig + :rtype: dict[str, any] + """ + docker = find_executable('docker', required=False) + info = None + version = None + + if docker: + try: + info = docker_info(args) + except SubprocessError as ex: + display.warning('Failed to collect docker info:\n%s' % ex) + + try: + version = docker_version(args) + except SubprocessError as ex: + display.warning('Failed to collect docker version:\n%s' % ex) + + docker_details = dict( + executable=docker, + info=info, + version=version, + ) + + return docker_details diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py new file mode 100644 index 00000000..4f613049 --- /dev/null +++ b/test/lib/ansible_test/_internal/executor.py @@ -0,0 +1,2146 @@ +"""Execute Ansible tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import datetime +import re +import time +import textwrap +import functools +import hashlib +import difflib +import filecmp +import random +import string +import shutil + +from . 
import types as t + +from .thread import ( + WrappedThread, +) + +from .core_ci import ( + AnsibleCoreCI, + SshKey, +) + +from .manage_ci import ( + ManageWindowsCI, + ManageNetworkCI, +) + +from .cloud import ( + cloud_filter, + cloud_init, + get_cloud_environment, + get_cloud_platforms, + CloudEnvironmentConfig, +) + +from .io import ( + make_dirs, + open_text_file, + read_binary_file, + read_text_file, + write_text_file, +) + +from .util import ( + ApplicationWarning, + ApplicationError, + SubprocessError, + display, + remove_tree, + find_executable, + raw_command, + get_available_port, + generate_pip_command, + find_python, + cmd_quote, + ANSIBLE_LIB_ROOT, + ANSIBLE_TEST_DATA_ROOT, + ANSIBLE_TEST_CONFIG_ROOT, + get_ansible_version, + tempdir, + open_zipfile, + SUPPORTED_PYTHON_VERSIONS, + str_to_version, + version_to_str, +) + +from .util_common import ( + get_docker_completion, + get_network_settings, + get_remote_completion, + get_python_path, + intercept_command, + named_temporary_file, + run_command, + write_json_test_results, + ResultType, + handle_layout_messages, +) + +from .docker_util import ( + docker_pull, + docker_run, + docker_available, + docker_rm, + get_docker_container_id, + get_docker_container_ip, + get_docker_hostname, + get_docker_preferred_network_name, + is_docker_user_defined_network, +) + +from .ansible_util import ( + ansible_environment, + check_pyyaml, +) + +from .target import ( + IntegrationTarget, + walk_internal_targets, + walk_posix_integration_targets, + walk_network_integration_targets, + walk_windows_integration_targets, + TIntegrationTarget, +) + +from .ci import ( + get_ci_provider, +) + +from .classification import ( + categorize_changes, +) + +from .config import ( + TestConfig, + EnvironmentConfig, + IntegrationConfig, + NetworkIntegrationConfig, + PosixIntegrationConfig, + ShellConfig, + WindowsIntegrationConfig, + TIntegrationConfig, +) + +from .metadata import ( + ChangeDescription, +) + +from .integration import ( + integration_test_environment, + integration_test_config_file, + setup_common_temp_dir, + get_inventory_relative_path, + check_inventory, + delegate_inventory, +) + +from .data import ( + data_context, +) + +HTTPTESTER_HOSTS = ( + 'ansible.http.tests', + 'sni1.ansible.http.tests', + 'fail.ansible.http.tests', +) + + +def check_startup(): + """Checks to perform at startup before running commands.""" + check_legacy_modules() + + +def check_legacy_modules(): + """Detect conflicts with legacy core/extras module directories to avoid problems later.""" + for directory in 'core', 'extras': + path = 'lib/ansible/modules/%s' % directory + + for root, _dir_names, file_names in os.walk(path): + if file_names: + # the directory shouldn't exist, but if it does, it must contain no files + raise ApplicationError('Files prohibited in "%s". ' + 'These are most likely legacy modules from version 2.2 or earlier.' % root) + + +def create_shell_command(command): + """ + :type command: list[str] + :rtype: list[str] + """ + optional_vars = ( + 'TERM', + ) + + cmd = ['/usr/bin/env'] + cmd += ['%s=%s' % (var, os.environ[var]) for var in optional_vars if var in os.environ] + cmd += command + + return cmd + + +def get_openssl_version(args, python, python_version): # type: (EnvironmentConfig, str, str) -> t.Optional[t.Tuple[int, ...]] + """Return the openssl version.""" + if not python_version.startswith('2.'): + # OpenSSL version checking only works on Python 3.x. + # This should be the most accurate, since it is the Python we will be using. 
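+ # sslcheck.py reports a JSON object whose 'version' key holds the detected OpenSSL version, which may be empty (hence the check below).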
+ version = json.loads(run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sslcheck.py')], capture=True, always=True)[0])['version'] + + if version: + display.info('Detected OpenSSL version %s under Python %s.' % (version_to_str(version), python_version), verbosity=1) + + return tuple(version) + + # Fall back to detecting the OpenSSL version from the CLI. + # This should provide an adequate solution on Python 2.x. + openssl_path = find_executable('openssl', required=False) + + if openssl_path: + try: + result = raw_command([openssl_path, 'version'], capture=True)[0] + except SubprocessError: + result = '' + + match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', result) + + if match: + version = str_to_version(match.group('version')) + + display.info('Detected OpenSSL version %s using the openssl CLI.' % version_to_str(version), verbosity=1) + + return version + + display.info('Unable to detect OpenSSL version.', verbosity=1) + + return None + + + def get_setuptools_version(args, python): # type: (EnvironmentConfig, str) -> t.Tuple[int] + """Return the setuptools version for the given python.""" + try: + return str_to_version(raw_command([python, '-c', 'import setuptools; print(setuptools.__version__)'], capture=True)[0]) + except SubprocessError: + if args.explain: + return tuple() # ignore errors in explain mode in case setuptools is not already installed + + raise + + + def get_cryptography_requirement(args, python_version): # type: (EnvironmentConfig, str) -> str + """ + Return the correct cryptography requirement for the given python version. + The version of cryptography installed depends on the python version, setuptools version and openssl version. + """ + python = find_python(python_version) + setuptools_version = get_setuptools_version(args, python) + openssl_version = get_openssl_version(args, python, python_version) + + if setuptools_version >= (18, 5): + if python_version == '2.6': + # cryptography 2.2+ requires python 2.7+ + # see https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#22---2018-03-19 + cryptography = 'cryptography < 2.2' + elif openssl_version and openssl_version < (1, 1, 0): + # cryptography 3.2 requires openssl 1.1.x or later + # see https://cryptography.io/en/latest/changelog.html#v3-2 + cryptography = 'cryptography < 3.2' + else: + # cryptography 3.4+ fails to install on many systems + # this is a temporary work-around until a more permanent solution is available + cryptography = 'cryptography < 3.4' + else: + # cryptography 2.1+ requires setuptools 18.5+ + # see https://github.com/pyca/cryptography/blob/62287ae18383447585606b9d0765c0f1b8a9777c/setup.py#L26 + cryptography = 'cryptography < 2.1' + + return cryptography + + + def install_command_requirements(args, python_version=None, context=None, enable_pyyaml_check=False): + """ + :type args: EnvironmentConfig + :type python_version: str | None + :type context: str | None + :type enable_pyyaml_check: bool + """ + if not args.explain: + make_dirs(ResultType.COVERAGE.path) + make_dirs(ResultType.DATA.path) + + if isinstance(args, ShellConfig): + if args.raw: + return + + if not args.requirements: + return + + if isinstance(args, ShellConfig): + return + + packages = [] + + if isinstance(args, TestConfig): + if args.coverage: + packages.append('coverage') + if args.junit: + packages.append('junit-xml') + + if not python_version: + python_version = args.python_version + + pip = generate_pip_command(find_python(python_version)) + + # skip packages which have already been installed for
python_version + + try: + package_cache = install_command_requirements.package_cache + except AttributeError: + package_cache = install_command_requirements.package_cache = {} + + installed_packages = package_cache.setdefault(python_version, set()) + skip_packages = [package for package in packages if package in installed_packages] + + for package in skip_packages: + packages.remove(package) + + installed_packages.update(packages) + + if args.command != 'sanity': + install_ansible_test_requirements(args, pip) + + # make sure setuptools is available before trying to install cryptography + # the installed version of setuptools affects the version of cryptography to install + run_command(args, generate_pip_install(pip, '', packages=['setuptools'])) + + # install the latest cryptography version that the current requirements can support + # use a custom constraints file to avoid the normal constraints file overriding the chosen version of cryptography + # if not installed here later install commands may try to install an unsupported version due to the presence of older setuptools + # this is done instead of upgrading setuptools to allow tests to function with older distribution provided versions of setuptools + run_command(args, generate_pip_install(pip, '', + packages=[get_cryptography_requirement(args, python_version)], + constraints=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'cryptography-constraints.txt'))) + + commands = [generate_pip_install(pip, args.command, packages=packages, context=context)] + + if isinstance(args, IntegrationConfig): + for cloud_platform in get_cloud_platforms(args): + commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform))) + + commands = [cmd for cmd in commands if cmd] + + if not commands: + return # no need to detect changes or run pip check since we are not making any changes + + # only look for changes when more than one requirements file is needed + detect_pip_changes = len(commands) > 1 + + # first pass to install requirements, changes expected unless environment is already set up + install_ansible_test_requirements(args, pip) + changes = run_pip_commands(args, pip, commands, detect_pip_changes) + + if changes: + # second pass to check for conflicts in requirements, changes are not expected here + changes = run_pip_commands(args, pip, commands, detect_pip_changes) + + if changes: + raise ApplicationError('Conflicts detected in requirements. 
The following commands reported changes during verification:\n%s' % + '\n'.join((' '.join(cmd_quote(c) for c in cmd) for cmd in changes))) + + if args.pip_check: + # ask pip to check for conflicts between installed packages + try: + run_command(args, pip + ['check', '--disable-pip-version-check'], capture=True) + except SubprocessError as ex: + if ex.stderr.strip() == 'ERROR: unknown command "check"': + display.warning('Cannot check pip requirements for conflicts because "pip check" is not supported.') + else: + raise + + if enable_pyyaml_check: + # pyyaml may have been one of the requirements that was installed, so perform an optional check for it + check_pyyaml(args, python_version, required=False) + + +def install_ansible_test_requirements(args, pip): # type: (EnvironmentConfig, t.List[str]) -> None + """Install requirements for ansible-test for the given pip if not already installed.""" + try: + installed = install_command_requirements.installed + except AttributeError: + installed = install_command_requirements.installed = set() + + if tuple(pip) in installed: + return + + # make sure basic ansible-test requirements are met, including making sure that pip is recent enough to support constraints + # virtualenvs created by older distributions may include very old pip versions, such as those created in the centos6 test container (pip 6.0.8) + run_command(args, generate_pip_install(pip, 'ansible-test', use_constraints=False)) + + installed.add(tuple(pip)) + + +def run_pip_commands(args, pip, commands, detect_pip_changes=False): + """ + :type args: EnvironmentConfig + :type pip: list[str] + :type commands: list[list[str]] + :type detect_pip_changes: bool + :rtype: list[list[str]] + """ + changes = [] + + after_list = pip_list(args, pip) if detect_pip_changes else None + + for cmd in commands: + if not cmd: + continue + + before_list = after_list + + run_command(args, cmd) + + after_list = pip_list(args, pip) if detect_pip_changes else None + + if before_list != after_list: + changes.append(cmd) + + return changes + + +def pip_list(args, pip): + """ + :type args: EnvironmentConfig + :type pip: list[str] + :rtype: str + """ + stdout = run_command(args, pip + ['list'], capture=True)[0] + return stdout + + +def generate_pip_install(pip, command, packages=None, constraints=None, use_constraints=True, context=None): + """ + :type pip: list[str] + :type command: str + :type packages: list[str] | None + :type constraints: str | None + :type use_constraints: bool + :type context: str | None + :rtype: list[str] | None + """ + constraints = constraints or os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt') + requirements = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', '%s.txt' % ('%s.%s' % (command, context) if context else command)) + content_constraints = None + + options = [] + + if os.path.exists(requirements) and os.path.getsize(requirements): + options += ['-r', requirements] + + if command == 'sanity' and data_context().content.is_ansible: + requirements = os.path.join(data_context().content.sanity_path, 'code-smell', '%s.requirements.txt' % context) + + if os.path.exists(requirements) and os.path.getsize(requirements): + options += ['-r', requirements] + + if command == 'units': + requirements = os.path.join(data_context().content.unit_path, 'requirements.txt') + + if os.path.exists(requirements) and os.path.getsize(requirements): + options += ['-r', requirements] + + content_constraints = os.path.join(data_context().content.unit_path, 'constraints.txt') + + if 
command in ('integration', 'windows-integration', 'network-integration'): + requirements = os.path.join(data_context().content.integration_path, 'requirements.txt') + + if os.path.exists(requirements) and os.path.getsize(requirements): + options += ['-r', requirements] + + requirements = os.path.join(data_context().content.integration_path, '%s.requirements.txt' % command) + + if os.path.exists(requirements) and os.path.getsize(requirements): + options += ['-r', requirements] + + content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt') + + if command.startswith('integration.cloud.'): + content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt') + + if packages: + options += packages + + if not options: + return None + + if use_constraints: + if content_constraints and os.path.exists(content_constraints) and os.path.getsize(content_constraints): + # listing content constraints first gives them priority over constraints provided by ansible-test + options.extend(['-c', content_constraints]) + + options.extend(['-c', constraints]) + + return pip + ['install', '--disable-pip-version-check'] + options + + +def command_shell(args): + """ + :type args: ShellConfig + """ + if args.delegate: + raise Delegate() + + install_command_requirements(args) + + if args.inject_httptester: + inject_httptester(args) + + cmd = create_shell_command(['bash', '-i']) + run_command(args, cmd) + + +def command_posix_integration(args): + """ + :type args: PosixIntegrationConfig + """ + handle_layout_messages(data_context().content.integration_messages) + + inventory_relative_path = get_inventory_relative_path(args) + inventory_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, os.path.basename(inventory_relative_path)) + + all_targets = tuple(walk_posix_integration_targets(include_hidden=True)) + internal_targets = command_integration_filter(args, all_targets) + command_integration_filtered(args, internal_targets, all_targets, inventory_path) + + +def command_network_integration(args): + """ + :type args: NetworkIntegrationConfig + """ + handle_layout_messages(data_context().content.integration_messages) + + inventory_relative_path = get_inventory_relative_path(args) + template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template' + + if args.inventory: + inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory) + else: + inventory_path = os.path.join(data_context().content.root, inventory_relative_path) + + if args.no_temp_workdir: + # temporary solution to keep DCI tests working + inventory_exists = os.path.exists(inventory_path) + else: + inventory_exists = os.path.isfile(inventory_path) + + if not args.explain and not args.platform and not inventory_exists: + raise ApplicationError( + 'Inventory not found: %s\n' + 'Use --inventory to specify the inventory path.\n' + 'Use --platform to provision resources and generate an inventory file.\n' + 'See also inventory template: %s' % (inventory_path, template_path) + ) + + check_inventory(args, inventory_path) + delegate_inventory(args, inventory_path) + + all_targets = tuple(walk_network_integration_targets(include_hidden=True)) + internal_targets = command_integration_filter(args, all_targets, init_callback=network_init) + instances = [] # type: t.List[WrappedThread] + + if args.platform: + get_python_path(args, args.python_executable) # initialize before starting threads + + configs = 
dict((config['platform_version'], config) for config in args.metadata.instance_config) + + for platform_version in args.platform: + platform, version = platform_version.split('/', 1) + config = configs.get(platform_version) + + if not config: + continue + + instance = WrappedThread(functools.partial(network_run, args, platform, version, config)) + instance.daemon = True + instance.start() + instances.append(instance) + + while any(instance.is_alive() for instance in instances): + time.sleep(1) + + remotes = [instance.wait_for_result() for instance in instances] + inventory = network_inventory(remotes) + + display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3) + + if not args.explain: + write_text_file(inventory_path, inventory) + + success = False + + try: + command_integration_filtered(args, internal_targets, all_targets, inventory_path) + success = True + finally: + if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success): + for instance in instances: + instance.result.stop() + + +def network_init(args, internal_targets): # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None + """Initialize platforms for network integration tests.""" + if not args.platform: + return + + if args.metadata.instance_config is not None: + return + + platform_targets = set(a for target in internal_targets for a in target.aliases if a.startswith('network/')) + + instances = [] # type: t.List[WrappedThread] + + # generate an ssh key (if needed) up front once, instead of for each instance + SshKey(args) + + for platform_version in args.platform: + platform, version = platform_version.split('/', 1) + platform_target = 'network/%s/' % platform + + if platform_target not in platform_targets: + display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' 
+
+
+def network_init(args, internal_targets):  # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None
+    """Initialize platforms for network integration tests."""
+    if not args.platform:
+        return
+
+    if args.metadata.instance_config is not None:
+        return
+
+    platform_targets = set(a for target in internal_targets for a in target.aliases if a.startswith('network/'))
+
+    instances = []  # type: t.List[WrappedThread]
+
+    # generate an ssh key (if needed) up front once, instead of for each instance
+    SshKey(args)
+
+    for platform_version in args.platform:
+        platform, version = platform_version.split('/', 1)
+        platform_target = 'network/%s/' % platform
+
+        if platform_target not in platform_targets:
+            display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
+                platform_version, platform))
+            continue
+
+        instance = WrappedThread(functools.partial(network_start, args, platform, version))
+        instance.daemon = True
+        instance.start()
+        instances.append(instance)
+
+    while any(instance.is_alive() for instance in instances):
+        time.sleep(1)
+
+    args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def network_start(args, platform, version):
+    """
+    :type args: NetworkIntegrationConfig
+    :type platform: str
+    :type version: str
+    :rtype: AnsibleCoreCI
+    """
+    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
+    core_ci.start()
+
+    return core_ci.save()
+
+
+def network_run(args, platform, version, config):
+    """
+    :type args: NetworkIntegrationConfig
+    :type platform: str
+    :type version: str
+    :type config: dict[str, str]
+    :rtype: AnsibleCoreCI
+    """
+    core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+    core_ci.load(config)
+    core_ci.wait()
+
+    manage = ManageNetworkCI(core_ci)
+    manage.wait()
+
+    return core_ci
+
+
+def network_inventory(remotes):
+    """
+    :type remotes: list[AnsibleCoreCI]
+    :rtype: str
+    """
+    groups = dict([(remote.platform, []) for remote in remotes])
+    net = []
+
+    for remote in remotes:
+        options = dict(
+            ansible_host=remote.connection.hostname,
+            ansible_user=remote.connection.username,
+            ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
+        )
+
+        settings = get_network_settings(remote.args, remote.platform, remote.version)
+
+        options.update(settings.inventory_vars)
+
+        groups[remote.platform].append(
+            '%s %s' % (
+                remote.name.replace('.', '-'),
+                ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+            )
+        )
+
+        net.append(remote.platform)
+
+    groups['net:children'] = net
+
+    template = ''
+
+    for group in groups:
+        hosts = '\n'.join(groups[group])
+
+        template += textwrap.dedent("""
+        [%s]
+        %s
+        """) % (group, hosts)
+
+    inventory = template
+
+    return inventory
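Editor's note: network_inventory() above renders one INI section per platform plus a net:children group. A small sketch (not part of the patch) showing the textwrap.dedent() assembly with hand-made data; the host name and variables are invented.

import textwrap

groups = {
    'vyos': ['vyos-1-1-8 ansible_host="203.0.113.10" ansible_user="admin"'],
    'net:children': ['vyos'],
}

template = ''

for group in groups:
    hosts = '\n'.join(groups[group])
    template += textwrap.dedent("""
    [%s]
    %s
    """) % (group, hosts)

print(template)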
+
+
+def command_windows_integration(args):
+    """
+    :type args: WindowsIntegrationConfig
+    """
+    handle_layout_messages(data_context().content.integration_messages)
+
+    inventory_relative_path = get_inventory_relative_path(args)
+    template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+    if args.inventory:
+        inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+    else:
+        inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+    if not args.explain and not args.windows and not os.path.isfile(inventory_path):
+        raise ApplicationError(
+            'Inventory not found: %s\n'
+            'Use --inventory to specify the inventory path.\n'
+            'Use --windows to provision resources and generate an inventory file.\n'
+            'See also inventory template: %s' % (inventory_path, template_path)
+        )
+
+    check_inventory(args, inventory_path)
+    delegate_inventory(args, inventory_path)
+
+    all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
+    internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
+    instances = []  # type: t.List[WrappedThread]
+    pre_target = None
+    post_target = None
+    httptester_id = None
+
+    if args.windows:
+        get_python_path(args, args.python_executable)  # initialize before starting threads
+
+        configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+        for version in args.windows:
+            config = configs['windows/%s' % version]
+
+            instance = WrappedThread(functools.partial(windows_run, args, version, config))
+            instance.daemon = True
+            instance.start()
+            instances.append(instance)
+
+        while any(instance.is_alive() for instance in instances):
+            time.sleep(1)
+
+        remotes = [instance.wait_for_result() for instance in instances]
+        inventory = windows_inventory(remotes)
+
+        display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+        if not args.explain:
+            write_text_file(inventory_path, inventory)
+
+        use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
+        # if running under Docker delegation, the httptester may have already been started
+        docker_httptester = bool(os.environ.get("HTTPTESTER", False))
+
+        if use_httptester and not docker_available() and not docker_httptester:
+            display.warning('Assuming --disable-httptester since `docker` is not available.')
+        elif use_httptester:
+            if docker_httptester:
+                # we are running in a Docker container that is linked to the httptester container, we just need to
+                # forward these requests to the linked hostname
+                first_host = HTTPTESTER_HOSTS[0]
+                ssh_options = ["-R", "8080:%s:80" % first_host, "-R", "8443:%s:443" % first_host]
+            else:
+                # we are running directly and need to start the httptester container ourselves and forward the ports
+                # from there; set args.inject_httptester so the HTTPTESTER env var is set during the run
+                args.inject_httptester = True
+                httptester_id, ssh_options = start_httptester(args)
+
+            # to get this SSH command to run in the background we need to run it in the background (-f) and disable
+            # pty allocation (-T)
+            ssh_options.insert(0, "-fT")
+
+            # create a script that will continue to run in the background until the script is deleted; deleting it
+            # will clean up and close the connection
+            def forward_ssh_ports(target):
+                """
+                :type target: IntegrationTarget
+                """
+                if 'needs/httptester/' not in target.aliases:
+                    return
+
+                for remote in [r for r in remotes if r.version != '2008']:
+                    manage = ManageWindowsCI(remote)
+                    manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)
+
+                    # We cannot pass an array of strings with -File so we just use a delimiter for multiple values
+                    script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
+                             % (watcher_path, "|".join(HTTPTESTER_HOSTS))
+                    if args.verbosity > 3:
+                        script += " -Verbose"
+                    manage.ssh(script, options=ssh_options, force_pty=False)
+
+            def cleanup_ssh_ports(target):
+                """
+                :type target: IntegrationTarget
+                """
+                if 'needs/httptester/' not in target.aliases:
+                    return
+
+                for remote in [r for r in remotes if r.version != '2008']:
+                    # delete the tmp file that keeps the http-tester alive
+                    manage = ManageWindowsCI(remote)
+                    manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)
+
+            watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
+            pre_target = forward_ssh_ports
+            post_target = cleanup_ssh_ports
+
+    def run_playbook(playbook, run_playbook_vars):  # type: (str, t.Dict[str, t.Any]) -> None
+        playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+        command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
+        if args.verbosity:
+            command.append('-%s' % ('v' * args.verbosity))
+
+        env = ansible_environment(args)
+        intercept_command(args, command, '', env, disable_coverage=True)
+
+    remote_temp_path = None
+
+    if args.coverage and not args.coverage_check:
+        # Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
+        remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
+        playbook_vars = {'remote_temp_path': remote_temp_path}
+        run_playbook('windows_coverage_setup.yml', playbook_vars)
+
+    success = False
+
+    try:
+        command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target,
+                                     post_target=post_target, remote_temp_path=remote_temp_path)
+        success = True
+    finally:
+        if httptester_id:
+            docker_rm(args, httptester_id)
+
+        if remote_temp_path:
+            # Zip up the coverage files that were generated and fetch them back to localhost.
+            with tempdir() as local_temp_path:
+                playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
+                run_playbook('windows_coverage_teardown.yml', playbook_vars)
+
+                for filename in os.listdir(local_temp_path):
+                    with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
+                        coverage_zip.extractall(ResultType.COVERAGE.path)
+
+        if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+            for instance in instances:
+                instance.result.stop()
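Editor's note: the httptester wiring above reverse-forwards remote ports 8080/8443 back to the container host over SSH. A sketch (not part of the patch) of the resulting argv under assumed values; the hostnames are examples only.

first_host = 'ansible.http.tests'  # assumed first entry of HTTPTESTER_HOSTS
ssh_options = ['-R', '8080:%s:80' % first_host, '-R', '8443:%s:443' % first_host]

# -f backgrounds ssh once the forwards are up; -T skips pty allocation
ssh_options.insert(0, '-fT')

print(['ssh'] + ssh_options + ['user@windows-host.example'])  # hypothetical target host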
+
+
+# noinspection PyUnusedLocal
+def windows_init(args, internal_targets):  # pylint: disable=locally-disabled, unused-argument
+    """
+    :type args: WindowsIntegrationConfig
+    :type internal_targets: tuple[IntegrationTarget]
+    """
+    if not args.windows:
+        return
+
+    if args.metadata.instance_config is not None:
+        return
+
+    instances = []  # type: t.List[WrappedThread]
+
+    for version in args.windows:
+        instance = WrappedThread(functools.partial(windows_start, args, version))
+        instance.daemon = True
+        instance.start()
+        instances.append(instance)
+
+    while any(instance.is_alive() for instance in instances):
+        time.sleep(1)
+
+    args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def windows_start(args, version):
+    """
+    :type args: WindowsIntegrationConfig
+    :type version: str
+    :rtype: AnsibleCoreCI
+    """
+    core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
+    core_ci.start()
+
+    return core_ci.save()
+
+
+def windows_run(args, version, config):
+    """
+    :type args: WindowsIntegrationConfig
+    :type version: str
+    :type config: dict[str, str]
+    :rtype: AnsibleCoreCI
+    """
+    core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+    core_ci.load(config)
+    core_ci.wait()
+
+    manage = ManageWindowsCI(core_ci)
+    manage.wait()
+
+    return core_ci
+
+
+def windows_inventory(remotes):
+    """
+    :type remotes: list[AnsibleCoreCI]
+    :rtype: str
+    """
+    hosts = []
+
+    for remote in remotes:
+        options = dict(
+            ansible_host=remote.connection.hostname,
+            ansible_user=remote.connection.username,
+            ansible_password=remote.connection.password,
+            ansible_port=remote.connection.port,
+        )
+
+        # used for the connection_windows_ssh test target
+        if remote.ssh_key:
+            options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)
+
+        if remote.name == 'windows-2008':
+            options.update(
+                # force 2008 to use PSRP for the connection plugin
+                ansible_connection='psrp',
+                ansible_psrp_auth='basic',
+                ansible_psrp_cert_validation='ignore',
+            )
+        elif remote.name == 'windows-2016':
+            options.update(
+                # force 2016 to use NTLM + HTTP message encryption
+                ansible_connection='winrm',
+                ansible_winrm_server_cert_validation='ignore',
+                ansible_winrm_transport='ntlm',
+                ansible_winrm_scheme='http',
+                ansible_port='5985',
+            )
+        else:
+            options.update(
+                ansible_connection='winrm',
+                ansible_winrm_server_cert_validation='ignore',
+            )
+
+        hosts.append(
+            '%s %s' % (
+                remote.name.replace('/', '_'),
+                ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+            )
+        )
+
+    template = """
+    [windows]
+    %s
+
+    # support winrm binary module tests (temporary solution)
+    [testhost:children]
+    windows
+    """
+
+    template = textwrap.dedent(template)
+    inventory = template % ('\n'.join(hosts))
+
+    return inventory
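Editor's note: each host line produced by windows_inventory() above is the host name followed by sorted key="value" pairs. A sketch (not part of the patch) with invented connection values.

options = dict(
    ansible_host='198.51.100.20',
    ansible_user='Administrator',
    ansible_connection='winrm',
    ansible_winrm_server_cert_validation='ignore',
)

host_line = '%s %s' % (
    'windows-2019',  # remote.name with '/' replaced by '_'
    ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
)

print(host_line)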
+
+
+def command_integration_filter(args,  # type: TIntegrationConfig
+                               targets,  # type: t.Iterable[TIntegrationTarget]
+                               init_callback=None,  # type: t.Callable[[TIntegrationConfig, t.Tuple[TIntegrationTarget, ...]], None]
+                               ):  # type: (...) -> t.Tuple[TIntegrationTarget, ...]
+    """Filter the given integration test targets."""
+    targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
+    changes = get_changes_filter(args)
+
+    # special behavior when the --changed-all-target target is selected based on changes
+    if args.changed_all_target in changes:
+        # act as though the --changed-all-target target was in the include list
+        if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
+            args.include.append(args.changed_all_target)
+            args.delegate_args += ['--include', args.changed_all_target]
+        # act as though the --changed-all-target target was in the exclude list
+        elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
+            args.exclude.append(args.changed_all_target)
+
+    require = args.require + changes
+    exclude = args.exclude
+
+    internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+    environment_exclude = get_integration_filter(args, internal_targets)
+
+    environment_exclude += cloud_filter(args, internal_targets)
+
+    if environment_exclude:
+        exclude += environment_exclude
+        internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+
+    if not internal_targets:
+        raise AllTargetsSkipped()
+
+    if args.start_at and not any(target.name == args.start_at for target in internal_targets):
+        raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
+
+    if init_callback:
+        init_callback(args, internal_targets)
+
+    cloud_init(args, internal_targets)
+
+    vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+    if os.path.exists(vars_file_src):
+        def integration_config_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
+            """
+            Add the integration config vars file to the payload file list.
+            This will preserve the file during delegation even if the file is ignored by source control.
+            """
+            files.append((vars_file_src, data_context().content.integration_vars_path))
+
+        data_context().register_payload_callback(integration_config_callback)
+
+    if args.delegate:
+        raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)
+
+    install_command_requirements(args)
+
+    return internal_targets
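Editor's note: command_integration_filter() above first drops hidden targets, then narrows the set by include/exclude/require. A toy sketch (not part of the patch) of the alias-based exclusion idea, with invented targets.

targets = (
    dict(name='ping', aliases=['shippable/posix/group1']),
    dict(name='internal_helper', aliases=['hidden/']),
)

# hidden targets never take part in include/exclude/require resolution
visible = tuple(target for target in targets if 'hidden/' not in target['aliases'])

print([target['name'] for target in visible])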
+
+
+def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
+                                 remote_temp_path=None):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :type all_targets: tuple[IntegrationTarget]
+    :type inventory_path: str
+    :type pre_target: (IntegrationTarget) -> None | None
+    :type post_target: (IntegrationTarget) -> None | None
+    :type remote_temp_path: str | None
+    """
+    found = False
+    passed = []
+    failed = []
+
+    targets_iter = iter(targets)
+    all_targets_dict = dict((target.name, target) for target in all_targets)
+
+    setup_errors = []
+    setup_targets_executed = set()
+
+    for target in all_targets:
+        for setup_target in target.setup_once + target.setup_always:
+            if setup_target not in all_targets_dict:
+                setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
+
+    if setup_errors:
+        raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
+
+    check_pyyaml(args, args.python_version)
+
+    test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
+
+    if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
+        max_tries = 20
+        display.info('SSH service required for tests. Checking to make sure we can connect.')
+        for i in range(1, max_tries + 1):
+            try:
+                run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
+                display.info('SSH service responded.')
+                break
+            except SubprocessError:
+                if i == max_tries:
+                    raise
+                seconds = 3
+                display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
+                time.sleep(seconds)
+
+    # Windows is different as Ansible execution is done locally but the host is remote
+    if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
+        inject_httptester(args)
+
+    start_at_task = args.start_at_task
+
+    results = {}
+
+    current_environment = None  # type: t.Optional[EnvironmentDescription]
+
+    # common temporary directory path that will be valid on both the controller and the remote
+    # it must be common because it will be referenced in environment variables that are shared across multiple hosts
+    common_temp_path = '/tmp/ansible-test-%s' % ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(8))
+
+    setup_common_temp_dir(args, common_temp_path)
+
+    try:
+        for target in targets_iter:
+            if args.start_at and not found:
+                found = target.name == args.start_at
+
+                if not found:
+                    continue
+
+            if args.list_targets:
+                print(target.name)
+                continue
+
+            tries = 2 if args.retry_on_error else 1
+            verbosity = args.verbosity
+
+            cloud_environment = get_cloud_environment(args, target)
+
+            original_environment = current_environment if current_environment else EnvironmentDescription(args)
+            current_environment = None
+
+            display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)
+
+            try:
+                while tries:
+                    tries -= 1
+
+                    try:
+                        if cloud_environment:
+                            cloud_environment.setup_once()
+
+                        run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, False)
+
+                        start_time = time.time()
+
+                        run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)
+
+                        if not args.explain:
+                            # create a fresh test directory for each test target
+                            remove_tree(test_dir)
+                            make_dirs(test_dir)
+
+                        if pre_target:
+                            pre_target(target)
+
+                        try:
+                            if target.script_path:
+                                command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
+                                                           remote_temp_path=remote_temp_path)
+                            else:
+                                command_integration_role(args, target, start_at_task, test_dir, inventory_path,
+                                                         common_temp_path, remote_temp_path=remote_temp_path)
+                                start_at_task = None
+                        finally:
+                            if post_target:
+                                post_target(target)
+
+                        end_time = time.time()
+
+                        results[target.name] = dict(
+                            name=target.name,
+                            type=target.type,
+                            aliases=target.aliases,
+                            modules=target.modules,
+                            run_time_seconds=int(end_time - start_time),
+                            setup_once=target.setup_once,
+                            setup_always=target.setup_always,
+                            coverage=args.coverage,
+                            coverage_label=args.coverage_label,
+                            python_version=args.python_version,
+                        )
+
+                        break
+                    except SubprocessError:
+                        if cloud_environment:
+                            cloud_environment.on_failure(target, tries)
+
+                        if not original_environment.validate(target.name, throw=False):
+                            raise
+
+                        if not tries:
+                            raise
+
+                        display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
+                        display.verbosity = args.verbosity = 6
+
+                start_time = time.time()
+                current_environment = EnvironmentDescription(args)
+                end_time = time.time()
+
+                EnvironmentDescription.check(original_environment, current_environment, target.name, throw=True)
+
+                results[target.name]['validation_seconds'] = int(end_time - start_time)
+
+                passed.append(target)
+            except Exception as ex:
+                failed.append(target)
+
+                if args.continue_on_error:
+                    display.error(ex)
+                    continue
+
+                display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
+
+                next_target = next(targets_iter, None)
+
+                if next_target:
+                    display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
+
+                raise
+            finally:
+                display.verbosity = args.verbosity = verbosity
+
+    finally:
+        if not args.explain:
+            if args.coverage:
+                coverage_temp_path = os.path.join(common_temp_path, ResultType.COVERAGE.name)
+                coverage_save_path = ResultType.COVERAGE.path
+
+                for filename in os.listdir(coverage_temp_path):
+                    shutil.copy(os.path.join(coverage_temp_path, filename), os.path.join(coverage_save_path, filename))
+
+            remove_tree(common_temp_path)
+
+        result_name = '%s-%s.json' % (
+            args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+        data = dict(
+            targets=results,
+        )
+
+        write_json_test_results(ResultType.DATA, result_name, data)
+
+    if failed:
+        raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
+            len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
+
+
+def start_httptester(args):
+    """
+    :type args: EnvironmentConfig
+    :rtype: str, list[str]
+    """
+
+    # map ports from remote -> localhost -> container
+    # passing through localhost is only used when ansible-test is not already running inside a docker container
+    ports = [
+        dict(
+            remote=8080,
+            container=80,
+        ),
+        dict(
+            remote=8443,
+            container=443,
+        ),
+    ]
+
+    container_id = get_docker_container_id()
+
+    if not container_id:
+        for item in ports:
+            item['localhost'] = get_available_port()
+
+    docker_pull(args, args.httptester)
+
+    httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
+
+    if container_id:
+        container_host = get_docker_container_ip(args, httptester_id)
+        display.info('Found httptester container address: %s' % container_host, verbosity=1)
+    else:
+        container_host = get_docker_hostname()
+
+    ssh_options = []
+
+    for port in ports:
+        ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
+
+    return httptester_id, ssh_options
+
+
+def run_httptester(args, ports=None):
+    """
+    :type args: EnvironmentConfig
+    :type ports: dict[int, int] | None
+    :rtype: str
+    """
+    options = [
+        '--detach',
+    ]
+
+    if ports:
+        for localhost_port, container_port in ports.items():
+            options += ['-p', '%d:%d' % (localhost_port, container_port)]
+
+    network = get_docker_preferred_network_name(args)
+
+    if is_docker_user_defined_network(network):
+        # network-scoped aliases are only supported for containers in user defined networks
+        for alias in HTTPTESTER_HOSTS:
+            options.extend(['--network-alias', alias])
+
+    httptester_id = docker_run(args, args.httptester, options=options)[0]
+
+    if args.explain:
+        httptester_id = 'httptester_id'
+    else:
+        httptester_id = httptester_id.strip()
+
+    return httptester_id
+
+
+def inject_httptester(args):
+    """
+    :type args: CommonConfig
+    """
+    comment = ' # ansible-test httptester\n'
+    append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
+    hosts_path = '/etc/hosts'
+
+    original_lines = read_text_file(hosts_path).splitlines(True)
+
+    if not any(line.endswith(comment) for line in original_lines):
+        write_text_file(hosts_path, ''.join(original_lines + append_lines))
+
+    # determine which forwarding mechanism to use
+    pfctl = find_executable('pfctl', required=False)
+    iptables = find_executable('iptables', required=False)
+
+    if pfctl:
+        kldload = find_executable('kldload', required=False)
+
+        if kldload:
+            try:
+                run_command(args, ['kldload', 'pf'], capture=True)
+            except SubprocessError:
+                pass  # already loaded
+
+        rules = '''
+rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
+rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
+'''
+        cmd = ['pfctl', '-ef', '-']
+
+        try:
+            run_command(args, cmd, capture=True, data=rules)
+        except SubprocessError:
+            pass  # pfctl may return a non-zero exit status even on success
+
+    elif iptables:
+        ports = [
+            (80, 8080),
+            (443, 8443),
+        ]
+
+        for src, dst in ports:
+            rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
+
+            try:
+                # check for existing rule
+                cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
+                run_command(args, cmd, capture=True)
+            except SubprocessError:
+                # append rule when it does not exist
+                cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
+                run_command(args, cmd, capture=True)
+    else:
+        raise ApplicationError('No supported port forwarding mechanism detected.')
+
+
+def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, inventory_path, temp_path, always):
+    """
+    :type args: IntegrationConfig
+    :type test_dir: str
+    :type target_names: list[str]
+    :type targets_dict: dict[str, IntegrationTarget]
+    :type targets_executed: set[str]
+    :type inventory_path: str
+    :type temp_path: str
+    :type always: bool
+    """
+    for target_name in target_names:
+        if not always and target_name in targets_executed:
+            continue
+
+        target = targets_dict[target_name]
+
+        if not args.explain:
+            # create a fresh test directory for each test target
+            remove_tree(test_dir)
+            make_dirs(test_dir)
+
+        if target.script_path:
+            command_integration_script(args, target, test_dir, inventory_path, temp_path)
+        else:
+            command_integration_role(args, target, None, test_dir, inventory_path, temp_path)
+
+        targets_executed.add(target_name)
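Editor's note: inject_httptester() above uses a check-then-append pattern so iptables rules are not duplicated across runs. The same idea with subprocess directly is sketched below (not part of the patch); it requires root and a system with iptables, and is illustrative only.

import subprocess

rule = ['-o', 'lo', '-p', 'tcp', '--dport', '80', '-j', 'REDIRECT', '--to-port', '8080']

try:
    # -C exits non-zero when the rule does not exist yet
    subprocess.run(['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule, check=True, capture_output=True)
except subprocess.CalledProcessError:
    # append the rule only when the check failed
    subprocess.run(['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule, check=True, capture_output=True)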
+
+
+def integration_environment(args, target, test_dir, inventory_path, ansible_config, env_config):
+    """
+    :type args: IntegrationConfig
+    :type target: IntegrationTarget
+    :type test_dir: str
+    :type inventory_path: str
+    :type ansible_config: str | None
+    :type env_config: CloudEnvironmentConfig | None
+    :rtype: dict[str, str]
+    """
+    env = ansible_environment(args, ansible_config=ansible_config)
+
+    if args.inject_httptester:
+        env.update(dict(
+            HTTPTESTER='1',
+        ))
+
+    callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
+
+    integration = dict(
+        JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
+        ANSIBLE_CALLBACK_WHITELIST=','.join(sorted(set(callback_plugins))),
+        ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
+        ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
+        OUTPUT_DIR=test_dir,
+        INVENTORY_PATH=os.path.abspath(inventory_path),
+    )
+
+    if args.debug_strategy:
+        env.update(dict(ANSIBLE_STRATEGY='debug'))
+
+    if 'non_local/' in target.aliases:
+        if args.coverage:
+            display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
+
+        env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
+
+    env.update(integration)
+
+    return env
+
+
+def command_integration_script(args, target, test_dir, inventory_path, temp_path, remote_temp_path=None):
+    """
+    :type args: IntegrationConfig
+    :type target: IntegrationTarget
+    :type test_dir: str
+    :type inventory_path: str
+    :type temp_path: str
+    :type remote_temp_path: str | None
+    """
+    display.info('Running %s integration test script' % target.name)
+
+    env_config = None
+
+    if isinstance(args, PosixIntegrationConfig):
+        cloud_environment = get_cloud_environment(args, target)
+
+        if cloud_environment:
+            env_config = cloud_environment.get_environment_config()
+
+    with integration_test_environment(args, target, inventory_path) as test_env:
+        cmd = ['./%s' % os.path.basename(target.script_path)]
+
+        if args.verbosity:
+            cmd.append('-' + ('v' * args.verbosity))
+
+        env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+        cwd = os.path.join(test_env.targets_dir, target.relative_path)
+
+        env.update(dict(
+            # support use of adhoc ansible commands in collections without specifying the fully qualified collection name
+            ANSIBLE_PLAYBOOK_DIR=cwd,
+        ))
+
+        if env_config and env_config.env_vars:
+            env.update(env_config.env_vars)
+
+        with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
+            if config_path:
+                cmd += ['-e', '@%s' % config_path]
+
+            module_coverage = 'non_local/' not in target.aliases
+            intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+                              remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path, remote_temp_path=None):
+    """
+    :type args: IntegrationConfig
+    :type target: IntegrationTarget
+    :type start_at_task: str | None
+    :type test_dir: str
+    :type inventory_path: str
+    :type temp_path: str
+    :type remote_temp_path: str | None
+    """
+    display.info('Running %s integration test role' % target.name)
+
+    env_config = None
+
+    vars_files = []
+    variables = dict(
+        output_dir=test_dir,
+    )
+
+    if isinstance(args, WindowsIntegrationConfig):
+        hosts = 'windows'
+        gather_facts = False
+        variables.update(dict(
+            win_output_dir=r'C:\ansible_testing',
+        ))
+    elif isinstance(args, NetworkIntegrationConfig):
+        hosts = target.network_platform
+        gather_facts = False
+    else:
+        hosts = 'testhost'
+        gather_facts = True
+
+    cloud_environment = get_cloud_environment(args, target)
+
+    if cloud_environment:
+        env_config = cloud_environment.get_environment_config()
+
+    with integration_test_environment(args, target, inventory_path) as test_env:
+        if os.path.exists(test_env.vars_file):
+            vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
+
+        play = dict(
+            hosts=hosts,
+            gather_facts=gather_facts,
+            vars_files=vars_files,
+            vars=variables,
+            roles=[
+                target.name,
+            ],
+        )
+
+        if env_config:
+            if env_config.ansible_vars:
+                variables.update(env_config.ansible_vars)
+
+            play.update(dict(
+                environment=env_config.env_vars,
+                module_defaults=env_config.module_defaults,
+            ))
+
+        playbook = json.dumps([play], indent=4, sort_keys=True)
+
+        with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
+            filename = os.path.basename(playbook_path)
+
+            display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
+
+            cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
+
+            if start_at_task:
+                cmd += ['--start-at-task', start_at_task]
+
+            if args.tags:
+                cmd += ['--tags', args.tags]
+
+            if args.skip_tags:
+                cmd += ['--skip-tags', args.skip_tags]
+
+            if args.diff:
+                cmd += ['--diff']
+
+            if isinstance(args, NetworkIntegrationConfig):
+                if args.testcase:
+                    cmd += ['-e', 'testcase=%s' % args.testcase]
+
+            if args.verbosity:
+                cmd.append('-' + ('v' * args.verbosity))
+
+            env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+            cwd = test_env.integration_dir
+
+            env.update(dict(
+                # support use of adhoc ansible commands in collections without specifying the fully qualified collection name
+                ANSIBLE_PLAYBOOK_DIR=cwd,
+            ))
+
+            env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
+
+            module_coverage = 'non_local/' not in target.aliases
+            intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+                              remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def get_changes_filter(args):
+    """
+    :type args: TestConfig
+    :rtype: list[str]
+    """
+    paths = detect_changes(args)
+
+    if not args.metadata.change_description:
+        if paths:
+            changes = categorize_changes(args, paths, args.command)
+        else:
+            changes = ChangeDescription()
+
+        args.metadata.change_description = changes
+
+    if paths is None:
+        return []  # change detection not enabled, do not filter targets
+
+    if not paths:
+        raise NoChangesDetected()
+
+    if args.metadata.change_description.targets is None:
+        raise NoTestsForChanges()
+
+    return args.metadata.change_description.targets
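Editor's note: command_integration_role() above serializes the generated play with json.dumps(); since JSON is valid YAML, ansible-playbook consumes the file directly. A sketch (not part of the patch) with stand-in values.

import json

play = dict(
    hosts='testhost',
    gather_facts=True,
    vars_files=[],
    vars=dict(output_dir='/tmp/output_dir'),  # assumed output directory
    roles=['ping'],  # hypothetical target name
)

print(json.dumps([play], indent=4, sort_keys=True))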
+
+
+def detect_changes(args):
+    """
+    :type args: TestConfig
+    :rtype: list[str] | None
+    """
+    if args.changed:
+        paths = get_ci_provider().detect_changes(args)
+    elif args.changed_from or args.changed_path:
+        paths = args.changed_path or []
+        if args.changed_from:
+            paths += read_text_file(args.changed_from).splitlines()
+    else:
+        return None  # change detection not enabled
+
+    if paths is None:
+        return None  # act as though change detection not enabled, do not filter targets
+
+    display.info('Detected changes in %d file(s).' % len(paths))
+
+    for path in paths:
+        display.info(path, verbosity=1)
+
+    return paths
+
+
+def get_integration_filter(args, targets):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :rtype: list[str]
+    """
+    if args.docker:
+        return get_integration_docker_filter(args, targets)
+
+    if args.remote:
+        return get_integration_remote_filter(args, targets)
+
+    return get_integration_local_filter(args, targets)
+
+
+def common_integration_filter(args, targets, exclude):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :type exclude: list[str]
+    """
+    override_disabled = set(target for target in args.include if target.startswith('disabled/'))
+
+    if not args.allow_disabled:
+        skip = 'disabled/'
+        override = [target.name for target in targets if override_disabled & set(target.aliases)]
+        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+        if skipped:
+            exclude.extend(skipped)
+            display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
+
+    if not args.allow_unsupported:
+        skip = 'unsupported/'
+        override = [target.name for target in targets if override_unsupported & set(target.aliases)]
+        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+        if skipped:
+            exclude.extend(skipped)
+            display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    override_unstable = set(target for target in args.include if target.startswith('unstable/'))
+
+    if args.allow_unstable_changed:
+        override_unstable |= set(args.metadata.change_description.focused_targets or [])
+
+    if not args.allow_unstable:
+        skip = 'unstable/'
+        override = [target.name for target in targets if override_unstable & set(target.aliases)]
+        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+        if skipped:
+            exclude.extend(skipped)
+            display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    # only skip a Windows test if using --windows and all the --windows versions are defined in the aliases as skip/windows/%s
+    if isinstance(args, WindowsIntegrationConfig) and args.windows:
+        all_skipped = []
+        not_skipped = []
+
+        for target in targets:
+            if "skip/windows/" not in target.aliases:
+                continue
+
+            skip_valid = []
+            skip_missing = []
+            for version in args.windows:
+                if "skip/windows/%s/" % version in target.aliases:
+                    skip_valid.append(version)
+                else:
+                    skip_missing.append(version)
+
+            if skip_missing and skip_valid:
+                not_skipped.append((target.name, skip_valid, skip_missing))
+            elif skip_valid:
+                all_skipped.append(target.name)
+
+        if all_skipped:
+            exclude.extend(all_skipped)
+            skip_aliases = ["skip/windows/%s/" % w for w in args.windows]
+            display.warning('Excluding tests marked "%s" which are set to skip with --windows %s: %s'
+                            % ('", "'.join(skip_aliases), ', '.join(args.windows), ', '.join(all_skipped)))
+
+        if not_skipped:
+            for target, skip_valid, skip_missing in not_skipped:
+                # warn when failing to skip due to lack of support for skipping only some versions
+                display.warning('Including test "%s" which was marked to skip for --windows %s but not %s.'
+                                % (target, ', '.join(skip_valid), ', '.join(skip_missing)))
+
+
+def get_integration_local_filter(args, targets):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :rtype: list[str]
+    """
+    exclude = []
+
+    common_integration_filter(args, targets, exclude)
+
+    if not args.allow_root and os.getuid() != 0:
+        skip = 'needs/root/'
+        skipped = [target.name for target in targets if skip in target.aliases]
+        if skipped:
+            exclude.append(skip)
+            display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    override_destructive = set(target for target in args.include if target.startswith('destructive/'))
+
+    if not args.allow_destructive:
+        skip = 'destructive/'
+        override = [target.name for target in targets if override_destructive & set(target.aliases)]
+        skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+        if skipped:
+            exclude.extend(skipped)
+            display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    exclude_targets_by_python_version(targets, args.python_version, exclude)
+
+    return exclude
+
+
+def get_integration_docker_filter(args, targets):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :rtype: list[str]
+    """
+    exclude = []
+
+    common_integration_filter(args, targets, exclude)
+
+    skip = 'skip/docker/'
+    skipped = [target.name for target in targets if skip in target.aliases]
+    if skipped:
+        exclude.append(skip)
+        display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
+                        % (skip.rstrip('/'), ', '.join(skipped)))
+
+    if not args.docker_privileged:
+        skip = 'needs/privileged/'
+        skipped = [target.name for target in targets if skip in target.aliases]
+        if skipped:
+            exclude.append(skip)
+            display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
+                            % (skip.rstrip('/'), ', '.join(skipped)))
+
+    python_version = get_python_version(args, get_docker_completion(), args.docker_raw)
+
+    exclude_targets_by_python_version(targets, python_version, exclude)
+
+    return exclude
+
+
+def get_integration_remote_filter(args, targets):
+    """
+    :type args: IntegrationConfig
+    :type targets: tuple[IntegrationTarget]
+    :rtype: list[str]
+    """
+    remote = args.parsed_remote
+
+    exclude = []
+
+    common_integration_filter(args, targets, exclude)
+
+    skips = {
+        'skip/%s' % remote.platform: remote.platform,
+        'skip/%s/%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version),
+        'skip/%s%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version),  # legacy syntax, use above format
+    }
+
+    if remote.arch:
+        skips.update({
+            'skip/%s/%s' % (remote.arch, remote.platform): '%s on %s' % (remote.platform, remote.arch),
+            'skip/%s/%s/%s' % (remote.arch, remote.platform, remote.version): '%s %s on %s' % (remote.platform, remote.version, remote.arch),
+        })
+
+    for skip, description in skips.items():
+        skipped = [target.name for target in targets if skip in target.skips]
+        if skipped:
+            exclude.append(skip + '/')
+            display.warning('Excluding tests marked "%s" which are not supported on %s: %s' % (skip, description, ', '.join(skipped)))
+
+    python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+    exclude_targets_by_python_version(targets, python_version, exclude)
+
+    return exclude
+
+
+def exclude_targets_by_python_version(targets, python_version, exclude):
+    """
+    :type targets: tuple[IntegrationTarget]
+    :type python_version: str
+    :type exclude: list[str]
+    """
+    if not python_version:
+        display.warning('Python version unknown. Unable to skip tests based on Python version.')
+        return
+
+    python_major_version = python_version.split('.')[0]
+
+    skip = 'skip/python%s/' % python_version
+    skipped = [target.name for target in targets if skip in target.aliases]
+    if skipped:
+        exclude.append(skip)
+        display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+                        % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+    skip = 'skip/python%s/' % python_major_version
+    skipped = [target.name for target in targets if skip in target.aliases]
+    if skipped:
+        exclude.append(skip)
+        display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+                        % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+
+def get_python_version(args, configs, name):
+    """
+    :type args: EnvironmentConfig
+    :type configs: dict[str, dict[str, str]]
+    :type name: str
+    """
+    config = configs.get(name, {})
+    config_python = config.get('python')
+
+    if not config or not config_python:
+        if args.python:
+            return args.python
+
+        display.warning('No Python version specified. '
+                        'Use completion config or the --python option to specify one.', unique=True)
+
+        return ''  # failure to provide a version may result in failures or reduced functionality later
+
+    supported_python_versions = config_python.split(',')
+    default_python_version = supported_python_versions[0]
+
+    if args.python and args.python not in supported_python_versions:
+        raise ApplicationError('Python %s is not supported by %s. Supported Python version(s) are: %s' % (
+            args.python, name, ', '.join(sorted(supported_python_versions))))
+
+    python_version = args.python or default_python_version
+
+    return python_version
+
+
+def get_python_interpreter(args, configs, name):
+    """
+    :type args: EnvironmentConfig
+    :type configs: dict[str, dict[str, str]]
+    :type name: str
+    """
+    if args.python_interpreter:
+        return args.python_interpreter
+
+    config = configs.get(name, {})
+
+    if not config:
+        if args.python:
+            guess = 'python%s' % args.python
+        else:
+            guess = 'python'
+
+        display.warning('Using "%s" as the Python interpreter. '
+                        'Use completion config or the --python-interpreter option to specify the path.' % guess, unique=True)
+
+        return guess
+
+    python_version = get_python_version(args, configs, name)
+
+    python_dir = config.get('python_dir', '/usr/bin')
+    python_interpreter = os.path.join(python_dir, 'python%s' % python_version)
+    python_interpreter = config.get('python%s' % python_version, python_interpreter)
+
+    return python_interpreter
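Editor's note: the fallback chain in get_python_interpreter() above, reduced to its core in a sketch (not part of the patch) with a hypothetical completion config entry.

import os

config = {'python': '3.6,2.7', 'python_dir': '/usr/local/bin'}  # invented completion entry

python_version = config['python'].split(',')[0]        # first listed version is the default
python_dir = config.get('python_dir', '/usr/bin')      # '/usr/bin' unless overridden
python_interpreter = os.path.join(python_dir, 'python%s' % python_version)
python_interpreter = config.get('python%s' % python_version, python_interpreter)

print(python_interpreter)  # /usr/local/bin/python3.6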
+
+
+class EnvironmentDescription:
+    """Description of current running environment."""
+    def __init__(self, args):
+        """Initialize snapshot of environment configuration.
+        :type args: IntegrationConfig
+        """
+        self.args = args
+
+        if self.args.explain:
+            self.data = {}
+            return
+
+        warnings = []
+
+        versions = ['']
+        versions += SUPPORTED_PYTHON_VERSIONS
+        versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))
+
+        version_check = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'versions.py')
+        python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
+        pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
+        program_versions = dict((v, self.get_version([python_paths[v], version_check], warnings)) for v in sorted(python_paths) if python_paths[v])
+        pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
+        known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))
+
+        for version in sorted(versions):
+            self.check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings)
+
+        for warning in warnings:
+            display.warning(warning, unique=True)
+
+        self.data = dict(
+            python_paths=python_paths,
+            pip_paths=pip_paths,
+            program_versions=program_versions,
+            pip_interpreters=pip_interpreters,
+            known_hosts_hash=known_hosts_hash,
+            warnings=warnings,
+        )
+
+    @staticmethod
+    def check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings):
+        """
+        :type version: str
+        :param python_paths: dict[str, str]
+        :param pip_paths: dict[str, str]
+        :param pip_interpreters: dict[str, str]
+        :param warnings: list[str]
+        """
+        python_label = 'Python%s' % (' %s' % version if version else '')
+
+        pip_path = pip_paths.get(version)
+        python_path = python_paths.get(version)
+
+        if not python_path and not pip_path:
+            # neither python nor pip is present for this version
+            return
+
+        if not python_path:
+            warnings.append('A %s interpreter was not found, yet a matching pip was found at "%s".' % (python_label, pip_path))
+            return
+
+        if not pip_path:
+            warnings.append('A %s interpreter was found at "%s", yet a matching pip was not found.' % (python_label, python_path))
+            return
+
+        pip_shebang = pip_interpreters.get(version)
+
+        match = re.search(r'#!\s*(?P<command>[^\s]+)', pip_shebang)
+
+        if not match:
+            warnings.append('A %s pip was found at "%s", but it does not have a valid shebang: %s' % (python_label, pip_path, pip_shebang))
+            return
+
+        pip_interpreter = os.path.realpath(match.group('command'))
+        python_interpreter = os.path.realpath(python_path)
+
+        if pip_interpreter == python_interpreter:
+            return
+
+        try:
+            identical = filecmp.cmp(pip_interpreter, python_interpreter)
+        except OSError:
+            identical = False
+
+        if identical:
+            return
+
+        warnings.append('A %s pip was found at "%s", but it uses interpreter "%s" instead of "%s".' % (
+            python_label, pip_path, pip_interpreter, python_interpreter))
+
+    def __str__(self):
+        """
+        :rtype: str
+        """
+        return json.dumps(self.data, sort_keys=True, indent=4)
+
+    def validate(self, target_name, throw):
+        """
+        :type target_name: str
+        :type throw: bool
+        :rtype: bool
+        """
+        current = EnvironmentDescription(self.args)
+
+        return self.check(self, current, target_name, throw)
+
+    @staticmethod
+    def check(original, current, target_name, throw):
+        """
+        :type original: EnvironmentDescription
+        :type current: EnvironmentDescription
+        :type target_name: str
+        :type throw: bool
+        :rtype: bool
+        """
+        original_json = str(original)
+        current_json = str(current)
+
+        if original_json == current_json:
+            return True
+
+        unified_diff = '\n'.join(difflib.unified_diff(
+            a=original_json.splitlines(),
+            b=current_json.splitlines(),
+            fromfile='original.json',
+            tofile='current.json',
+            lineterm='',
+        ))
+
+        message = ('Test target "%s" has changed the test environment!\n'
+                   'If these changes are necessary, they must be reverted before the test finishes.\n'
+                   '>>> Original Environment\n'
+                   '%s\n'
+                   '>>> Current Environment\n'
+                   '%s\n'
+                   '>>> Environment Diff\n'
+                   '%s'
+                   % (target_name, original_json, current_json, unified_diff))
+
+        if throw:
+            raise ApplicationError(message)
+
+        display.error(message)
+
+        return False
+
+    @staticmethod
+    def get_version(command, warnings):
+        """
+        :type command: list[str]
+        :type warnings: list[text]
+        :rtype: list[str]
+        """
+        try:
+            stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
+        except SubprocessError as ex:
+            warnings.append(u'%s' % ex)
+            return None  # all failures are equal, we don't care why it failed, only that it did
+
+        return [line.strip() for line in ((stdout or '').strip() + (stderr or '').strip()).splitlines()]
+
+    @staticmethod
+    def get_shebang(path):
+        """
+        :type path: str
+        :rtype: str
+        """
+        with open_text_file(path) as script_fd:
+            return script_fd.readline().strip()
+
+    @staticmethod
+    def get_hash(path):
+        """
+        :type path: str
+        :rtype: str | None
+        """
+        if not os.path.exists(path):
+            return None
+
+        file_hash = hashlib.md5()
+
+        file_hash.update(read_binary_file(path))
+
+        return file_hash.hexdigest()
+
+
+class NoChangesDetected(ApplicationWarning):
+    """Exception when change detection was performed, but no changes were found."""
+    def __init__(self):
+        super(NoChangesDetected, self).__init__('No changes detected.')
+
+
+class NoTestsForChanges(ApplicationWarning):
+    """Exception when changes are detected, but no tests are triggered as a result."""
+    def __init__(self):
+        super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
+
+
+class Delegate(Exception):
+    """Trigger command delegation."""
+    def __init__(self, exclude=None, require=None, integration_targets=None):
+        """
+        :type exclude: list[str] | None
+        :type require: list[str] | None
+        :type integration_targets: tuple[IntegrationTarget] | None
+        """
+        super(Delegate, self).__init__()
+
+        self.exclude = exclude or []
+        self.require = require or []
+        self.integration_targets = integration_targets or tuple()
+
+
+class AllTargetsSkipped(ApplicationWarning):
+    """All targets skipped."""
+    def __init__(self):
+        super(AllTargetsSkipped, self).__init__('All targets skipped.')
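Editor's note: EnvironmentDescription.check() above reports drift as a unified diff of two JSON snapshots. The same comparison is sketched below (not part of the patch), reduced to two hand-made snapshots.

import difflib
import json

original = json.dumps({'python_paths': {'3.6': '/usr/bin/python3.6'}}, sort_keys=True, indent=4)
current = json.dumps({'python_paths': {'3.6': '/usr/local/bin/python3.6'}}, sort_keys=True, indent=4)

print('\n'.join(difflib.unified_diff(
    a=original.splitlines(),
    b=current.splitlines(),
    fromfile='original.json',
    tofile='current.json',
    lineterm='',
)))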
diff --git a/test/lib/ansible_test/_internal/git.py b/test/lib/ansible_test/_internal/git.py
new file mode 100644
index 00000000..acc39f3f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/git.py
@@ -0,0 +1,137 @@
+"""Wrapper around git command-line tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from . import types as t
+
+from .util import (
+    SubprocessError,
+    raw_command,
+)
+
+
+class Git:
+    """Wrapper around git command-line tools."""
+    def __init__(self, root=None):  # type: (t.Optional[str]) -> None
+        self.git = 'git'
+        self.root = root
+
+    def get_diff(self, args, git_options=None):
+        """
+        :type args: list[str]
+        :type git_options: list[str] | None
+        :rtype: list[str]
+        """
+        cmd = ['diff'] + args
+        if git_options is None:
+            git_options = ['-c', 'core.quotePath=']
+        return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
+
+    def get_diff_names(self, args):
+        """
+        :type args: list[str]
+        :rtype: list[str]
+        """
+        cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
+        return self.run_git_split(cmd, '\0')
+
+    def get_submodule_paths(self):  # type: () -> t.List[str]
+        """Return a list of submodule paths recursively."""
+        cmd = ['submodule', 'status', '--recursive']
+        output = self.run_git_split(cmd, '\n')
+        submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
+
+        # status is returned for all submodules in the current git repository relative to the current directory
+        # when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
+        # this can occur when multiple collections are in a git repo and some collections are submodules when others are not
+        # specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
+        #   error: pathspec '.' did not match any file(s) known to git
+        # this can occur when the current directory contains no files tracked by git
+        # instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
+        submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
+
+        return submodule_paths
+
+    def get_file_names(self, args):
+        """
+        :type args: list[str]
+        :rtype: list[str]
+        """
+        cmd = ['ls-files', '-z'] + args
+        return self.run_git_split(cmd, '\0')
+
+    def get_branches(self):
+        """
+        :rtype: list[str]
+        """
+        cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
+        return self.run_git_split(cmd)
+
+    def get_branch(self):
+        """
+        :rtype: str
+        """
+        cmd = ['symbolic-ref', '--short', 'HEAD']
+        return self.run_git(cmd).strip()
+
+    def get_rev_list(self, commits=None, max_count=None):
+        """
+        :type commits: list[str] | None
+        :type max_count: int | None
+        :rtype: list[str]
+        """
+        cmd = ['rev-list']
+
+        if commits:
+            cmd += commits
+        else:
+            cmd += ['HEAD']
+
+        if max_count:
+            cmd += ['--max-count', '%s' % max_count]
+
+        return self.run_git_split(cmd)
+
+    def get_branch_fork_point(self, branch):
+        """
+        :type branch: str
+        :rtype: str
+        """
+        cmd = ['merge-base', '--fork-point', branch]
+        return self.run_git(cmd).strip()
+
+    def is_valid_ref(self, ref):
+        """
+        :type ref: str
+        :rtype: bool
+        """
+        cmd = ['show', ref]
+        try:
+            self.run_git(cmd, str_errors='replace')
+            return True
+        except SubprocessError:
+            return False
+
+    def run_git_split(self, cmd, separator=None, str_errors='strict'):
+        """
+        :type cmd: list[str]
+        :type separator: str | None
+        :type str_errors: str
+        :rtype: list[str]
+        """
+        output = self.run_git(cmd, str_errors=str_errors).strip(separator)
+
+        if not output:
+            return []
+
+        return output.split(separator)
+
+    def run_git(self, cmd, str_errors='strict'):
+        """
+        :type cmd: list[str]
+        :type str_errors: str
+        :rtype: str
+        """
+        return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
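Editor's note: typical use of the Git wrapper added above, run from inside a git checkout. Within ansible-test the import is relative (from .git import Git); the plain import below is an assumption for a standalone run, and the output depends on the local repository.

from git import Git  # assumed standalone import; ansible-test uses 'from .git import Git'

git = Git()
print(git.get_branch())                      # current branch name
print(git.get_file_names(['--cached'])[:5])  # first few tracked files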
diff --git a/test/lib/ansible_test/_internal/http.py b/test/lib/ansible_test/_internal/http.py
new file mode 100644
index 00000000..6607a10b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/http.py
@@ -0,0 +1,181 @@
+"""
+Primitive replacement for requests to avoid extra dependency.
+Avoids use of urllib2 due to lack of SNI support.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import time
+
+try:
+    from urllib import urlencode
+except ImportError:
+    # noinspection PyCompatibility, PyUnresolvedReferences
+    from urllib.parse import urlencode  # pylint: disable=locally-disabled, import-error, no-name-in-module
+
+try:
+    # noinspection PyCompatibility
+    from urlparse import urlparse, urlunparse, parse_qs
+except ImportError:
+    # noinspection PyCompatibility, PyUnresolvedReferences
+    from urllib.parse import urlparse, urlunparse, parse_qs  # pylint: disable=locally-disabled, ungrouped-imports
+
+from .util import (
+    ApplicationError,
+    SubprocessError,
+    display,
+)
+
+from .util_common import (
+    CommonConfig,
+    run_command,
+)
+
+
+class HttpClient:
+    """Make HTTP requests via curl."""
+    def __init__(self, args, always=False, insecure=False, proxy=None):
+        """
+        :type args: CommonConfig
+        :type always: bool
+        :type insecure: bool
+        :type proxy: str | None
+        """
+        self.args = args
+        self.always = always
+        self.insecure = insecure
+        self.proxy = proxy
+
+        self.username = None
+        self.password = None
+
+    def get(self, url):
+        """
+        :type url: str
+        :rtype: HttpResponse
+        """
+        return self.request('GET', url)
+
+    def delete(self, url):
+        """
+        :type url: str
+        :rtype: HttpResponse
+        """
+        return self.request('DELETE', url)
+
+    def put(self, url, data=None, headers=None):
+        """
+        :type url: str
+        :type data: str | None
+        :type headers: dict[str, str] | None
+        :rtype: HttpResponse
+        """
+        return self.request('PUT', url, data, headers)
+
+    def request(self, method, url, data=None, headers=None):
+        """
+        :type method: str
+        :type url: str
+        :type data: str | None
+        :type headers: dict[str, str] | None
+        :rtype: HttpResponse
+        """
+        cmd = ['curl', '-s', '-S', '-i', '-X', method]
+
+        if self.insecure:
+            cmd += ['--insecure']
+
+        if headers is None:
+            headers = {}
+
+        headers['Expect'] = ''  # don't send expect continue header
+
+        if self.username:
+            if self.password:
+                display.sensitive.add(self.password)
+                cmd += ['-u', '%s:%s' % (self.username, self.password)]
+            else:
+                cmd += ['-u', self.username]
+
+        for header in headers.keys():
+            cmd += ['-H', '%s: %s' % (header, headers[header])]
+
+        if data is not None:
+            cmd += ['-d', data]
+
+        if self.proxy:
+            cmd += ['-x', self.proxy]
+
+        cmd += [url]
+
+        attempts = 0
+        max_attempts = 3
+        sleep_seconds = 3
+
+        # curl error codes which are safe to retry (request never sent to server)
+        retry_on_status = (
+            6,  # CURLE_COULDNT_RESOLVE_HOST
+        )
+
+        stdout = ''
+
+        while True:
+            attempts += 1
+
+            try:
+                stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
+                break
+            except SubprocessError as ex:
+                if ex.status in retry_on_status and attempts < max_attempts:
+                    display.warning(u'%s' % ex)
+                    time.sleep(sleep_seconds)
+                    continue
+
+                raise
+
+        if self.args.explain and not self.always:
+            return HttpResponse(method, url, 200, '')
+
+        header, body = stdout.split('\r\n\r\n', 1)
+
+        response_headers = header.split('\r\n')
+        first_line = response_headers[0]
+        http_response = first_line.split(' ')
+        status_code = int(http_response[1])
+
+        return HttpResponse(method, url, status_code, body)
+
+
+class HttpResponse:
+    """HTTP response from curl."""
+    def __init__(self, method, url, status_code, response):
+        """
+        :type method: str
+        :type url: str
+        :type status_code: int
+        :type response: str
+        """
+        self.method = method
+        self.url = url
+        self.status_code = status_code
+        self.response = response
+
+    def json(self):
+        """
+        :rtype: any
+        """
+        try:
+            return json.loads(self.response)
+        except ValueError:
+            raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response))
+
+
+class HttpError(ApplicationError):
+    """HTTP response as an error."""
+    def __init__(self, status, message):
+        """
+        :type status: int
+        :type message: str
+        """
+        super(HttpError, self).__init__('%s: %s' % (status, message))
+        self.status = status
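Editor's note: intended use of the curl-backed client added above, sketched below (not part of the patch). The import and URL are assumptions; args must be a CommonConfig instance supplied by the calling command.

from http import HttpClient  # assumed standalone import; ansible-test uses 'from .http import HttpClient'

def fetch_status(args):  # args: a CommonConfig provided by the caller
    client = HttpClient(args, always=True)
    response = client.get('https://example.com/api/status')  # example URL
    return response.json()  # raises HttpError if the body is not JSON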
% import_name): + if sub_import in seen: + continue + + seen.add(sub_import) + + matches = sorted(recurse_import(sub_import, depth + 1, seen)) + + for result in matches: + results.add(result) + + import_path = get_import_path(import_name) + + if import_path not in imports_by_target_path: + import_path = get_import_path(import_name, package=True) + + if import_path not in imports_by_target_path: + raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name) + + # process imports in reverse so the deepest imports come first + for name in sorted(imports_by_target_path[import_path], reverse=True): + if name in virtual_utils: + continue + + if name in seen: + continue + + seen.add(name) + + matches = sorted(recurse_import(name, depth + 1, seen)) + + for result in matches: + results.add(result) + + return results + + for module_util in module_utils: + # recurse over module_utils imports while excluding self + module_util_imports = recurse_import(module_util) + module_util_imports.remove(module_util) + + # add recursive imports to all path entries which import this module_util + for target_path in imports_by_target_path: + if module_util in imports_by_target_path[target_path]: + for module_util_import in sorted(module_util_imports): + if module_util_import not in imports_by_target_path[target_path]: + display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6) + imports_by_target_path[target_path].add(module_util_import) + + imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils]) + + for target_path in imports_by_target_path: + for module_util in imports_by_target_path[target_path]: + imports[module_util].add(target_path) + + # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package + for virtual_util in virtual_utils: + parent_package = '.'.join(virtual_util.split('.')[:-1]) + imports[virtual_util] = imports[parent_package] + display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6) + + for module_util in sorted(imports): + if not imports[module_util]: + package_path = get_import_path(module_util, package=True) + + if os.path.exists(package_path) and not os.path.getsize(package_path): + continue # ignore empty __init__.py files + + display.warning('No imports found which use the "%s" module_util.' % module_util) + + return imports + + +def get_python_module_utils_name(path): # type: (str) -> str + """Return a namespace and name from the given module_utils path.""" + base_path = data_context().content.module_utils_path + + if data_context().content.collection: + prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils' + else: + prefix = 'ansible.module_utils' + + if path.endswith('/__init__.py'): + path = os.path.dirname(path) + + if path == base_path: + name = prefix + else: + name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.') + + return name + + +def enumerate_module_utils(): + """Return a list of available module_utils imports. 
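+    For example, when testing Ansible itself, lib/ansible/module_utils/basic.py is enumerated as 'ansible.module_utils.basic'.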
+ :rtype: set[str] + """ + module_utils = [] + + for path in data_context().content.walk_files(data_context().content.module_utils_path): + ext = os.path.splitext(path)[1] + + if ext != '.py': + continue + + module_utils.append(get_python_module_utils_name(path)) + + return set(module_utils) + + +def extract_python_module_utils_imports(path, module_utils): + """Return a list of module_utils imports found in the specified source file. + :type path: str + :type module_utils: set[str] + :rtype: set[str] + """ + # Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding. + # See: https://www.python.org/dev/peps/pep-0263 + # Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised. + code = read_binary_file(path) + + try: + tree = ast.parse(code) + except SyntaxError as ex: + # Treat this error as a warning so tests can be executed as best as possible. + # The compile test will detect and report this syntax error. + display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg)) + return set() + + finder = ModuleUtilFinder(path, module_utils) + finder.visit(tree) + return finder.imports + + +def get_import_path(name, package=False): # type: (str, bool) -> str + """Return a path from an import name.""" + if package: + filename = os.path.join(name.replace('.', '/'), '__init__.py') + else: + filename = '%s.py' % name.replace('.', '/') + + if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils': + path = os.path.join('lib', filename) + elif data_context().content.collection and ( + name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or + name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name): + path = '/'.join(filename.split('/')[3:]) + else: + raise Exception('Unexpected import name: %s' % name) + + return path + + +def path_to_module(path): # type: (str) -> str + """Convert the given path to a module name.""" + module = os.path.splitext(path)[0].replace(os.path.sep, '.') + + if module.endswith('.__init__'): + module = module[:-9] + + return module + + +def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str + """Convert a relative import to an absolute import.""" + if level <= 0: + absolute_name = name + elif not module: + display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno)) + absolute_name = 'relative.nomodule' + else: + parts = module.split('.') + + if level >= len(parts): + display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno)) + absolute_name = 'relative.abovelevel' + else: + absolute_name = '.'.join(parts[:-level] + [name]) + + return absolute_name + + +class ModuleUtilFinder(ast.NodeVisitor): + """AST visitor to find valid module_utils imports.""" + def __init__(self, path, module_utils): + """Return a list of module_utils imports found in the specified source file. 
+        :type path: str
+        :type module_utils: set[str]
+        """
+        self.path = path
+        self.module_utils = module_utils
+        self.imports = set()
+
+        # implicitly import parent package
+
+        if path.endswith('/__init__.py'):
+            path = os.path.split(path)[0]
+
+        if path.startswith('lib/ansible/module_utils/'):
+            package = os.path.split(path)[0].replace('/', '.')[4:]
+
+            if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
+                self.add_import(package, 0)
+
+        self.module = None
+
+        if data_context().content.is_ansible:
+            # Various parts of the Ansible source tree execute within different modules.
+            # To support import analysis, each file which uses relative imports must reside under a path defined here.
+            # The mapping is a tuple consisting of a path pattern to match and a replacement path.
+            # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
+            path_map = (
+                ('^hacking/build_library/build_ansible/', 'build_ansible/'),
+                ('^lib/ansible/', 'ansible/'),
+                ('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
+                ('^test/units/', 'test/units/'),
+                ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
+                ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
+                ('^test/integration/targets/.*/library/', 'ansible/modules/'),
+            )
+
+            for pattern, replacement in path_map:
+                if re.search(pattern, self.path):
+                    revised_path = re.sub(pattern, replacement, self.path)
+                    self.module = path_to_module(revised_path)
+                    break
+        else:
+            # This assumes that all files within the collection are executed by Ansible as part of the collection.
+            # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
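+            # For example (hypothetical collection ns.col): a file at plugins/modules/ping.py
+            # resolves to the module name 'ansible_collections.ns.col.plugins.modules.ping'.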
+            self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
+
+    # noinspection PyPep8Naming
+    # pylint: disable=locally-disabled, invalid-name
+    def visit_Import(self, node):
+        """
+        :type node: ast.Import
+        """
+        self.generic_visit(node)
+
+        # import ansible.module_utils.MODULE[.MODULE]
+        # import ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE]
+        self.add_imports([alias.name for alias in node.names], node.lineno)
+
+    # noinspection PyPep8Naming
+    # pylint: disable=locally-disabled, invalid-name
+    def visit_ImportFrom(self, node):
+        """
+        :type node: ast.ImportFrom
+        """
+        self.generic_visit(node)
+
+        if not node.module:
+            return
+
+        module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
+
+        if not module.startswith('ansible'):
+            return
+
+        # from ansible.module_utils import MODULE[, MODULE]
+        # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+        # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
+        # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+        self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
+
+    def add_import(self, name, line_number):
+        """
+        :type name: str
+        :type line_number: int
+        """
+        import_name = name
+
+        while self.is_module_util_name(name):
+            if name in self.module_utils:
+                if name not in self.imports:
+                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
+                    self.imports.add(name)
+
+                return  # duplicate imports are ignored
+
+            name = '.'.join(name.split('.')[:-1])
+
+        if is_subdir(self.path, data_context().content.test_path):
+            return  # invalid imports in tests are ignored
+
+        # Treat this error as a warning so tests can be executed as best as possible.
+        # This error should be detected by unit or integration tests.
+        display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
+
+    def add_imports(self, names, line_no):  # type: (t.List[str], int) -> None
+        """Add the given import names if they are module_utils imports."""
+        for name in names:
+            if self.is_module_util_name(name):
+                self.add_import(name, line_no)
+
+    @staticmethod
+    def is_module_util_name(name):  # type: (str) -> bool
+        """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
+        if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
+            return True
+
+        if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.'
% data_context().content.collection.full_name): + return True + + return False diff --git a/test/lib/ansible_test/_internal/init.py b/test/lib/ansible_test/_internal/init.py new file mode 100644 index 00000000..682e6b0c --- /dev/null +++ b/test/lib/ansible_test/_internal/init.py @@ -0,0 +1,16 @@ +"""Early initialization for ansible-test before most other imports have been performed.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import resource + +from .constants import ( + SOFT_RLIMIT_NOFILE, +) + +CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE) +DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1]) + +if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE: + resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE) + CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE diff --git a/test/lib/ansible_test/_internal/integration/__init__.py b/test/lib/ansible_test/_internal/integration/__init__.py new file mode 100644 index 00000000..f7be34e7 --- /dev/null +++ b/test/lib/ansible_test/_internal/integration/__init__.py @@ -0,0 +1,349 @@ +"""Ansible integration test infrastructure.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import contextlib +import json +import os +import shutil +import tempfile + +from .. import types as t + +from ..encoding import ( + to_bytes, +) + +from ..target import ( + analyze_integration_target_dependencies, + walk_integration_targets, +) + +from ..config import ( + IntegrationConfig, + NetworkIntegrationConfig, + PosixIntegrationConfig, + WindowsIntegrationConfig, +) + +from ..io import ( + make_dirs, + write_text_file, + read_text_file, +) + +from ..util import ( + ApplicationError, + display, + COVERAGE_CONFIG_NAME, + MODE_DIRECTORY, + MODE_DIRECTORY_WRITE, + MODE_FILE, +) + +from ..util_common import ( + named_temporary_file, + ResultType, +) + +from ..coverage_util import ( + generate_coverage_config, +) + +from ..cache import ( + CommonCache, +) + +from ..cloud import ( + CloudEnvironmentConfig, +) + +from ..data import ( + data_context, +) + + +def setup_common_temp_dir(args, path): + """ + :type args: IntegrationConfig + :type path: str + """ + if args.explain: + return + + os.mkdir(path) + os.chmod(path, MODE_DIRECTORY) + + if args.coverage: + coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME) + + coverage_config = generate_coverage_config(args) + + write_text_file(coverage_config_path, coverage_config) + + os.chmod(coverage_config_path, MODE_FILE) + + coverage_output_path = os.path.join(path, ResultType.COVERAGE.name) + + os.mkdir(coverage_output_path) + os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE) + + +def generate_dependency_map(integration_targets): + """ + :type integration_targets: list[IntegrationTarget] + :rtype: dict[str, set[IntegrationTarget]] + """ + targets_dict = dict((target.name, target) for target in integration_targets) + target_dependencies = analyze_integration_target_dependencies(integration_targets) + dependency_map = {} + + invalid_targets = set() + + for dependency, dependents in target_dependencies.items(): + dependency_target = targets_dict.get(dependency) + + if not dependency_target: + invalid_targets.add(dependency) + continue + + for dependent in dependents: + if dependent not in dependency_map: + dependency_map[dependent] = set() + + dependency_map[dependent].add(dependency_target) + + if invalid_targets: + raise ApplicationError('Non-existent target dependencies: %s' % ', 
'.join(sorted(invalid_targets))) + + return dependency_map + + +def get_files_needed(target_dependencies): + """ + :type target_dependencies: list[IntegrationTarget] + :rtype: list[str] + """ + files_needed = [] + + for target_dependency in target_dependencies: + files_needed += target_dependency.needs_file + + files_needed = sorted(set(files_needed)) + + invalid_paths = [path for path in files_needed if not os.path.isfile(path)] + + if invalid_paths: + raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths)) + + return files_needed + + +def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None + """Check the given inventory for issues.""" + if args.docker or args.remote: + if os.path.exists(inventory_path): + inventory = read_text_file(inventory_path) + + if 'ansible_ssh_private_key_file' in inventory: + display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.') + + +def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str + """Return the inventory path used for the given integration configuration relative to the content root.""" + inventory_names = { + PosixIntegrationConfig: 'inventory', + WindowsIntegrationConfig: 'inventory.winrm', + NetworkIntegrationConfig: 'inventory.networking', + } # type: t.Dict[t.Type[IntegrationConfig], str] + + return os.path.join(data_context().content.integration_path, inventory_names[type(args)]) + + +def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None + """Make the given inventory available during delegation.""" + if isinstance(args, PosixIntegrationConfig): + return + + def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None + """ + Add the inventory file to the payload file list. + This will preserve the file during delegation even if it is ignored or is outside the content and install roots. + """ + inventory_path = get_inventory_relative_path(args) + inventory_tuple = inventory_path_src, inventory_path + + if os.path.isfile(inventory_path_src) and inventory_tuple not in files: + originals = [item for item in files if item[1] == inventory_path] + + if originals: + for original in originals: + files.remove(original) + + display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src)) + else: + display.notice('Sourcing inventory file "%s" from "%s".' 
% (inventory_path, inventory_path_src)) + + files.append(inventory_tuple) + + data_context().register_payload_callback(inventory_callback) + + +@contextlib.contextmanager +def integration_test_environment(args, target, inventory_path_src): + """ + :type args: IntegrationConfig + :type target: IntegrationTarget + :type inventory_path_src: str + """ + ansible_config_src = args.get_ansible_config() + ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command) + + if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases: + display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.') + + integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path) + targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path) + inventory_path = inventory_path_src + ansible_config = ansible_config_src + vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path) + + yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file) + return + + # When testing a collection, the temporary directory must reside within the collection. + # This is necessary to enable support for the default collection for non-collection content (playbooks and roles). + root_temp_dir = os.path.join(ResultType.TMP.path, 'integration') + + prefix = '%s-' % target.name + suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8' + + if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases: + display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.') + suffix = '-ansible' + + if args.explain: + temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix)) + else: + make_dirs(root_temp_dir) + temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir) + + try: + display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2) + + inventory_relative_path = get_inventory_relative_path(args) + inventory_path = os.path.join(temp_dir, inventory_relative_path) + + cache = IntegrationCache(args) + + target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set()))) + + files_needed = get_files_needed(target_dependencies) + + integration_dir = os.path.join(temp_dir, data_context().content.integration_path) + targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path) + ansible_config = os.path.join(temp_dir, ansible_config_relative) + + vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path) + vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path) + + file_copies = [ + (ansible_config_src, ansible_config), + (inventory_path_src, inventory_path), + ] + + if os.path.exists(vars_file_src): + file_copies.append((vars_file_src, vars_file)) + + file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed] + + integration_targets_relative_path = data_context().content.integration_targets_path + + directory_copies = [ + ( + os.path.join(integration_targets_relative_path, target.relative_path), + os.path.join(temp_dir, integration_targets_relative_path, target.relative_path) + ) + for target in target_dependencies + ] + + directory_copies = sorted(set(directory_copies)) + file_copies = sorted(set(file_copies)) + + if not 
args.explain: + make_dirs(integration_dir) + + for dir_src, dir_dst in directory_copies: + display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2) + + if not args.explain: + shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) + + for file_src, file_dst in file_copies: + display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2) + + if not args.explain: + make_dirs(os.path.dirname(file_dst)) + shutil.copy2(file_src, file_dst) + + yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file) + finally: + if not args.explain: + shutil.rmtree(temp_dir) + + +@contextlib.contextmanager +def integration_test_config_file(args, env_config, integration_dir): + """ + :type args: IntegrationConfig + :type env_config: CloudEnvironmentConfig + :type integration_dir: str + """ + if not env_config: + yield None + return + + config_vars = (env_config.ansible_vars or {}).copy() + + config_vars.update(dict( + ansible_test=dict( + environment=env_config.env_vars, + module_defaults=env_config.module_defaults, + ) + )) + + config_file = json.dumps(config_vars, indent=4, sort_keys=True) + + with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: + filename = os.path.relpath(path, integration_dir) + + display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3) + + yield path + + +class IntegrationEnvironment: + """Details about the integration environment.""" + def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file): + self.integration_dir = integration_dir + self.targets_dir = targets_dir + self.inventory_path = inventory_path + self.ansible_config = ansible_config + self.vars_file = vars_file + + +class IntegrationCache(CommonCache): + """Integration cache.""" + @property + def integration_targets(self): + """ + :rtype: list[IntegrationTarget] + """ + return self.get('integration_targets', lambda: list(walk_integration_targets())) + + @property + def dependency_map(self): + """ + :rtype: dict[str, set[IntegrationTarget]] + """ + return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets)) diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py new file mode 100644 index 00000000..0f61cd2d --- /dev/null +++ b/test/lib/ansible_test/_internal/io.py @@ -0,0 +1,94 @@ +"""Functions for disk IO.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import errno +import io +import json +import os + +from . 
import types as t
+
+from .encoding import (
+    ENCODING,
+    to_bytes,
+    to_text,
+)
+
+
+def read_json_file(path):  # type: (t.AnyStr) -> t.Any
+    """Parse and return the json content from the specified path."""
+    return json.loads(read_text_file(path))
+
+
+def read_text_file(path):  # type: (t.AnyStr) -> t.Text
+    """Return the contents of the specified path as text."""
+    return to_text(read_binary_file(path))
+
+
+def read_binary_file(path):  # type: (t.AnyStr) -> bytes
+    """Return the contents of the specified path as bytes."""
+    with open_binary_file(path) as file:
+        return file.read()
+
+
+def make_dirs(path):  # type: (str) -> None
+    """Create a directory at path, including any necessary parent directories."""
+    try:
+        os.makedirs(to_bytes(path))
+    except OSError as ex:
+        if ex.errno != errno.EEXIST:
+            raise
+
+
+def write_json_file(path,  # type: str
+                    content,  # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+                    create_directories=False,  # type: bool
+                    formatted=True,  # type: bool
+                    encoder=None,  # type: t.Optional[t.Callable[[t.Any], t.Any]]
+                    ):  # type: (...) -> None
+    """Write the given json content to the specified path, optionally creating missing directories."""
+    text_content = json.dumps(content,
+                              sort_keys=formatted,
+                              indent=4 if formatted else None,
+                              separators=(', ', ': ') if formatted else (',', ':'),
+                              cls=encoder,
+                              ) + '\n'
+
+    write_text_file(path, text_content, create_directories=create_directories)
+
+
+def write_text_file(path, content, create_directories=False):  # type: (str, str, bool) -> None
+    """Write the given text content to the specified path, optionally creating missing directories."""
+    if create_directories:
+        make_dirs(os.path.dirname(path))
+
+    with open_binary_file(path, 'wb') as file:
+        file.write(to_bytes(content))
+
+
+def open_text_file(path, mode='r'):  # type: (str, str) -> t.TextIO
+    """Open the given path for text access."""
+    if 'b' in mode:
+        raise Exception('mode cannot include "b" for text files: %s' % mode)
+
+    # noinspection PyTypeChecker
+    return io.open(to_bytes(path), mode, encoding=ENCODING)
+
+
+def open_binary_file(path, mode='rb'):  # type: (str, str) -> t.BinaryIO
+    """Open the given path for binary access."""
+    if 'b' not in mode:
+        raise Exception('mode must include "b" for binary files: %s' % mode)
+
+    # noinspection PyTypeChecker
+    return io.open(to_bytes(path), mode)
+
+
+class SortedSetEncoder(json.JSONEncoder):
+    """Encode sets as sorted lists."""
+    def default(self, obj):  # pylint: disable=method-hidden, arguments-differ
+        if isinstance(obj, set):
+            return sorted(obj)
+
+        return super(SortedSetEncoder, self).default(obj)
diff --git a/test/lib/ansible_test/_internal/manage_ci.py b/test/lib/ansible_test/_internal/manage_ci.py
new file mode 100644
index 00000000..e81dad68
--- /dev/null
+++ b/test/lib/ansible_test/_internal/manage_ci.py
@@ -0,0 +1,335 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+import time
+
+from .util import (
+    SubprocessError,
+    ApplicationError,
+    cmd_quote,
+    display,
+    ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+    intercept_command,
+    get_network_settings,
+    run_command,
+)
+
+from .core_ci import (
+    AnsibleCoreCI,
+)
+
+from .ansible_util import (
+    ansible_environment,
+)
+
+from .config import (
+    ShellConfig,
+)
+
+from .payload import (
+    create_payload,
+)
+
+
+class ManageWindowsCI:
+    """Manage access to a Windows instance provided by Ansible Core CI."""
+    def
__init__(self, core_ci): + """ + :type core_ci: AnsibleCoreCI + """ + self.core_ci = core_ci + self.ssh_args = ['-i', self.core_ci.ssh_key.key] + + ssh_options = dict( + BatchMode='yes', + StrictHostKeyChecking='no', + UserKnownHostsFile='/dev/null', + ServerAliveInterval=15, + ServerAliveCountMax=4, + ) + + for ssh_option in sorted(ssh_options): + self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])] + + def setup(self, python_version): + """Used in delegate_remote to setup the host, no action is required for Windows. + :type python_version: str + """ + + def wait(self): + """Wait for instance to respond to ansible ping.""" + extra_vars = [ + 'ansible_connection=winrm', + 'ansible_host=%s' % self.core_ci.connection.hostname, + 'ansible_user=%s' % self.core_ci.connection.username, + 'ansible_password=%s' % self.core_ci.connection.password, + 'ansible_port=%s' % self.core_ci.connection.port, + 'ansible_winrm_server_cert_validation=ignore', + ] + + name = 'windows_%s' % self.core_ci.version + + env = ansible_environment(self.core_ci.args) + cmd = ['ansible', '-m', 'ansible.windows.win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)] + + for dummy in range(1, 120): + try: + intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True) + return + except SubprocessError: + time.sleep(10) + + raise ApplicationError('Timeout waiting for %s/%s instance %s.' % + (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) + + def download(self, remote, local): + """ + :type remote: str + :type local: str + """ + self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local) + + def upload(self, local, remote): + """ + :type local: str + :type remote: str + """ + self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote)) + + def ssh(self, command, options=None, force_pty=True): + """ + :type command: str | list[str] + :type options: list[str] | None + :type force_pty: bool + """ + if not options: + options = [] + if force_pty: + options.append('-tt') + + if isinstance(command, list): + command = ' '.join(cmd_quote(c) for c in command) + + run_command(self.core_ci.args, + ['ssh', '-q'] + self.ssh_args + + options + + ['-p', '22', + '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] + + [command]) + + def scp(self, src, dst): + """ + :type src: str + :type dst: str + """ + for dummy in range(1, 10): + try: + run_command(self.core_ci.args, + ['scp'] + self.ssh_args + + ['-P', '22', '-q', '-r', src, dst]) + return + except SubprocessError: + time.sleep(10) + + raise ApplicationError('Failed transfer: %s -> %s' % (src, dst)) + + +class ManageNetworkCI: + """Manage access to a network instance provided by Ansible Core CI.""" + def __init__(self, core_ci): + """ + :type core_ci: AnsibleCoreCI + """ + self.core_ci = core_ci + + def wait(self): + """Wait for instance to respond to ansible ping.""" + settings = get_network_settings(self.core_ci.args, self.core_ci.platform, self.core_ci.version) + + extra_vars = [ + 'ansible_host=%s' % self.core_ci.connection.hostname, + 'ansible_port=%s' % self.core_ci.connection.port, + 'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key, + ] + [ + '%s=%s' % (key, value) for key, value in settings.inventory_vars.items() + ] + + name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-')) + + env = ansible_environment(self.core_ci.args) + cmd = [ + 'ansible', + '-m', 
'%s%s_command' % (settings.collection + '.' if settings.collection else '', self.core_ci.platform), + '-a', 'commands=?', + '-u', self.core_ci.connection.username, + '-i', '%s,' % name, + '-e', ' '.join(extra_vars), + name, + ] + + for dummy in range(1, 90): + try: + intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True) + return + except SubprocessError: + time.sleep(10) + + raise ApplicationError('Timeout waiting for %s/%s instance %s.' % + (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) + + +class ManagePosixCI: + """Manage access to a POSIX instance provided by Ansible Core CI.""" + def __init__(self, core_ci): + """ + :type core_ci: AnsibleCoreCI + """ + self.core_ci = core_ci + self.ssh_args = ['-i', self.core_ci.ssh_key.key] + + ssh_options = dict( + BatchMode='yes', + StrictHostKeyChecking='no', + UserKnownHostsFile='/dev/null', + ServerAliveInterval=15, + ServerAliveCountMax=4, + ) + + for ssh_option in sorted(ssh_options): + self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])] + + if self.core_ci.platform == 'freebsd': + if self.core_ci.provider == 'aws': + self.become = ['su', '-l', 'root', '-c'] + elif self.core_ci.provider == 'azure': + self.become = ['sudo', '-in', 'sh', '-c'] + else: + raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider) + elif self.core_ci.platform == 'macos': + self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH', 'sh', '-c'] + elif self.core_ci.platform == 'osx': + self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH'] + elif self.core_ci.platform == 'rhel' or self.core_ci.platform == 'centos': + self.become = ['sudo', '-in', 'bash', '-c'] + elif self.core_ci.platform in ['aix', 'ibmi']: + self.become = [] + + def setup(self, python_version): + """Start instance and wait for it to become ready and respond to an ansible ping. + :type python_version: str + :rtype: str + """ + pwd = self.wait() + + display.info('Remote working directory: %s' % pwd, verbosity=1) + + if isinstance(self.core_ci.args, ShellConfig): + if self.core_ci.args.raw: + return pwd + + self.configure(python_version) + self.upload_source() + + return pwd + + def wait(self): # type: () -> str + """Wait for instance to respond to SSH.""" + for dummy in range(1, 90): + try: + stdout = self.ssh('pwd', capture=True)[0] + + if self.core_ci.args.explain: + return '/pwd' + + pwd = stdout.strip().splitlines()[-1] + + if not pwd.startswith('/'): + raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout)) + + return pwd + except SubprocessError: + time.sleep(10) + + raise ApplicationError('Timeout waiting for %s/%s instance %s.' % + (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) + + def configure(self, python_version): + """Configure remote host for testing. 
+ :type python_version: str + """ + self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp') + self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version)) + + def upload_source(self): + """Upload and extract source.""" + with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd: + remote_source_dir = '/tmp' + remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name)) + + create_payload(self.core_ci.args, local_source_fd.name) + + self.upload(local_source_fd.name, remote_source_dir) + # AIX does not provide the GNU tar version, leading to parameters + # being different and -z not being recognized. This pattern works + # with both versions of tar. + self.ssh( + 'rm -rf ~/ansible ~/ansible_collections && cd ~/ && gunzip --stdout %s | tar oxf - && rm %s' % + (remote_source_path, remote_source_path) + ) + + def download(self, remote, local): + """ + :type remote: str + :type local: str + """ + self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local) + + def upload(self, local, remote): + """ + :type local: str + :type remote: str + """ + self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote)) + + def ssh(self, command, options=None, capture=False): + """ + :type command: str | list[str] + :type options: list[str] | None + :type capture: bool + :rtype: str | None, str | None + """ + if not options: + options = [] + + if isinstance(command, list): + command = ' '.join(cmd_quote(c) for c in command) + + command = cmd_quote(command) if self.become else command + return run_command(self.core_ci.args, + ['ssh', '-tt', '-q'] + self.ssh_args + + options + + ['-p', str(self.core_ci.connection.port), + '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] + + self.become + [command], capture=capture) + + def scp(self, src, dst): + """ + :type src: str + :type dst: str + """ + for dummy in range(1, 10): + try: + run_command(self.core_ci.args, + ['scp'] + self.ssh_args + + ['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst]) + return + except SubprocessError: + time.sleep(10) + + raise ApplicationError('Failed transfer: %s -> %s' % (src, dst)) diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py new file mode 100644 index 00000000..36575d0c --- /dev/null +++ b/test/lib/ansible_test/_internal/metadata.py @@ -0,0 +1,151 @@ +"""Test metadata for passing data to delegated tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from . 
import types as t
+
+from .util import (
+    display,
+)
+
+from .io import (
+    write_json_file,
+    read_json_file,
+)
+
+from .diff import (
+    parse_diff,
+    FileDiff,
+)
+
+
+class Metadata:
+    """Metadata object for passing data to delegated tests."""
+    def __init__(self):
+        """Initialize metadata."""
+        self.changes = {}  # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]]
+        self.cloud_config = None  # type: t.Optional[t.Dict[str, str]]
+        self.instance_config = None  # type: t.Optional[t.List[t.Dict[str, str]]]
+        self.change_description = None  # type: t.Optional[ChangeDescription]
+        self.ci_provider = None  # type: t.Optional[str]
+
+    def populate_changes(self, diff):
+        """
+        :type diff: list[str] | None
+        """
+        patches = parse_diff(diff)
+        patches = sorted(patches, key=lambda k: k.new.path)  # type: t.List[FileDiff]
+
+        self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
+
+        renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
+        deletes = [patch.old.path for patch in patches if not patch.new.exists]
+
+        # make sure old paths which were renamed or deleted are registered in changes
+        for path in renames + deletes:
+            if path in self.changes:
+                # old path was replaced with another file
+                continue
+
+            # failed tests involving deleted files should be using line 0 since there is no content remaining
+            self.changes[path] = ((0, 0),)
+
+    def to_dict(self):
+        """
+        :rtype: dict[str, any]
+        """
+        return dict(
+            changes=self.changes,
+            cloud_config=self.cloud_config,
+            instance_config=self.instance_config,
+            ci_provider=self.ci_provider,
+            change_description=self.change_description.to_dict(),
+        )
+
+    def to_file(self, path):
+        """
+        :type path: str
+        """
+        data = self.to_dict()
+
+        display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
+
+        write_json_file(path, data)
+
+    @staticmethod
+    def from_file(path):
+        """
+        :type path: str
+        :rtype: Metadata
+        """
+        data = read_json_file(path)
+        return Metadata.from_dict(data)
+
+    @staticmethod
+    def from_dict(data):
+        """
+        :type data: dict[str, any]
+        :rtype: Metadata
+        """
+        metadata = Metadata()
+        metadata.changes = data['changes']
+        metadata.cloud_config = data['cloud_config']
+        metadata.instance_config = data['instance_config']
+        metadata.ci_provider = data['ci_provider']
+        metadata.change_description = ChangeDescription.from_dict(data['change_description'])
+
+        return metadata
+
+
+class ChangeDescription:
+    """Description of changes."""
+    def __init__(self):
+        self.command = ''  # type: str
+        self.changed_paths = []  # type: t.List[str]
+        self.deleted_paths = []  # type: t.List[str]
+        self.regular_command_targets = {}  # type: t.Dict[str, t.List[str]]
+        self.focused_command_targets = {}  # type: t.Dict[str, t.List[str]]
+        self.no_integration_paths = []  # type: t.List[str]
+
+    @property
+    def targets(self):
+        """
+        :rtype: list[str] | None
+        """
+        return self.regular_command_targets.get(self.command)
+
+    @property
+    def focused_targets(self):
+        """
+        :rtype: list[str] | None
+        """
+        return self.focused_command_targets.get(self.command)
+
+    def to_dict(self):
+        """
+        :rtype: dict[str, any]
+        """
+        return dict(
+            command=self.command,
+            changed_paths=self.changed_paths,
+            deleted_paths=self.deleted_paths,
+            regular_command_targets=self.regular_command_targets,
+            focused_command_targets=self.focused_command_targets,
+            no_integration_paths=self.no_integration_paths,
+        )
+
+    @staticmethod
+    def from_dict(data):
+        """
+        :type data: dict[str, any]
+        :rtype:
ChangeDescription + """ + changes = ChangeDescription() + changes.command = data['command'] + changes.changed_paths = data['changed_paths'] + changes.deleted_paths = data['deleted_paths'] + changes.regular_command_targets = data['regular_command_targets'] + changes.focused_command_targets = data['focused_command_targets'] + changes.no_integration_paths = data['no_integration_paths'] + + return changes diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py new file mode 100644 index 00000000..161faba0 --- /dev/null +++ b/test/lib/ansible_test/_internal/payload.py @@ -0,0 +1,146 @@ +"""Payload management for sending Ansible files and test content to other systems (VMs, containers).""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import atexit +import os +import stat +import tarfile +import tempfile +import time + +from . import types as t + +from .config import ( + IntegrationConfig, + ShellConfig, +) + +from .util import ( + display, + ANSIBLE_SOURCE_ROOT, + remove_tree, + is_subdir, +) + +from .data import ( + data_context, +) + +from .util_common import ( + CommonConfig, +) + +# improve performance by disabling uid/gid lookups +tarfile.pwd = None +tarfile.grp = None + +# this bin symlink map must exactly match the contents of the bin directory +# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible +ANSIBLE_BIN_SYMLINK_MAP = { + 'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py', + 'ansible-config': 'ansible', + 'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py', + 'ansible-console': 'ansible', + 'ansible-doc': 'ansible', + 'ansible-galaxy': 'ansible', + 'ansible-inventory': 'ansible', + 'ansible-playbook': 'ansible', + 'ansible-pull': 'ansible', + 'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py', + 'ansible-vault': 'ansible', +} + + +def create_payload(args, dst_path): # type: (CommonConfig, str) -> None + """Create a payload for delegation.""" + if args.explain: + return + + files = list(data_context().ansible_source) + filters = {} + + def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo] + """Make the given file executable.""" + tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP + return tar_info + + if not ANSIBLE_SOURCE_ROOT: + # reconstruct the bin directory which is not available when running from an ansible install + files.extend(create_temporary_bin_files(args)) + filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../'))) + + if not data_context().content.is_ansible: + # exclude unnecessary files when not testing ansible itself + files = [f for f in files if + is_subdir(f[1], 'bin/') or + is_subdir(f[1], 'lib/ansible/') or + is_subdir(f[1], 'test/lib/ansible_test/')] + + if not isinstance(args, (ShellConfig, IntegrationConfig)): + # exclude built-in ansible modules when they are not needed + files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py'] + + collection_layouts = data_context().create_collection_layouts() + + content_files = [] + extra_files = [] + + for layout in collection_layouts: + if layout == data_context().content: + # include files from the current collection (layout.collection.directory will be added later) + 
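+                # Example (hypothetical paths): this produces (src, dst) tuples such as
+                # ('/path/to/ansible_collections/ns/col/plugins/modules/ping.py', 'plugins/modules/ping.py');
+                # the 'ansible_collections/ns/col' destination prefix is prepended further below.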
content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files()) + else: + # include files from each collection in the same collection root as the content being tested + extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files()) + else: + # when testing ansible itself the ansible source is the content + content_files = files + # there are no extra files when testing ansible itself + extra_files = [] + + for callback in data_context().payload_callbacks: + # execute callbacks only on the content paths + # this is done before placing them in the appropriate subdirectory (see below) + callback(content_files) + + # place ansible source files under the 'ansible' directory on the delegated host + files = [(src, os.path.join('ansible', dst)) for src, dst in files] + + if data_context().content.collection: + # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host + files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files) + # extra files already have the correct destination path + files.extend(extra_files) + + # maintain predictable file order + files = sorted(set(files)) + + display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1) + + start = time.time() + + with tarfile.TarFile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar: + for src, dst in files: + display.info('%s -> %s' % (src, dst), verbosity=4) + tar.add(src, dst, filter=filters.get(dst)) + + duration = time.time() - start + payload_size_bytes = os.path.getsize(dst_path) + + display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1) + + +def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...] + """Create a temporary ansible bin directory populated using the symlink map.""" + if args.explain: + temp_path = '/tmp/ansible-tmp-bin' + else: + temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin') + atexit.register(remove_tree, temp_path) + + for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items(): + path = os.path.join(temp_path, name) + os.symlink(dest, path) + + return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP)) diff --git a/test/lib/ansible_test/_internal/powershell_import_analysis.py b/test/lib/ansible_test/_internal/powershell_import_analysis.py new file mode 100644 index 00000000..cfc61859 --- /dev/null +++ b/test/lib/ansible_test/_internal/powershell_import_analysis.py @@ -0,0 +1,105 @@ +"""Analyze powershell import statements.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re + +from .io import ( + read_text_file, +) + +from .util import ( + display, +) + +from .util_common import ( + resolve_csharp_ps_util, +) + +from .data import ( + data_context, +) + + +def get_powershell_module_utils_imports(powershell_targets): + """Return a dictionary of module_utils names mapped to sets of powershell file paths. 
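+    For example, a target file that imports Ansible.ModuleUtils.Legacy has its path added to that module_util's set.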
+ :type powershell_targets: list[TestTarget] + :rtype: dict[str, set[str]] + """ + + module_utils = enumerate_module_utils() + + imports_by_target_path = {} + + for target in powershell_targets: + imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils) + + imports = dict([(module_util, set()) for module_util in module_utils]) + + for target_path in imports_by_target_path: + for module_util in imports_by_target_path[target_path]: + imports[module_util].add(target_path) + + for module_util in sorted(imports): + if not imports[module_util]: + display.warning('No imports found which use the "%s" module_util.' % module_util) + + return imports + + +def get_powershell_module_utils_name(path): # type: (str) -> str + """Return a namespace and name from the given module_utils path.""" + base_path = data_context().content.module_utils_powershell_path + + if data_context().content.collection: + prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.' + else: + prefix = '' + + name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.') + + return name + + +def enumerate_module_utils(): + """Return a list of available module_utils imports. + :rtype: set[str] + """ + return set(get_powershell_module_utils_name(p) + for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path) + if os.path.splitext(p)[1] == '.psm1') + + +def extract_powershell_module_utils_imports(path, module_utils): + """Return a list of module_utils imports found in the specified source file. + :type path: str + :type module_utils: set[str] + :rtype: set[str] + """ + imports = set() + + code = read_text_file(path) + + if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code: + imports.add('Ansible.ModuleUtils.Legacy') + + lines = code.splitlines() + line_number = 0 + + for line in lines: + line_number += 1 + match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line) + + if not match: + continue + + import_name = resolve_csharp_ps_util(match.group(1), path) + + if import_name in module_utils: + imports.add(import_name) + elif data_context().content.is_ansible or \ + import_name.startswith('ansible_collections.%s' % data_context().content.prefix): + display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name)) + + return imports diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py new file mode 100644 index 00000000..6e034b53 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/__init__.py @@ -0,0 +1,78 @@ +"""Provider (plugin) infrastructure for ansible-test.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc +import os + +from .. 
import types as t + +from ..util import ( + ABC, + ApplicationError, + get_subclasses, +) + + +try: + TPathProvider = t.TypeVar('TPathProvider', bound='PathProvider') +except AttributeError: + TPathProvider = None # pylint: disable=invalid-name + + +def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) -> t.List[t.Type[TPathProvider]] + """Return a list of path provider classes of the given type.""" + return sorted(get_subclasses(provider_type), key=lambda c: (c.priority, c.__name__)) + + +def find_path_provider(provider_type, # type: t.Type[TPathProvider], + provider_classes, # type: t.List[t.Type[TPathProvider]] + path, # type: str + walk, # type: bool + ): # type: (...) -> TPathProvider + """Return the first found path provider of the given type for the given path.""" + sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0)) + + for sequence in sequences: + candidate_path = path + tier_classes = [pc for pc in provider_classes if pc.sequence == sequence] + + while True: + for provider_class in tier_classes: + if provider_class.is_content_root(candidate_path): + return provider_class(candidate_path) + + if not walk: + break + + parent_path = os.path.dirname(candidate_path) + + if parent_path == candidate_path: + break + + candidate_path = parent_path + + raise ProviderNotFoundForPath(provider_type, path) + + +class ProviderNotFoundForPath(ApplicationError): + """Exception generated when a path based provider cannot be found for a given path.""" + def __init__(self, provider_type, path): # type: (t.Type, str) -> None + super(ProviderNotFoundForPath, self).__init__('No %s found for path: %s' % (provider_type.__name__, path)) + + self.provider_type = provider_type + self.path = path + + +class PathProvider(ABC): + """Base class for provider plugins that are path based.""" + sequence = 500 + priority = 500 + + def __init__(self, root): # type: (str) -> None + self.root = root + + @staticmethod + @abc.abstractmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py new file mode 100644 index 00000000..03d596fc --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py @@ -0,0 +1,232 @@ +"""Code for finding content.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc +import collections +import os + +from ... import types as t + +from ...util import ( + ANSIBLE_SOURCE_ROOT, +) + +from .. import ( + PathProvider, +) + + +class Layout: + """Description of content locations and helper methods to access content.""" + def __init__(self, + root, # type: str + paths, # type: t.List[str] + ): # type: (...) 
-> None
+        self.root = root
+
+        self.__paths = paths  # contains both file paths and symlinked directory paths (ending with os.path.sep)
+        self.__files = [path for path in paths if not path.endswith(os.path.sep)]  # contains only file paths
+        self.__paths_tree = paths_to_tree(self.__paths)
+        self.__files_tree = paths_to_tree(self.__files)
+
+    def all_files(self, include_symlinked_directories=False):  # type: (bool) -> t.List[str]
+        """Return a list of all file paths."""
+        if include_symlinked_directories:
+            return self.__paths
+
+        return self.__files
+
+    def walk_files(self, directory, include_symlinked_directories=False):  # type: (str, bool) -> t.List[str]
+        """Return a list of file paths found recursively under the given directory."""
+        if include_symlinked_directories:
+            tree = self.__paths_tree
+        else:
+            tree = self.__files_tree
+
+        parts = directory.rstrip(os.path.sep).split(os.path.sep)
+        item = get_tree_item(tree, parts)
+
+        if not item:
+            return []
+
+        directories = collections.deque(item[0].values())
+
+        files = list(item[1])
+
+        while directories:
+            item = directories.pop()
+            directories.extend(item[0].values())
+            files.extend(item[1])
+
+        return files
+
+    def get_dirs(self, directory):  # type: (str) -> t.List[str]
+        """Return a list of directory paths found directly under the given directory."""
+        parts = directory.rstrip(os.path.sep).split(os.path.sep)
+        item = get_tree_item(self.__files_tree, parts)
+        return [os.path.join(directory, key) for key in item[0].keys()] if item else []
+
+    def get_files(self, directory):  # type: (str) -> t.List[str]
+        """Return a list of file paths found directly under the given directory."""
+        parts = directory.rstrip(os.path.sep).split(os.path.sep)
+        item = get_tree_item(self.__files_tree, parts)
+        return item[1] if item else []
+
+
+class ContentLayout(Layout):
+    """Information about the current Ansible content being tested."""
+    def __init__(self,
+                 root,  # type: str
+                 paths,  # type: t.List[str]
+                 plugin_paths,  # type: t.Dict[str, str]
+                 collection,  # type: t.Optional[CollectionDetail]
+                 test_path,  # type: str
+                 results_path,  # type: str
+                 sanity_path,  # type: str
+                 sanity_messages,  # type: t.Optional[LayoutMessages]
+                 integration_path,  # type: str
+                 integration_targets_path,  # type: str
+                 integration_vars_path,  # type: str
+                 integration_messages,  # type: t.Optional[LayoutMessages]
+                 unit_path,  # type: str
+                 unit_module_path,  # type: str
+                 unit_module_utils_path,  # type: str
+                 unit_messages,  # type: t.Optional[LayoutMessages]
+                 ):  # type: (...)
-> None + super(ContentLayout, self).__init__(root, paths) + + self.plugin_paths = plugin_paths + self.collection = collection + self.test_path = test_path + self.results_path = results_path + self.sanity_path = sanity_path + self.sanity_messages = sanity_messages + self.integration_path = integration_path + self.integration_targets_path = integration_targets_path + self.integration_vars_path = integration_vars_path + self.integration_messages = integration_messages + self.unit_path = unit_path + self.unit_module_path = unit_module_path + self.unit_module_utils_path = unit_module_utils_path + self.unit_messages = unit_messages + + self.is_ansible = root == ANSIBLE_SOURCE_ROOT + + @property + def prefix(self): # type: () -> str + """Return the collection prefix or an empty string if not a collection.""" + if self.collection: + return self.collection.prefix + + return '' + + @property + def module_path(self): # type: () -> t.Optional[str] + """Return the path where modules are found, if any.""" + return self.plugin_paths.get('modules') + + @property + def module_utils_path(self): # type: () -> t.Optional[str] + """Return the path where module_utils are found, if any.""" + return self.plugin_paths.get('module_utils') + + @property + def module_utils_powershell_path(self): # type: () -> t.Optional[str] + """Return the path where powershell module_utils are found, if any.""" + if self.is_ansible: + return os.path.join(self.plugin_paths['module_utils'], 'powershell') + + return self.plugin_paths.get('module_utils') + + @property + def module_utils_csharp_path(self): # type: () -> t.Optional[str] + """Return the path where csharp module_utils are found, if any.""" + if self.is_ansible: + return os.path.join(self.plugin_paths['module_utils'], 'csharp') + + return self.plugin_paths.get('module_utils') + + +class LayoutMessages: + """Messages generated during layout creation that should be deferred for later display.""" + def __init__(self): + self.info = [] # type: t.List[str] + self.warning = [] # type: t.List[str] + self.error = [] # type: t.List[str] + + +class CollectionDetail: + """Details about the layout of the current collection.""" + def __init__(self, + name, # type: str + namespace, # type: str + root, # type: str + ): # type: (...) -> None + self.name = name + self.namespace = namespace + self.root = root + self.full_name = '%s.%s' % (namespace, name) + self.prefix = '%s.' 
% self.full_name + self.directory = os.path.join('ansible_collections', namespace, name) + + +class LayoutProvider(PathProvider): + """Base class for layout providers.""" + PLUGIN_TYPES = ( + 'action', + 'become', + 'cache', + 'callback', + 'cliconf', + 'connection', + 'doc_fragments', + 'filter', + 'httpapi', + 'inventory', + 'lookup', + 'module_utils', + 'modules', + 'netconf', + 'shell', + 'strategy', + 'terminal', + 'test', + 'vars', + ) + + @abc.abstractmethod + def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout + """Create a layout using the given root and paths.""" + + +def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]] + """Return a filesystem tree from the given list of paths.""" + tree = {}, [] + + for path in paths: + parts = path.split(os.path.sep) + root = tree + + for part in parts[:-1]: + if part not in root[0]: + root[0][part] = {}, [] + + root = root[0][part] + + root[1].append(path) + + return tree + + +def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]] + """Return the portion of the tree found under the path given by parts, or None if it does not exist.""" + root = tree + + for part in parts: + root = root[0].get(part) + + if not root: + return None + + return root diff --git a/test/lib/ansible_test/_internal/provider/layout/ansible.py b/test/lib/ansible_test/_internal/provider/layout/ansible.py new file mode 100644 index 00000000..49ca482b --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/layout/ansible.py @@ -0,0 +1,47 @@ +"""Layout provider for Ansible source.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ... import types as t + +from . import ( + ContentLayout, + LayoutProvider, +) + + +class AnsibleLayout(LayoutProvider): + """Layout provider for Ansible source.""" + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test')) + + def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout + """Create a Layout using the given root and paths.""" + plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES) + + plugin_paths.update(dict( + modules='lib/ansible/modules', + module_utils='lib/ansible/module_utils', + )) + + return ContentLayout(root, + paths, + plugin_paths=plugin_paths, + collection=None, + test_path='test', + results_path='test/results', + sanity_path='test/sanity', + sanity_messages=None, + integration_path='test/integration', + integration_targets_path='test/integration/targets', + integration_vars_path='test/integration/integration_config.yml', + integration_messages=None, + unit_path='test/units', + unit_module_path='test/units/modules', + unit_module_utils_path='test/units/module_utils', + unit_messages=None, + ) diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py new file mode 100644 index 00000000..ffad29f2 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/layout/collection.py @@ -0,0 +1,123 @@ +"""Layout provider for Ansible collections.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ...
import types as t + +from . import ( + ContentLayout, + LayoutProvider, + CollectionDetail, + LayoutMessages, +) + + +class CollectionLayout(LayoutProvider): + """Layout provider for Ansible collections.""" + __module_path = 'plugins/modules' + __unit_path = 'test/unit' + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections': + return True + + return False + + def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout + """Create a Layout using the given root and paths.""" + plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES) + + collection_root = os.path.dirname(os.path.dirname(root)) + collection_dir = os.path.relpath(root, collection_root) + collection_namespace, collection_name = collection_dir.split(os.path.sep) + + collection_root = os.path.dirname(collection_root) + + sanity_messages = LayoutMessages() + integration_messages = LayoutMessages() + unit_messages = LayoutMessages() + + # these apply to all test commands + self.__check_test_path(paths, sanity_messages) + self.__check_test_path(paths, integration_messages) + self.__check_test_path(paths, unit_messages) + + # these apply to specific test commands + integration_targets_path = self.__check_integration_path(paths, integration_messages) + self.__check_unit_path(paths, unit_messages) + + return ContentLayout(root, + paths, + plugin_paths=plugin_paths, + collection=CollectionDetail( + name=collection_name, + namespace=collection_namespace, + root=collection_root, + ), + test_path='tests', + results_path='tests/output', + sanity_path='tests/sanity', + sanity_messages=sanity_messages, + integration_path='tests/integration', + integration_targets_path=integration_targets_path.rstrip(os.path.sep), + integration_vars_path='tests/integration/integration_config.yml', + integration_messages=integration_messages, + unit_path='tests/unit', + unit_module_path='tests/unit/plugins/modules', + unit_module_utils_path='tests/unit/plugins/module_utils', + unit_messages=unit_messages, + ) + + @staticmethod + def __check_test_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None + modern_test_path = 'tests/' + modern_test_path_found = any(path.startswith(modern_test_path) for path in paths) + legacy_test_path = 'test/' + legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths) + + if modern_test_path_found and legacy_test_path_found: + messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path)) + elif legacy_test_path_found: + messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path)) + + @staticmethod + def __check_integration_path(paths, messages): # type: (t.List[str], LayoutMessages) -> str + modern_integration_path = 'roles/test/' + modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths) + legacy_integration_path = 'tests/integration/targets/' + legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths) + + if modern_integration_path_found and legacy_integration_path_found: + messages.warning.append('Ignoring tests in "%s" in favor of "%s".' 
% (legacy_integration_path, modern_integration_path)) + integration_targets_path = modern_integration_path + elif legacy_integration_path_found: + messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path)) + integration_targets_path = legacy_integration_path + elif modern_integration_path_found: + messages.info.append('Loading tests from "%s".' % modern_integration_path) + integration_targets_path = modern_integration_path + else: + messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path)) + integration_targets_path = modern_integration_path + + return integration_targets_path + + @staticmethod + def __check_unit_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None + modern_unit_path = 'tests/unit/' + modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths) + legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/ + legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths) + + if modern_unit_path_found and legacy_unit_path_found: + messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path)) + elif legacy_unit_path_found: + messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path)) + elif modern_unit_path_found: + pass # unit tests only run from one directory so no message is needed + else: + messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path) diff --git a/test/lib/ansible_test/_internal/provider/source/__init__.py b/test/lib/ansible_test/_internal/provider/source/__init__.py new file mode 100644 index 00000000..fab28b09 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/__init__.py @@ -0,0 +1,18 @@ +"""Common code for source providers.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc + +from ... import types as t + +from .. import ( + PathProvider, +) + + +class SourceProvider(PathProvider): + """Base class for source providers.""" + @abc.abstractmethod + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" diff --git a/test/lib/ansible_test/_internal/provider/source/git.py b/test/lib/ansible_test/_internal/provider/source/git.py new file mode 100644 index 00000000..0bf81a1c --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/git.py @@ -0,0 +1,72 @@ +"""Source provider for a content root managed by git version control.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ... import types as t + +from ...git import ( + Git, +) + +from ...encoding import ( + to_bytes, +) + +from ...util import ( + SubprocessError, +) + +from . 
import ( + SourceProvider, +) + + +class GitSource(SourceProvider): + """Source provider for a content root managed by git version control.""" + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return os.path.exists(os.path.join(path, '.git')) + + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + paths = self.__get_paths(path) + + try: + submodule_paths = Git(path).get_submodule_paths() + except SubprocessError: + if path == self.root: + raise + + # older versions of git require submodule commands to be executed from the top level of the working tree + # git version 2.18.1 (centos8) does not have this restriction + # git version 1.8.3.1 (centos7) does + # fall back to using the top level directory of the working tree only when needed + # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules + rel_path = os.path.relpath(path, self.root) + os.path.sep + + submodule_paths = Git(self.root).get_submodule_paths() + submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)] + + for submodule_path in submodule_paths: + paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path))) + + # git reports submodule directories as regular files + paths = [p for p in paths if p not in submodule_paths] + + return paths + + @staticmethod + def __get_paths(path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + git = Git(path) + paths = git.get_file_names(['--cached', '--others', '--exclude-standard']) + deleted_paths = git.get_file_names(['--deleted']) + paths = sorted(set(paths) - set(deleted_paths)) + + # directory symlinks are reported by git as regular files but they need to be treated as directories + paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths] + + return paths diff --git a/test/lib/ansible_test/_internal/provider/source/installed.py b/test/lib/ansible_test/_internal/provider/source/installed.py new file mode 100644 index 00000000..d24a6e3d --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/installed.py @@ -0,0 +1,43 @@ +"""Source provider for content which has been installed.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ... import types as t + +from . 
import ( + SourceProvider, +) + + +class InstalledSource(SourceProvider): + """Source provider for content which has been installed.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + paths = [] + + kill_extensions = ( + '.pyc', + '.pyo', + ) + + for root, _dummy, file_names in os.walk(path): + rel_root = os.path.relpath(root, path) + + if rel_root == '.': + rel_root = '' + + paths.extend([os.path.join(rel_root, file_name) for file_name in file_names + if not os.path.splitext(file_name)[1] in kill_extensions]) + + # NOTE: directory symlinks are ignored as there should be no directory symlinks for an install + + return paths diff --git a/test/lib/ansible_test/_internal/provider/source/unversioned.py b/test/lib/ansible_test/_internal/provider/source/unversioned.py new file mode 100644 index 00000000..09105789 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/unversioned.py @@ -0,0 +1,87 @@ +"""Fallback source provider when no other provider matches the content root.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ... import types as t + +from ...constants import ( + TIMEOUT_PATH, +) + +from ...encoding import ( + to_bytes, +) + +from . import ( + SourceProvider, +) + + +class UnversionedSource(SourceProvider): + """Fallback source provider when no other provider matches the content root.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + paths = [] + + kill_any_dir = ( + '.idea', + '.pytest_cache', + '__pycache__', + 'ansible.egg-info', + 'ansible_base.egg-info', + ) + + kill_sub_dir = { + 'test': ( + 'results', + 'cache', + 'output', + ), + 'tests': ( + 'output', + ), + 'docs/docsite': ( + '_build', + ), + } + + kill_sub_file = { + '': ( + TIMEOUT_PATH, + ), + } + + kill_extensions = ( + '.pyc', + '.pyo', + '.retry', + ) + + for root, dir_names, file_names in os.walk(path): + rel_root = os.path.relpath(root, path) + + if rel_root == '.': + rel_root = '' + + for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()): + if kill in dir_names: + dir_names.remove(kill) + + kill_files = kill_sub_file.get(rel_root, ()) + + paths.extend([os.path.join(rel_root, file_name) for file_name in file_names + if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files]) + + # include directory symlinks since they will not be traversed and would otherwise go undetected + paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))]) + + return paths diff --git a/test/lib/ansible_test/_internal/sanity/__init__.py b/test/lib/ansible_test/_internal/sanity/__init__.py new file mode 100644 index 00000000..976bbb2f --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/__init__.py @@ -0,0 +1,946 @@ +"""Execute Ansible sanity tests.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import abc +import glob +import os 
+import re +import collections + +from .. import types as t + +from ..io import ( + read_json_file, +) + +from ..util import ( + ApplicationError, + SubprocessError, + display, + import_plugins, + load_plugins, + parse_to_list_of_dict, + ABC, + ANSIBLE_TEST_DATA_ROOT, + is_binary_file, + read_lines_without_comments, + get_available_python_versions, + find_python, + is_subdir, + paths_to_dirs, + get_ansible_version, + str_to_version, +) + +from ..util_common import ( + run_command, + intercept_command, + handle_layout_messages, +) + +from ..ansible_util import ( + ansible_environment, +) + +from ..target import ( + walk_internal_targets, + walk_sanity_targets, + TestTarget, +) + +from ..executor import ( + get_changes_filter, + AllTargetsSkipped, + Delegate, + install_command_requirements, + SUPPORTED_PYTHON_VERSIONS, +) + +from ..config import ( + SanityConfig, +) + +from ..test import ( + TestSuccess, + TestFailure, + TestSkipped, + TestMessage, + calculate_best_confidence, +) + +from ..data import ( + data_context, +) + +COMMAND = 'sanity' +SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity') + + +def command_sanity(args): + """ + :type args: SanityConfig + """ + handle_layout_messages(data_context().content.sanity_messages) + + changes = get_changes_filter(args) + require = args.require + changes + targets = SanityTargets.create(args.include, args.exclude, require) + + if not targets.include: + raise AllTargetsSkipped() + + if args.delegate: + raise Delegate(require=changes, exclude=args.exclude) + + tests = sanity_get_tests() + + if args.test: + tests = [target for target in tests if target.name in args.test] + else: + disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled] + tests = [target for target in tests if target.enabled or args.allow_disabled] + + if disabled: + display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled))) + + if args.skip_test: + tests = [target for target in tests if target.name not in args.skip_test] + + total = 0 + failed = [] + + for test in tests: + if args.list_tests: + display.info(test.name) + continue + + available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys()) + + if args.python: + # specific version selected + versions = (args.python,) + elif isinstance(test, SanityMultipleVersion): + # try all supported versions for multi-version tests when a specific version has not been selected + versions = test.supported_python_versions + elif not test.supported_python_versions or args.python_version in test.supported_python_versions: + # the test works with any version or the version we're already running + versions = (args.python_version,) + else: + # available versions supported by the test + versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions))) + # use the lowest available version supported by the test or the current version as a fallback (which will be skipped) + versions = versions[:1] or (args.python_version,) + + for version in versions: + if isinstance(test, SanityMultipleVersion): + skip_version = version + else: + skip_version = None + + options = '' + + if test.supported_python_versions and version not in test.supported_python_versions: + display.warning("Skipping sanity test '%s' on unsupported Python %s." 
% (test.name, version)) + result = SanitySkipped(test.name, skip_version) + elif not args.python and version not in available_versions: + display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." % (test.name, version)) + result = SanitySkipped(test.name, skip_version) + else: + if test.supported_python_versions: + display.info("Running sanity test '%s' with Python %s" % (test.name, version)) + else: + display.info("Running sanity test '%s'" % test.name) + + if isinstance(test, SanityCodeSmellTest): + settings = test.load_processor(args) + elif isinstance(test, SanityMultipleVersion): + settings = test.load_processor(args, version) + elif isinstance(test, SanitySingleVersion): + settings = test.load_processor(args) + elif isinstance(test, SanityVersionNeutral): + settings = test.load_processor(args) + else: + raise Exception('Unsupported test type: %s' % type(test)) + + all_targets = targets.targets + + if test.all_targets: + usable_targets = targets.targets + elif test.no_targets: + usable_targets = tuple() + else: + usable_targets = targets.include + + all_targets = SanityTargets.filter_and_inject_targets(test, all_targets) + usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets) + + usable_targets = sorted(test.filter_targets(list(usable_targets))) + usable_targets = settings.filter_skipped_targets(usable_targets) + sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets)) + + if usable_targets or test.no_targets: + install_command_requirements(args, version, context=test.name, enable_pyyaml_check=True) + + if isinstance(test, SanityCodeSmellTest): + result = test.test(args, sanity_targets, version) + elif isinstance(test, SanityMultipleVersion): + result = test.test(args, sanity_targets, version) + options = ' --python %s' % version + elif isinstance(test, SanitySingleVersion): + result = test.test(args, sanity_targets, version) + elif isinstance(test, SanityVersionNeutral): + result = test.test(args, sanity_targets) + else: + raise Exception('Unsupported test type: %s' % type(test)) + else: + result = SanitySkipped(test.name, skip_version) + + result.write(args) + + total += 1 + + if isinstance(result, SanityFailure): + failed.append(result.test + options) + + if failed: + message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % ( + len(failed), total, '\n'.join(failed)) + + if args.failure_ok: + display.error(message) + else: + raise ApplicationError(message) + + +def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...] 
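+ # Note: code smell checks are standalone executable scripts collected at runtime by the glob below, not SanityFunc plugin classes.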
+ """Return a tuple of available code smell sanity tests.""" + paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py')) + + if data_context().content.is_ansible: + # include Ansible specific code-smell tests which are not configured to be skipped + ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell') + skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True) + paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests) + + paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p)) + tests = tuple(SanityCodeSmellTest(p) for p in paths) + + return tests + + +def sanity_get_tests(): + """ + :rtype: tuple[SanityFunc] + """ + return SANITY_TESTS + + +class SanityIgnoreParser: + """Parser for the consolidated sanity test ignore file.""" + NO_CODE = '_' + + def __init__(self, args): # type: (SanityConfig) -> None + if data_context().content.collection: + ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2]) + + ansible_label = 'Ansible %s' % ansible_version + file_name = 'ignore-%s.txt' % ansible_version + else: + ansible_label = 'Ansible' + file_name = 'ignore.txt' + + self.args = args + self.relative_path = os.path.join(data_context().content.sanity_path, file_name) + self.path = os.path.join(data_context().content.root, self.relative_path) + self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]] + self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]] + self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]] + self.file_not_found_errors = [] # type: t.List[t.Tuple[int, str]] + + lines = read_lines_without_comments(self.path, optional=True) + targets = SanityTargets.get_targets() + paths = set(target.path for target in targets) + tests_by_name = {} # type: t.Dict[str, SanityTest] + versioned_test_names = set() # type: t.Set[str] + unversioned_test_names = {} # type: t.Dict[str, str] + directories = paths_to_dirs(list(paths)) + paths_by_test = {} # type: t.Dict[str, t.Set[str]] + + display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1) + + for test in sanity_get_tests(): + test_targets = SanityTargets.filter_and_inject_targets(test, targets) + + paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets)) + + if isinstance(test, SanityMultipleVersion): + versioned_test_names.add(test.name) + tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions)) + else: + unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS)) + tests_by_name[test.name] = test + + for line_no, line in enumerate(lines, start=1): + if not line: + self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment")) + continue + + parts = line.split(' ') + path = parts[0] + codes = parts[1:] + + if not path: + self.parse_errors.append((line_no, 1, "Line cannot start with a space")) + continue + + if path.endswith(os.path.sep): + if path not in directories: + self.file_not_found_errors.append((line_no, path)) + continue + else: + if path not in paths: + 
self.file_not_found_errors.append((line_no, path)) + continue + + if not codes: + self.parse_errors.append((line_no, len(path), "Error code required after path")) + continue + + code = codes[0] + + if not code: + self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty")) + continue + + if len(codes) > 1: + self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces")) + continue + + parts = code.split('!') + code = parts[0] + commands = parts[1:] + + parts = code.split(':') + test_name = parts[0] + error_codes = parts[1:] + + test = tests_by_name.get(test_name) + + if not test: + unversioned_name = unversioned_test_names.get(test_name) + + if unversioned_name: + self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % ( + unversioned_name, test_name))) + elif test_name in versioned_test_names: + self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % ( + test_name, test_name, args.python_version))) + else: + self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name)) + + continue + + if path.endswith(os.path.sep) and not test.include_directories: + self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name)) + continue + + if path not in paths_by_test[test.name] and not test.no_targets: + self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path))) + continue + + if commands and error_codes: + self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters")) + continue + + if commands: + command = commands[0] + + if len(commands) > 1: + self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' 
characters")) + continue + + if command == 'skip': + if not test.can_skip: + self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name)) + continue + + existing_line_no = self.skips.get(test_name, {}).get(path) + + if existing_line_no: + self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no))) + continue + + self.skips[test_name][path] = line_no + continue + + self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command)) + continue + + if not test.can_ignore: + self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name)) + continue + + if test.error_code: + if not error_codes: + self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name)) + continue + + error_code = error_codes[0] + + if len(error_codes) > 1: + self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters")) + continue + + if error_code in test.optional_error_codes: + self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % ( + error_code))) + continue + else: + if error_codes: + self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name)) + continue + + error_code = self.NO_CODE + + existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code) + + if existing: + if test.error_code: + self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % ( + test_name, error_code, path, existing))) + else: + self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % ( + test_name, path, existing))) + + continue + + self.ignores[test_name][path][error_code] = line_no + + @staticmethod + def load(args): # type: (SanityConfig) -> SanityIgnoreParser + """Return the current SanityIgnore instance, initializing it if needed.""" + try: + return SanityIgnoreParser.instance + except AttributeError: + pass + + SanityIgnoreParser.instance = SanityIgnoreParser(args) + return SanityIgnoreParser.instance + + +class SanityIgnoreProcessor: + """Processor for sanity test ignores for a single run of one sanity test.""" + def __init__(self, + args, # type: SanityConfig + test, # type: SanityTest + python_version, # type: t.Optional[str] + ): # type: (...) 
-> None + name = test.name + code = test.error_code + + if python_version: + full_name = '%s-%s' % (name, python_version) + else: + full_name = name + + self.args = args + self.test = test + self.code = code + self.parser = SanityIgnoreParser.load(args) + self.ignore_entries = self.parser.ignores.get(full_name, {}) + self.skip_entries = self.parser.skips.get(full_name, {}) + self.used_line_numbers = set() # type: t.Set[int] + + def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given targets, with any skipped paths filtered out.""" + return sorted(target for target in targets if target.path not in self.skip_entries) + + def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage] + """Return the given errors filtered for ignores and with any settings related errors included.""" + errors = self.filter_messages(errors) + errors.extend(self.get_errors(paths)) + + errors = sorted(set(errors)) + + return errors + + def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage] + """Return a filtered list of the given messages using the entries that have been loaded.""" + filtered = [] + + for message in messages: + if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors: + continue + + path_entry = self.ignore_entries.get(message.path) + + if path_entry: + code = message.code if self.code else SanityIgnoreParser.NO_CODE + line_no = path_entry.get(code) + + if line_no: + self.used_line_numbers.add(line_no) + continue + + filtered.append(message) + + return filtered + + def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage] + """Return error messages related to issues with the file.""" + messages = [] + + # unused errors + + unused = [] # type: t.List[t.Tuple[int, str, str]] + + if self.test.no_targets or self.test.all_targets: + # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked + targets = SanityTargets.get_targets() + test_targets = SanityTargets.filter_and_inject_targets(self.test, targets) + paths = [target.path for target in test_targets] + + for path in paths: + path_entry = self.ignore_entries.get(path) + + if not path_entry: + continue + + unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers) + + messages.extend(SanityMessage( + code=self.code, + message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path, + path=self.parser.relative_path, + line=line, + column=1, + confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None, + ) for line, path, code in unused) + + return messages + + +class SanitySuccess(TestSuccess): + """Sanity test success.""" + def __init__(self, test, python_version=None): + """ + :type test: str + :type python_version: str + """ + super(SanitySuccess, self).__init__(COMMAND, test, python_version) + + +class SanitySkipped(TestSkipped): + """Sanity test skipped.""" + def __init__(self, test, python_version=None): + """ + :type test: str + :type python_version: str + """ + super(SanitySkipped, self).__init__(COMMAND, test, python_version) + + +class SanityFailure(TestFailure): + """Sanity test failure.""" + def __init__(self, test, python_version=None, messages=None, summary=None): + """ + :type 
test: str + :type python_version: str + :type messages: list[SanityMessage] + :type summary: unicode + """ + super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary) + + +class SanityMessage(TestMessage): + """Single sanity test message for one file.""" + + +class SanityTargets: + """Sanity test target information.""" + def __init__(self, targets, include): # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None + self.targets = targets + self.include = include + + @staticmethod + def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets + """Create a SanityTargets instance from the given include, exclude and require lists.""" + _targets = SanityTargets.get_targets() + _include = walk_internal_targets(_targets, include, exclude, require) + return SanityTargets(_targets, _include) + + @staticmethod + def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget] + """Filter and inject targets based on test requirements and the given target list.""" + test_targets = list(targets) + + if not test.include_symlinks: + # remove all symlinks unless supported by the test + test_targets = [target for target in test_targets if not target.symlink] + + if not test.include_directories or not test.include_symlinks: + # exclude symlinked directories unless supported by the test + test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)] + + if test.include_directories: + # include directories containing any of the included files + test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets])) + + if not test.include_symlinks: + # remove all directory symlinks unless supported by the test + test_targets = [target for target in test_targets if not target.symlink] + + return test_targets + + @staticmethod + def get_targets(): # type: () -> t.Tuple[TestTarget, ...] + """Return a tuple of sanity test targets. Uses a cached version when available.""" + try: + return SanityTargets.get_targets.targets + except AttributeError: + SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets())) + + return SanityTargets.get_targets.targets + + +class SanityTest(ABC): + """Sanity test base class.""" + __metaclass__ = abc.ABCMeta + + ansible_only = False + + def __init__(self, name): + self.name = name + self.enabled = True + + # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date. + # Because these errors can be unpredictable they behave differently than normal error codes: + # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors. + # * They cannot be ignored. This is done to maintain the integrity of the ignore system. 
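+ # For example, a check keyed to a deprecation or removal date can start failing once that date passes, with no change to the file itself.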
+ self.optional_error_codes = set() + + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return None + + @property + def can_ignore(self): # type: () -> bool + """True if the test supports ignore entries.""" + return True + + @property + def can_skip(self): # type: () -> bool + """True if the test supports skip entries.""" + return not self.all_targets and not self.no_targets + + @property + def all_targets(self): # type: () -> bool + """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" + return False + + @property + def no_targets(self): # type: () -> bool + """True if the test does not use test targets. Mutually exclusive with all_targets.""" + return False + + @property + def include_directories(self): # type: () -> bool + """True if the test targets should include directories.""" + return False + + @property + def include_symlinks(self): # type: () -> bool + """True if the test targets should include symlinks.""" + return False + + @property + def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] + """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" + return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.')) + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] # pylint: disable=unused-argument + """Return the given list of test targets, filtered to include only those relevant for the test.""" + if self.no_targets: + return [] + + raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' 
% self.name) + + +class SanityCodeSmellTest(SanityTest): + """Sanity test script.""" + def __init__(self, path): + name = os.path.splitext(os.path.basename(path))[0] + config_path = os.path.splitext(path)[0] + '.json' + + super(SanityCodeSmellTest, self).__init__(name) + + self.path = path + self.config_path = config_path if os.path.exists(config_path) else None + self.config = None + + if self.config_path: + self.config = read_json_file(self.config_path) + + if self.config: + self.enabled = not self.config.get('disabled') + + self.output = self.config.get('output') # type: t.Optional[str] + self.extensions = self.config.get('extensions') # type: t.List[str] + self.prefixes = self.config.get('prefixes') # type: t.List[str] + self.files = self.config.get('files') # type: t.List[str] + self.text = self.config.get('text') # type: t.Optional[bool] + self.ignore_self = self.config.get('ignore_self') # type: bool + self.intercept = self.config.get('intercept') # type: bool + self.minimum_python_version = self.config.get('minimum_python_version') # type: t.Optional[str] + + self.__all_targets = self.config.get('all_targets') # type: bool + self.__no_targets = self.config.get('no_targets') # type: bool + self.__include_directories = self.config.get('include_directories') # type: bool + self.__include_symlinks = self.config.get('include_symlinks') # type: bool + else: + self.output = None + self.extensions = [] + self.prefixes = [] + self.files = [] + self.text = None # type: t.Optional[bool] + self.ignore_self = False + self.intercept = False + self.minimum_python_version = None # type: t.Optional[str] + + self.__all_targets = False + self.__no_targets = True + self.__include_directories = False + self.__include_symlinks = False + + if self.no_targets: + mutually_exclusive = ( + 'extensions', + 'prefixes', + 'files', + 'text', + 'ignore_self', + 'all_targets', + 'include_directories', + 'include_symlinks', + ) + + problems = sorted(name for name in mutually_exclusive if getattr(self, name)) + + if problems: + raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems))) + + @property + def all_targets(self): # type: () -> bool + """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" + return self.__all_targets + + @property + def no_targets(self): # type: () -> bool + """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" + return self.__no_targets + + @property + def include_directories(self): # type: () -> bool + """True if the test targets should include directories.""" + return self.__include_directories + + @property + def include_symlinks(self): # type: () -> bool + """True if the test targets should include symlinks.""" + return self.__include_symlinks + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + if self.no_targets: + return [] + + if self.text is not None: + if self.text: + targets = [target for target in targets if not is_binary_file(target.path)] + else: + targets = [target for target in targets if is_binary_file(target.path)] + + if self.extensions: + targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions + or (is_subdir(target.path, 'bin') and '.py' in self.extensions)] + + if self.prefixes: + targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)] + + if self.files: + targets = [target for target in targets if os.path.basename(target.path) in self.files] + + if self.ignore_self and data_context().content.is_ansible: + relative_self_path = os.path.relpath(self.path, data_context().content.root) + targets = [target for target in targets if target.path != relative_self_path] + + return targets + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + if self.minimum_python_version: + if str_to_version(python_version) < str_to_version(self.minimum_python_version): + display.warning("Skipping sanity test '%s' on unsupported Python %s; requires Python %s or newer." 
% ( + self.name, python_version, self.minimum_python_version)) + return SanitySkipped(self.name, 'Test requires Python %s or newer' % (self.minimum_python_version, )) + + cmd = [find_python(python_version), self.path] + + env = ansible_environment(args, color=False) + + pattern = None + data = None + + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + if self.config: + if self.output == 'path-line-column-message': + pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$' + elif self.output == 'path-message': + pattern = '^(?P<path>[^:]*): (?P<message>.*)$' + else: + raise ApplicationError('Unsupported output type: %s' % self.output) + + if not self.no_targets: + data = '\n'.join(paths) + + if data: + display.info(data, verbosity=4) + + try: + if self.intercept: + stdout, stderr = intercept_command(args, cmd, target_name='sanity.%s' % self.name, data=data, env=env, capture=True, disable_coverage=True) + else: + stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True) + + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if args.explain: + return SanitySuccess(self.name) + + if stdout and not stderr: + if pattern: + matches = parse_to_list_of_dict(pattern, stdout) + + messages = [SanityMessage( + message=m['message'], + path=m['path'], + line=int(m.get('line', 0)), + column=int(m.get('column', 0)), + ) for m in matches] + + messages = settings.process_errors(messages, paths) + + if not messages: + return SanitySuccess(self.name) + + return SanityFailure(self.name, messages=messages) + + if stderr or status: + summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + return SanityFailure(self.name, summary=summary) + + messages = settings.process_errors([], paths) + + if messages: + return SanityFailure(self.name, messages=messages) + + return SanitySuccess(self.name) + + def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor + """Load the ignore processor for this sanity test.""" + return SanityIgnoreProcessor(args, self, None) + + +class SanityFunc(SanityTest): + """Base class for sanity test plugins.""" + def __init__(self): + name = self.__class__.__name__ + name = re.sub(r'Test$', '', name) # drop Test suffix + name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization + + super(SanityFunc, self).__init__(name) + + +class SanityVersionNeutral(SanityFunc): + """Base class for sanity test plugins which are independent of the python version being used.""" + @abc.abstractmethod + def test(self, args, targets): + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + + def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor + """Load the ignore processor for this sanity test.""" + return SanityIgnoreProcessor(args, self, None) + + @property + def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] + """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" + return None + + +class SanitySingleVersion(SanityFunc): + """Base class for sanity test plugins which should run on a single python version.""" + @abc.abstractmethod + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + + def load_processor(self, args): # type: (SanityConfig) ->
SanityIgnoreProcessor + """Load the ignore processor for this sanity test.""" + return SanityIgnoreProcessor(args, self, None) + + +class SanityMultipleVersion(SanityFunc): + """Base class for sanity test plugins which should run on multiple python versions.""" + @abc.abstractmethod + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + + def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor + """Load the ignore processor for this sanity test.""" + return SanityIgnoreProcessor(args, self, python_version) + + @property + def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] + """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" + return SUPPORTED_PYTHON_VERSIONS + + +SANITY_TESTS = ( +) + + +def sanity_init(): + """Initialize full sanity test list (includes code-smell scripts determined at runtime).""" + import_plugins('sanity') + sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]] + load_plugins(SanityFunc, sanity_plugins) + sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only]) + global SANITY_TESTS # pylint: disable=locally-disabled, global-statement + SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name)) diff --git a/test/lib/ansible_test/_internal/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/sanity/ansible_doc.py new file mode 100644 index 00000000..c6b997cf --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/ansible_doc.py @@ -0,0 +1,144 @@ +"""Sanity test for ansible-doc.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import collections +import os +import re + +from .. 
import types as t + +from ..sanity import ( + SanitySingleVersion, + SanityFailure, + SanitySuccess, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + display, + is_subdir, +) + +from ..util_common import ( + intercept_command, +) + +from ..ansible_util import ( + ansible_environment, +) + +from ..config import ( + SanityConfig, +) + +from ..data import ( + data_context, +) + +from ..coverage_util import ( + coverage_context, +) + + +class AnsibleDocTest(SanitySingleVersion): + """Sanity test for ansible-doc.""" + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + # This should use documentable plugins from constants instead + unsupported_plugin_types = set([ + # not supported by ansible-doc + 'action', + 'doc_fragments', + 'filter', + 'module_utils', + 'terminal', + 'test', + ]) + + plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type not in unsupported_plugin_types] + + return [target for target in targets + if os.path.splitext(target.path)[1] == '.py' + and os.path.basename(target.path) != '__init__.py' + and any(is_subdir(target.path, path) for path in plugin_paths) + ] + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + doc_targets = collections.defaultdict(list) + target_paths = collections.defaultdict(dict) + + remap_types = dict( + modules='module', + ) + + for plugin_type, plugin_path in data_context().content.plugin_paths.items(): + plugin_type = remap_types.get(plugin_type, plugin_type) + + for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]: + plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0] + + if plugin_name.startswith('_'): + plugin_name = plugin_name[1:] + + doc_targets[plugin_type].append(data_context().content.prefix + plugin_name) + target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path + + env = ansible_environment(args, color=False) + error_messages = [] + + for doc_type in sorted(doc_targets): + for format_option in [None, '--json']: + cmd = ['ansible-doc', '-t', doc_type] + if format_option is not None: + cmd.append(format_option) + cmd.extend(sorted(doc_targets[doc_type])) + + try: + with coverage_context(args): + stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version) + + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if status: + summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr) + return SanityFailure(self.name, summary=summary) + + if stdout: + display.info(stdout.strip(), verbosity=3) + + if stderr: + # ignore removed module/plugin warnings + stderr = re.sub(r'\[WARNING\]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip() + + if stderr: + summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr) + return SanityFailure(self.name, summary=summary) + + if args.explain: + return SanitySuccess(self.name) + + error_messages = settings.process_errors(error_messages, paths) + + if 
error_messages: + return SanityFailure(self.name, messages=error_messages) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/bin_symlinks.py b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py new file mode 100644 index 00000000..bd0ba58e --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py @@ -0,0 +1,110 @@ +"""Sanity test for symlinks in the bin directory.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .. import types as t + +from ..sanity import ( + SanityVersionNeutral, + SanityMessage, + SanityFailure, + SanitySuccess, +) + +from ..config import ( + SanityConfig, +) + +from ..data import ( + data_context, +) + +from ..payload import ( + ANSIBLE_BIN_SYMLINK_MAP, + __file__ as symlink_map_full_path, +) + +from ..util import ( + ANSIBLE_BIN_PATH, + ANSIBLE_TEST_DATA_ROOT, +) + + +class BinSymlinksTest(SanityVersionNeutral): + """Sanity test for symlinks in the bin directory.""" + ansible_only = True + + @property + def can_ignore(self): # type: () -> bool + """True if the test supports ignore entries.""" + return False + + @property + def no_targets(self): # type: () -> bool + """True if the test does not use test targets. Mutually exclusive with all_targets.""" + return True + + # noinspection PyUnusedLocal + def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + bin_root = ANSIBLE_BIN_PATH + bin_names = os.listdir(bin_root) + bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names) + + injector_root = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector') + injector_names = os.listdir(injector_root) + + errors = [] # type: t.List[t.Tuple[str, str]] + + symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root) + + for bin_path in bin_paths: + if not os.path.islink(bin_path): + errors.append((bin_path, 'not a symbolic link')) + continue + + dest = os.readlink(bin_path) + + if not os.path.exists(bin_path): + errors.append((bin_path, 'points to non-existent path "%s"' % dest)) + continue + + if not os.path.isfile(bin_path): + errors.append((bin_path, 'points to non-file "%s"' % dest)) + continue + + map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path)) + + if not map_dest: + errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path)) + continue + + if dest != map_dest: + errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path))) + continue + + if not os.access(bin_path, os.X_OK): + errors.append((bin_path, 'points to non-executable file "%s"' % dest)) + continue + + for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items(): + if bin_name not in bin_names: + bin_path = os.path.join(bin_root, bin_name) + errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path))) + + if bin_name not in injector_names: + injector_path = os.path.join(injector_root, bin_name) + errors.append((injector_path, 'missing symlink to "python.py"')) + + messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors] + + if errors: + return SanityFailure(self.name, messages=messages) + + return SanitySuccess(self.name) diff --git 
a/test/lib/ansible_test/_internal/sanity/compile.py b/test/lib/ansible_test/_internal/sanity/compile.py new file mode 100644 index 00000000..5a517272 --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/compile.py @@ -0,0 +1,92 @@ +"""Sanity test for proper python syntax.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .. import types as t + +from ..sanity import ( + SanityMultipleVersion, + SanityMessage, + SanityFailure, + SanitySuccess, + SanityTargets, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + display, + find_python, + parse_to_list_of_dict, + is_subdir, +) + +from ..util_common import ( + run_command, +) + +from ..config import ( + SanityConfig, +) + + +class CompileTest(SanityMultipleVersion): + """Sanity test for proper python syntax.""" + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')] + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + settings = self.load_processor(args, python_version) + + paths = [target.path for target in targets.include] + + cmd = [find_python(python_version), os.path.join(SANITY_ROOT, 'compile', 'compile.py')] + + data = '\n'.join(paths) + + display.info(data, verbosity=4) + + try: + stdout, stderr = run_command(args, cmd, data=data, capture=True) + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr: + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + + if args.explain: + return SanitySuccess(self.name, python_version=python_version) + + pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$' + + results = parse_to_list_of_dict(pattern, stdout) + + results = [SanityMessage( + message=r['message'], + path=r['path'].replace('./', ''), + line=int(r['line']), + column=int(r['column']), + ) for r in results] + + results = settings.process_errors(results, paths) + + if results: + return SanityFailure(self.name, messages=results, python_version=python_version) + + return SanitySuccess(self.name, python_version=python_version) diff --git a/test/lib/ansible_test/_internal/sanity/ignores.py b/test/lib/ansible_test/_internal/sanity/ignores.py new file mode 100644 index 00000000..8b6df50c --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/ignores.py @@ -0,0 +1,89 @@ +"""Sanity test for the sanity ignore file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ..sanity import ( + SanityFailure, + SanityIgnoreParser, + SanityVersionNeutral, + SanitySuccess, + SanityMessage, +) + +from ..test import ( + calculate_confidence, + calculate_best_confidence, +) + +from ..config import ( + SanityConfig, +) + + +class IgnoresTest(SanityVersionNeutral): + """Sanity test for sanity test ignore entries.""" + @property + def can_ignore(self): # type: () -> bool + """True if the test supports ignore entries.""" + return False + + @property + def no_targets(self): # type: () -> bool + """True if the test does not use test targets.
Mutually exclusive with all_targets.""" + return True + + # noinspection PyUnusedLocal + def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + sanity_ignore = SanityIgnoreParser.load(args) + + messages = [] + + # parse errors + + messages.extend(SanityMessage( + message=message, + path=sanity_ignore.relative_path, + line=line, + column=column, + confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None, + ) for line, column, message in sanity_ignore.parse_errors) + + # file not found errors + + messages.extend(SanityMessage( + message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path), + path=sanity_ignore.relative_path, + line=line, + column=1, + confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None, + ) for line, path in sanity_ignore.file_not_found_errors) + + # conflicting ignores and skips + + for test_name, ignores in sanity_ignore.ignores.items(): + for ignore_path, ignore_entry in ignores.items(): + skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path) + + if not skip_line_no: + continue + + for ignore_line_no in ignore_entry.values(): + messages.append(SanityMessage( + message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no), + path=sanity_ignore.relative_path, + line=ignore_line_no, + column=1, + confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None, + )) + + if messages: + return SanityFailure(self.name, messages=messages) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/import.py b/test/lib/ansible_test/_internal/sanity/import.py new file mode 100644 index 00000000..7d4776ae --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/import.py @@ -0,0 +1,184 @@ +"""Sanity test for proper import exception handling.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .. 
import types as t + +from ..sanity import ( + SanityMultipleVersion, + SanityMessage, + SanityFailure, + SanitySuccess, + SanitySkipped, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + remove_tree, + display, + parse_to_list_of_dict, + is_subdir, + generate_pip_command, + find_python, +) + +from ..util_common import ( + intercept_command, + run_command, + ResultType, +) + +from ..ansible_util import ( + ansible_environment, +) + +from ..executor import ( + generate_pip_install, +) + +from ..config import ( + SanityConfig, +) + +from ..coverage_util import ( + coverage_context, +) + +from ..venv import ( + create_virtual_environment, +) + +from ..data import ( + data_context, +) + + +class ImportTest(SanityMultipleVersion): + """Sanity test for proper import exception handling.""" + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and + (is_subdir(target.path, data_context().content.module_path) or is_subdir(target.path, data_context().content.module_utils_path))] + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + capture_pip = args.verbosity < 2 + + python = find_python(python_version) + + if python_version.startswith('2.') and args.requirements: + # hack to make sure that virtualenv is available under Python 2.x + # on Python 3.x we can use the built-in venv + pip = generate_pip_command(python) + run_command(args, generate_pip_install(pip, '', packages=['virtualenv']), capture=capture_pip) + + settings = self.load_processor(args, python_version) + + paths = [target.path for target in targets.include] + + env = ansible_environment(args, color=False) + + temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import') + + # create a clean virtual environment to minimize the available imports beyond the python standard library + virtual_environment_path = os.path.join(temp_root, 'minimal-py%s' % python_version.replace('.', '')) + virtual_environment_bin = os.path.join(virtual_environment_path, 'bin') + + remove_tree(virtual_environment_path) + + if not create_virtual_environment(args, python_version, virtual_environment_path): + display.warning("Skipping sanity test '%s' on Python %s due to missing virtual environment support." 
% (self.name, python_version)) + return SanitySkipped(self.name, python_version) + + # add the importer to our virtual environment so it can be accessed through the coverage injector + importer_path = os.path.join(virtual_environment_bin, 'importer.py') + yaml_to_json_path = os.path.join(virtual_environment_bin, 'yaml_to_json.py') + if not args.explain: + os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'importer.py')), importer_path) + os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'yaml_to_json.py')), yaml_to_json_path) + + # activate the virtual environment + env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH']) + + env.update( + SANITY_TEMP_PATH=ResultType.TMP.path, + ) + + if data_context().content.collection: + env.update( + SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name, + SANITY_EXTERNAL_PYTHON=python, + ) + + virtualenv_python = os.path.join(virtual_environment_bin, 'python') + virtualenv_pip = generate_pip_command(virtualenv_python) + + # make sure coverage is available in the virtual environment if needed + if args.coverage: + run_command(args, generate_pip_install(virtualenv_pip, '', packages=['setuptools']), env=env, capture=capture_pip) + run_command(args, generate_pip_install(virtualenv_pip, '', packages=['coverage']), env=env, capture=capture_pip) + + try: + # In some environments pkg_resources is installed as a separate pip package which needs to be removed. + # For example, using Python 3.8 on Ubuntu 18.04 a virtualenv is created with only pip and setuptools. + # However, a venv is created with an additional pkg-resources package which is independent of setuptools. + # Making sure pkg-resources is removed preserves the import test consistency between venv and virtualenv. + # Additionally, in the above example, the pyparsing package vendored with pkg-resources is out-of-date and generates deprecation warnings. + # Thus it is important to remove pkg-resources to prevent system installed packages from generating deprecation warnings. 
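+            # Note: this uninstall is intentionally best-effort; the enclosing
+            # try/except treats an absent pkg-resources package as a no-op.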
+            run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pkg-resources'], env=env, capture=capture_pip)
+        except SubprocessError:
+            pass
+
+        run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env, capture=capture_pip)
+        run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env, capture=capture_pip)
+
+        cmd = ['importer.py']
+
+        data = '\n'.join(paths)
+
+        display.info(data, verbosity=4)
+
+        results = []
+
+        try:
+            with coverage_context(args):
+                stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
+                                                   virtualenv=virtualenv_python)
+
+            if stdout or stderr:
+                raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+        except SubprocessError as ex:
+            if ex.status != 10 or ex.stderr or not ex.stdout:
+                raise
+
+            pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+            results = parse_to_list_of_dict(pattern, ex.stdout)
+
+            relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
+
+            results = [SanityMessage(
+                message=r['message'],
+                path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
+                line=int(r['line']),
+                column=int(r['column']),
+            ) for r in results]
+
+        results = settings.process_errors(results, paths)
+
+        if results:
+            return SanityFailure(self.name, messages=results, python_version=python_version)
+
+        return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
new file mode 100644
index 00000000..e21c093a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -0,0 +1,399 @@
+"""Sanity test to check integration test aliases."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import textwrap
+import os
+
+from .. import types as t
+
+from ..sanity import (
+    SanityVersionNeutral,
+    SanityMessage,
+    SanityFailure,
+    SanitySuccess,
+    SanityTargets,
+    SANITY_ROOT,
+)
+
+from ..config import (
+    SanityConfig,
+)
+
+from ..target import (
+    filter_targets,
+    walk_posix_integration_targets,
+    walk_windows_integration_targets,
+    walk_integration_targets,
+    walk_module_targets,
+)
+
+from ..cloud import (
+    get_cloud_platforms,
+)
+
+from ..io import (
+    read_text_file,
+)
+
+from ..util import (
+    display,
+    find_python,
+    raw_command,
+)
+
+from ..util_common import (
+    write_json_test_results,
+    ResultType,
+)
+
+
+class IntegrationAliasesTest(SanityVersionNeutral):
+    """Sanity test to evaluate integration test aliases."""
+    CI_YML = '.azure-pipelines/azure-pipelines.yml'
+    TEST_ALIAS_PREFIX = 'shippable'  # this will be changed at some point in the future
+
+    DISABLED = 'disabled/'
+    UNSTABLE = 'unstable/'
+    UNSUPPORTED = 'unsupported/'
+
+    EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
+
+    TEMPLATE_DISABLED = """
+    The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
+
+    {tests}
+
+    Consider fixing the integration tests before or alongside changes.
+    """
+
+    TEMPLATE_UNSTABLE = """
+    The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
+
+    {tests}
+
+    Tests may need to be restarted due to failures unrelated to changes. 
+ """ + + TEMPLATE_UNSUPPORTED = """ + The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]: + + {tests} + + Consider running the tests manually or extending test infrastructure to add support. + """ + + TEMPLATE_UNTESTED = """ + The following modules have **no integration tests** [[explain]({explain_url}#untested)]: + + {tests} + + Consider adding integration tests before or alongside changes. + """ + + ansible_only = True + + def __init__(self): + super(IntegrationAliasesTest, self).__init__() + + self._ci_config = {} # type: t.Dict[str, t.Any] + self._ci_test_groups = {} # type: t.Dict[str, t.List[int]] + + @property + def can_ignore(self): # type: () -> bool + """True if the test supports ignore entries.""" + return False + + @property + def no_targets(self): # type: () -> bool + """True if the test does not use test targets. Mutually exclusive with all_targets.""" + return True + + def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any] + """Load and return the CI YAML configuration.""" + if not self._ci_config: + self._ci_config = self.load_yaml(args, self.CI_YML) + + return self._ci_config + + @property + def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]] + """Return a dictionary of CI test names and their group(s).""" + if not self._ci_test_groups: + test_groups = {} + + for stage in self._ci_config['stages']: + for job in stage['jobs']: + if job.get('template') != 'templates/matrix.yml': + continue + + parameters = job['parameters'] + + groups = parameters.get('groups', []) + test_format = parameters.get('testFormat', '{0}') + test_group_format = parameters.get('groupFormat', '{0}/{{1}}') + + for target in parameters['targets']: + test = target.get('test') or target.get('name') + + if groups: + tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups] + else: + tests_formatted = [test_format.format(test)] + + for test_formatted in tests_formatted: + parts = test_formatted.split('/') + key = parts[0] + + if key in ('sanity', 'units'): + continue + + try: + group = int(parts[-1]) + except ValueError: + continue + + if group < 1 or group > 99: + continue + + group_set = test_groups.setdefault(key, set()) + group_set.add(group) + + self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items()) + + return self._ci_test_groups + + def format_test_group_alias(self, name, fallback=''): + """ + :type name: str + :type fallback: str + :rtype: str + """ + group_numbers = self.ci_test_groups.get(name, None) + + if group_numbers: + if min(group_numbers) != 1: + display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True) + + if max(group_numbers) != len(group_numbers): + display.warning('Max test group "%s" in %s is %d instead of %d.' 
% (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True) + + if max(group_numbers) > 9: + alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1))) + elif len(group_numbers) > 1: + alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers)) + else: + alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers)) + elif fallback: + alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1) + else: + raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML)) + + return alias + + def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any] + """Load the specified YAML file and return the contents.""" + yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py') + python = find_python(args.python_version) + + return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0]) + + def test(self, args, targets): + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + if args.explain: + return SanitySuccess(self.name) + + if not os.path.isfile(self.CI_YML): + return SanityFailure(self.name, messages=[SanityMessage( + message='file missing', + path=self.CI_YML, + )]) + + results = dict( + comments=[], + labels={}, + ) + + self.load_ci_config(args) + self.check_changes(args, results) + + write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results) + + messages = [] + + messages += self.check_posix_targets(args) + messages += self.check_windows_targets() + + if messages: + return SanityFailure(self.name, messages=messages) + + return SanitySuccess(self.name) + + def check_posix_targets(self, args): + """ + :type args: SanityConfig + :rtype: list[SanityMessage] + """ + posix_targets = tuple(walk_posix_integration_targets()) + + clouds = get_cloud_platforms(args, posix_targets) + cloud_targets = ['cloud/%s/' % cloud for cloud in clouds] + + all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False)) + invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False)) + + messages = [] + + for target in invalid_cloud_targets: + for alias in target.aliases: + if alias.startswith('cloud/') and alias != 'cloud/': + if any(alias.startswith(cloud_target) for cloud_target in cloud_targets): + continue + + messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path)) + + messages += self.check_ci_group( + targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False, + directories=False, errors=False)), + find=self.format_test_group_alias('linux').replace('linux', 'posix'), + find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX], + ) + + messages += self.check_ci_group( + targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False, + errors=False)), + find=self.format_test_group_alias('generic'), + ) + + for cloud in clouds: + messages += self.check_ci_group( + targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)), + find=self.format_test_group_alias(cloud, 'cloud'), + find_incidental=['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX], + ) + + 
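+        # messages now holds any invalid cloud aliases found above plus the
+        # missing/conflicting group alias results from the check_ci_group() calls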
return messages + + def check_windows_targets(self): + """ + :rtype: list[SanityMessage] + """ + windows_targets = tuple(walk_windows_integration_targets()) + + messages = [] + + messages += self.check_ci_group( + targets=windows_targets, + find=self.format_test_group_alias('windows'), + find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX], + ) + + return messages + + def check_ci_group(self, targets, find, find_incidental=None): + """ + :type targets: tuple[CompletionTarget] + :type find: str + :type find_incidental: list[str] | None + :rtype: list[SanityMessage] + """ + all_paths = set(target.path for target in targets) + supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False)) + unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False)) + + if find_incidental: + incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False)) + else: + incidental_paths = set() + + unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths + conflicting_paths = supported_paths & unsupported_paths + + unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/')) + conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/')) + + messages = [] + + for path in unassigned_paths: + messages.append(SanityMessage(unassigned_message, '%s/aliases' % path)) + + for path in conflicting_paths: + messages.append(SanityMessage(conflicting_message, '%s/aliases' % path)) + + return messages + + def check_changes(self, args, results): + """ + :type args: SanityConfig + :type results: dict[str, any] + """ + integration_targets = list(walk_integration_targets()) + module_targets = list(walk_module_targets()) + + integration_targets_by_name = dict((target.name, target) for target in integration_targets) + module_names_by_path = dict((target.path, target.module) for target in module_targets) + + disabled_targets = [] + unstable_targets = [] + unsupported_targets = [] + + for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]: + for target in args.metadata.change_description.focused_command_targets[command]: + if self.DISABLED in integration_targets_by_name[target].aliases: + disabled_targets.append(target) + elif self.UNSTABLE in integration_targets_by_name[target].aliases: + unstable_targets.append(target) + elif self.UNSUPPORTED in integration_targets_by_name[target].aliases: + unsupported_targets.append(target) + + untested_modules = [] + + for path in args.metadata.change_description.no_integration_paths: + module = module_names_by_path.get(path) + + if module: + untested_modules.append(module) + + comments = [ + self.format_comment(self.TEMPLATE_DISABLED, disabled_targets), + self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets), + self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets), + self.format_comment(self.TEMPLATE_UNTESTED, untested_modules), + ] + + comments = [comment for comment in comments if comment] + + labels = dict( + needs_tests=bool(untested_modules), + disabled_tests=bool(disabled_targets), + unstable_tests=bool(unstable_targets), + unsupported_tests=bool(unsupported_targets), + ) + + results['comments'] += comments + results['labels'].update(labels) + + def 
format_comment(self, template, targets):
+        """
+        :type template: str
+        :type targets: list[str]
+        :rtype: str | None
+        """
+        if not targets:
+            return None
+
+        tests = '\n'.join('- %s' % target for target in targets)
+
+        data = dict(
+            explain_url=self.EXPLAIN_URL,
+            tests=tests,
+        )
+
+        message = textwrap.dedent(template).strip().format(**data)
+
+        return message
diff --git a/test/lib/ansible_test/_internal/sanity/pep8.py b/test/lib/ansible_test/_internal/sanity/pep8.py
new file mode 100644
index 00000000..9eb40dbc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pep8.py
@@ -0,0 +1,109 @@
+"""Sanity test for PEP 8 style guidelines using pycodestyle."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+    SanitySingleVersion,
+    SanityMessage,
+    SanityFailure,
+    SanitySuccess,
+    SANITY_ROOT,
+)
+
+from ..target import (
+    TestTarget,
+)
+
+from ..util import (
+    SubprocessError,
+    read_lines_without_comments,
+    parse_to_list_of_dict,
+    find_python,
+    is_subdir,
+)
+
+from ..util_common import (
+    run_command,
+)
+
+from ..config import (
+    SanityConfig,
+)
+
+
+class Pep8Test(SanitySingleVersion):
+    """Sanity test for PEP 8 style guidelines using pycodestyle."""
+    @property
+    def error_code(self):  # type: () -> t.Optional[str]
+        """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+        return 'A100'
+
+    def filter_targets(self, targets):  # type: (t.List[TestTarget]) -> t.List[TestTarget]
+        """Return the given list of test targets, filtered to include only those relevant for the test."""
+        return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+    def test(self, args, targets, python_version):
+        """
+        :type args: SanityConfig
+        :type targets: SanityTargets
+        :type python_version: str
+        :rtype: TestResult
+        """
+        current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
+        current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
+
+        settings = self.load_processor(args)
+
+        paths = [target.path for target in targets.include]
+
+        cmd = [
+            find_python(python_version),
+            '-m', 'pycodestyle',
+            '--max-line-length', '160',
+            '--config', '/dev/null',
+            '--ignore', ','.join(sorted(current_ignore)),
+        ] + paths
+
+        if paths:
+            try:
+                stdout, stderr = run_command(args, cmd, capture=True)
+                status = 0
+            except SubprocessError as ex:
+                stdout = ex.stdout
+                stderr = ex.stderr
+                status = ex.status
+
+            if stderr:
+                raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+        else:
+            stdout = None
+
+        if args.explain:
+            return SanitySuccess(self.name)
+
+        if stdout:
+            pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
+
+            results = parse_to_list_of_dict(pattern, stdout)
+        else:
+            results = []
+
+        results = [SanityMessage(
+            message=r['message'],
+            path=r['path'],
+            line=int(r['line']),
+            column=int(r['column']),
+            level='warning' if r['code'].startswith('W') else 'error',
+            code=r['code'],
+        ) for r in results]
+
+        errors = settings.process_errors(results, paths)
+
+        if errors:
+            return SanityFailure(self.name, messages=errors)
+
+        return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/pslint.py b/test/lib/ansible_test/_internal/sanity/pslint.py
new file mode 100644
index 00000000..256eee04
--- /dev/null
+++ 
b/test/lib/ansible_test/_internal/sanity/pslint.py @@ -0,0 +1,121 @@ +"""Sanity test using PSScriptAnalyzer.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import re + +from .. import types as t + +from ..sanity import ( + SanityVersionNeutral, + SanityMessage, + SanityFailure, + SanitySuccess, + SanitySkipped, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + find_executable, + ANSIBLE_TEST_DATA_ROOT, +) + +from ..util_common import ( + run_command, +) + +from ..config import ( + SanityConfig, +) + +from ..data import ( + data_context, +) + + +class PslintTest(SanityVersionNeutral): + """Sanity test using PSScriptAnalyzer.""" + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return 'AnsibleTest' + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')] + + def test(self, args, targets): + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + if not find_executable('pwsh', required='warning'): + return SanitySkipped(self.name) + + cmds = [] + + if args.requirements: + cmds.append([os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.ps1')]) + + cmds.append([os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths) + + stdout = '' + + for cmd in cmds: + try: + stdout, stderr = run_command(args, cmd, capture=True) + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr: + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + + if args.explain: + return SanitySuccess(self.name) + + severity = [ + 'Information', + 'Warning', + 'Error', + 'ParseError', + ] + + cwd = data_context().content.root + '/' + + # replace unicode smart quotes and ellipsis with ascii versions + stdout = re.sub(u'[\u2018\u2019]', "'", stdout) + stdout = re.sub(u'[\u201c\u201d]', '"', stdout) + stdout = re.sub(u'[\u2026]', '...', stdout) + + messages = json.loads(stdout) + + errors = [SanityMessage( + code=m['RuleName'], + message=m['Message'], + path=m['ScriptPath'].replace(cwd, ''), + line=m['Line'] or 0, + column=m['Column'] or 0, + level=severity[m['Severity']], + ) for m in messages] + + errors = settings.process_errors(errors, paths) + + if errors: + return SanityFailure(self.name, messages=errors) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/pylint.py b/test/lib/ansible_test/_internal/sanity/pylint.py new file mode 100644 index 00000000..324e5873 --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/pylint.py @@ -0,0 +1,289 @@ +"""Sanity test using pylint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import itertools +import json +import os +import datetime + +from .. 
import types as t + +from ..sanity import ( + SanitySingleVersion, + SanityMessage, + SanityFailure, + SanitySuccess, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + display, + ConfigParser, + is_subdir, + find_python, +) + +from ..util_common import ( + run_command, +) + +from ..ansible_util import ( + ansible_environment, + get_collection_detail, + CollectionDetail, + CollectionDetailError, +) + +from ..config import ( + SanityConfig, +) + +from ..data import ( + data_context, +) + + +class PylintTest(SanitySingleVersion): + """Sanity test using pylint.""" + + def __init__(self): + super(PylintTest, self).__init__() + self.optional_error_codes.update([ + 'ansible-deprecated-date', + 'too-complex', + ]) + + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return 'ansible-test' + + @property + def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] + """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" + # Python 3.9 is not supported on pylint < 2.5.0. + # Unfortunately pylint 2.5.0 and later include an unfixed regression. + # See: https://github.com/PyCQA/pylint/issues/3701 + return tuple(python_version for python_version in super(PylintTest, self).supported_python_versions if python_version not in ('3.9',)) + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')] + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins') + plugin_names = sorted(p[0] for p in [ + os.path.splitext(p) for p in os.listdir(plugin_dir)] if p[1] == '.py' and p[0] != '__init__') + + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in + paths if is_subdir(p, data_context().content.module_path)] + module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1])) + + large_module_group_threshold = 500 + large_module_groups = [key for key, value in + itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold] + + large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths + if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)] + large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2])) + + contexts = [] + remaining_paths = set(paths) + + def add_context(available_paths, context_name, context_filter): + """ + :type available_paths: set[str] + :type context_name: str + :type context_filter: (str) -> bool + """ + filtered_paths = set(p for p in available_paths if context_filter(p)) + contexts.append((context_name, sorted(filtered_paths))) + available_paths -= filtered_paths + + def filter_path(path_filter=None): + """ + :type path_filter: str + 
:rtype: (str) -> bool + """ + def context_filter(path_to_filter): + """ + :type path_to_filter: str + :rtype: bool + """ + return is_subdir(path_to_filter, path_filter) + + return context_filter + + for large_module_dir in large_module_group_dirs: + add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir))) + + for module_dir in module_dirs: + add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir))) + + add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path)) + add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path)) + + add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path)) + + if data_context().content.collection: + add_context(remaining_paths, 'collection', lambda p: True) + else: + add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_data/sanity/validate-modules/')) + add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/')) + add_context(remaining_paths, 'sanity', filter_path('test/lib/ansible_test/_data/sanity/')) + add_context(remaining_paths, 'ansible-test', filter_path('test/lib/')) + add_context(remaining_paths, 'test', filter_path('test/')) + add_context(remaining_paths, 'hacking', filter_path('hacking/')) + add_context(remaining_paths, 'ansible', lambda p: True) + + messages = [] + context_times = [] + + python = find_python(python_version) + + collection_detail = None + + if data_context().content.collection: + try: + collection_detail = get_collection_detail(args, python) + + if not collection_detail.version: + display.warning('Skipping pylint collection version checks since no collection version was found.') + except CollectionDetailError as ex: + display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason) + + test_start = datetime.datetime.utcnow() + + for context, context_paths in sorted(contexts): + if not context_paths: + continue + + context_start = datetime.datetime.utcnow() + messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names, python, collection_detail) + context_end = datetime.datetime.utcnow() + + context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start)) + + test_end = datetime.datetime.utcnow() + + for context_time in context_times: + display.info(context_time, verbosity=4) + + display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4) + + errors = [SanityMessage( + message=m['message'].replace('\n', ' '), + path=m['path'], + line=int(m['line']), + column=int(m['column']), + level=m['type'], + code=m['symbol'], + ) for m in messages] + + if args.explain: + return SanitySuccess(self.name) + + errors = settings.process_errors(errors, paths) + + if errors: + return SanityFailure(self.name, messages=errors) + + return SanitySuccess(self.name) + + @staticmethod + def pylint( + args, # type: SanityConfig + context, # type: str + paths, # type: t.List[str] + plugin_dir, # type: str + plugin_names, # type: t.List[str] + python, # type: str + collection_detail, # type: CollectionDetail + ): # type: (...) 
-> t.List[t.Dict[str, str]] + """Run pylint using the config specified by the context on the specified paths.""" + rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg') + + if not os.path.exists(rcfile): + if data_context().content.collection: + rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg') + else: + rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg') + + parser = ConfigParser() + parser.read(rcfile) + + if parser.has_section('ansible-test'): + config = dict(parser.items('ansible-test')) + else: + config = dict() + + disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i) + load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins + + cmd = [ + python, + '-m', 'pylint', + '--jobs', '0', + '--reports', 'n', + '--max-line-length', '160', + '--max-complexity', '20', + '--rcfile', rcfile, + '--output-format', 'json', + '--load-plugins', ','.join(load_plugins), + ] + paths + + if data_context().content.collection: + cmd.extend(['--collection-name', data_context().content.collection.full_name]) + + if collection_detail and collection_detail.version: + cmd.extend(['--collection-version', collection_detail.version]) + + append_python_path = [plugin_dir] + + if data_context().content.collection: + append_python_path.append(data_context().content.collection.root) + + env = ansible_environment(args) + env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path) + + # expose plugin paths for use in custom plugins + env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items())) + + if paths: + display.info('Checking %d file(s) in context "%s" with config: %s' % (len(paths), context, rcfile), verbosity=1) + + try: + stdout, stderr = run_command(args, cmd, env=env, capture=True) + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr or status >= 32: + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + else: + stdout = None + + if not args.explain and stdout: + messages = json.loads(stdout) + else: + messages = [] + + return messages diff --git a/test/lib/ansible_test/_internal/sanity/rstcheck.py b/test/lib/ansible_test/_internal/sanity/rstcheck.py new file mode 100644 index 00000000..2d8a01d5 --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/rstcheck.py @@ -0,0 +1,95 @@ +"""Sanity test using rstcheck.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from .. 
import types as t
+
+from ..sanity import (
+    SanitySingleVersion,
+    SanityMessage,
+    SanityFailure,
+    SanitySuccess,
+    SANITY_ROOT,
+)
+
+from ..target import (
+    TestTarget,
+)
+
+from ..util import (
+    SubprocessError,
+    parse_to_list_of_dict,
+    read_lines_without_comments,
+    find_python,
+)
+
+from ..util_common import (
+    run_command,
+)
+
+from ..config import (
+    SanityConfig,
+)
+
+
+class RstcheckTest(SanitySingleVersion):
+    """Sanity test using rstcheck."""
+    def filter_targets(self, targets):  # type: (t.List[TestTarget]) -> t.List[TestTarget]
+        """Return the given list of test targets, filtered to include only those relevant for the test."""
+        return [target for target in targets if os.path.splitext(target.path)[1] in ('.rst',)]
+
+    def test(self, args, targets, python_version):
+        """
+        :type args: SanityConfig
+        :type targets: SanityTargets
+        :type python_version: str
+        :rtype: TestResult
+        """
+        ignore_file = os.path.join(SANITY_ROOT, 'rstcheck', 'ignore-substitutions.txt')
+        ignore_substitutions = sorted(set(read_lines_without_comments(ignore_file, remove_blank_lines=True)))
+
+        settings = self.load_processor(args)
+
+        paths = [target.path for target in targets.include]
+
+        cmd = [
+            find_python(python_version),
+            '-m', 'rstcheck',
+            '--report', 'warning',
+            '--ignore-substitutions', ','.join(ignore_substitutions),
+        ] + paths
+
+        try:
+            stdout, stderr = run_command(args, cmd, capture=True)
+            status = 0
+        except SubprocessError as ex:
+            stdout = ex.stdout
+            stderr = ex.stderr
+            status = ex.status
+
+        if stdout:
+            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+        if args.explain:
+            return SanitySuccess(self.name)
+
+        pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
+
+        results = parse_to_list_of_dict(pattern, stderr)
+
+        results = [SanityMessage(
+            message=r['message'],
+            path=r['path'],
+            line=int(r['line']),
+            column=0,
+            level=r['level'],
+        ) for r in results]
+
+        settings.process_errors(results, paths)
+
+        if results:
+            return SanityFailure(self.name, messages=results)
+
+        return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/sanity_docs.py b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
new file mode 100644
index 00000000..44638075
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
@@ -0,0 +1,62 @@
+"""Sanity test for documentation of sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+    SanityVersionNeutral,
+    SanityMessage,
+    SanityFailure,
+    SanitySuccess,
+    sanity_get_tests,
+)
+
+from ..config import (
+    SanityConfig,
+)
+
+from ..data import (
+    data_context,
+)
+
+
+class SanityDocsTest(SanityVersionNeutral):
+    """Sanity test for documentation of sanity tests."""
+    ansible_only = True
+
+    @property
+    def can_ignore(self):  # type: () -> bool
+        """True if the test supports ignore entries."""
+        return False
+
+    @property
+    def no_targets(self):  # type: () -> bool
+        """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" + return True + + # noinspection PyUnusedLocal + def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + sanity_dir = 'docs/docsite/rst/dev_guide/testing/sanity' + sanity_docs = set(part[0] for part in (os.path.splitext(os.path.basename(path)) for path in data_context().content.get_files(sanity_dir)) + if part[1] == '.rst') + sanity_tests = set(sanity_test.name for sanity_test in sanity_get_tests()) + + missing = sanity_tests - sanity_docs + + results = [] + + results += [SanityMessage( + message='missing docs for ansible-test sanity --test %s' % r, + path=os.path.join(sanity_dir, '%s.rst' % r), + ) for r in sorted(missing)] + + if results: + return SanityFailure(self.name, messages=results) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/shellcheck.py b/test/lib/ansible_test/_internal/sanity/shellcheck.py new file mode 100644 index 00000000..82689ced --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/shellcheck.py @@ -0,0 +1,110 @@ +"""Sanity test using shellcheck.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from xml.etree.ElementTree import ( + fromstring, + Element, +) + +from .. import types as t + +from ..sanity import ( + SanityVersionNeutral, + SanityMessage, + SanityFailure, + SanitySuccess, + SanitySkipped, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + read_lines_without_comments, + find_executable, +) + +from ..util_common import ( + run_command, +) + +from ..config import ( + SanityConfig, +) + + +class ShellcheckTest(SanityVersionNeutral): + """Sanity test using shellcheck.""" + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return 'AT1000' + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if os.path.splitext(target.path)[1] == '.sh'] + + def test(self, args, targets): + """ + :type args: SanityConfig + :type targets: SanityTargets + :rtype: TestResult + """ + exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt') + exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True)) + + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + if not find_executable('shellcheck', required='warning'): + return SanitySkipped(self.name) + + cmd = [ + 'shellcheck', + '-e', ','.join(sorted(exclude)), + '--format', 'checkstyle', + ] + paths + + try: + stdout, stderr = run_command(args, cmd, capture=True) + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr or status > 1: + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + + if args.explain: + return SanitySuccess(self.name) + + # json output is missing file paths in older versions of shellcheck, so we'll use xml instead + root = fromstring(stdout) # type: Element + + results = [] + + for item in root: # type: Element + for entry in item: # type: Element + results.append(SanityMessage( + 
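+                    # attribute names below follow shellcheck's checkstyle XML output;
+                    # the "source" value (e.g. "ShellCheck.SC2086") is trimmed to the bare code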
message=entry.attrib['message'], + path=item.attrib['name'], + line=int(entry.attrib['line']), + column=int(entry.attrib['column']), + level=entry.attrib['severity'], + code=entry.attrib['source'].replace('ShellCheck.', ''), + )) + + results = settings.process_errors(results, paths) + + if results: + return SanityFailure(self.name, messages=results) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/validate_modules.py b/test/lib/ansible_test/_internal/sanity/validate_modules.py new file mode 100644 index 00000000..add3cdc7 --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/validate_modules.py @@ -0,0 +1,149 @@ +"""Sanity test using validate-modules.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + +from .. import types as t + +from ..sanity import ( + SanitySingleVersion, + SanityMessage, + SanityFailure, + SanitySuccess, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + display, + find_python, +) + +from ..util_common import ( + run_command, +) + +from ..ansible_util import ( + ansible_environment, + get_collection_detail, + CollectionDetailError, +) + +from ..config import ( + SanityConfig, +) + +from ..ci import ( + get_ci_provider, +) + +from ..data import ( + data_context, +) + + +class ValidateModulesTest(SanitySingleVersion): + """Sanity test using validate-modules.""" + + def __init__(self): + super(ValidateModulesTest, self).__init__() + self.optional_error_codes.update([ + 'deprecated-date', + ]) + + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return 'A100' + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + return [target for target in targets if target.module] + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + env = ansible_environment(args, color=False) + + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + python = find_python(python_version) + + cmd = [ + python, + os.path.join(SANITY_ROOT, 'validate-modules', 'validate-modules'), + '--format', 'json', + '--arg-spec', + ] + paths + + if data_context().content.collection: + cmd.extend(['--collection', data_context().content.collection.directory]) + + try: + collection_detail = get_collection_detail(args, python) + + if collection_detail.version: + cmd.extend(['--collection-version', collection_detail.version]) + else: + display.warning('Skipping validate-modules collection version checks since no collection version was found.') + except CollectionDetailError as ex: + display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason) + else: + base_branch = args.base_branch or get_ci_provider().get_base_branch() + + if base_branch: + cmd.extend([ + '--base-branch', base_branch, + ]) + else: + display.warning('Cannot perform module comparison against the base branch because the base branch was not detected.') + + try: + stdout, stderr = run_command(args, cmd, env=env, capture=True) + status = 0 + except SubprocessError as ex: + stdout = 
ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr or status not in (0, 3): + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + + if args.explain: + return SanitySuccess(self.name) + + messages = json.loads(stdout) + + errors = [] + + for filename in messages: + output = messages[filename] + + for item in output['errors']: + errors.append(SanityMessage( + path=filename, + line=int(item['line']) if 'line' in item else 0, + column=int(item['column']) if 'column' in item else 0, + level='error', + code='%s' % item['code'], + message=item['msg'], + )) + + errors = settings.process_errors(errors, paths) + + if errors: + return SanityFailure(self.name, messages=errors) + + return SanitySuccess(self.name) diff --git a/test/lib/ansible_test/_internal/sanity/yamllint.py b/test/lib/ansible_test/_internal/sanity/yamllint.py new file mode 100644 index 00000000..85a576d0 --- /dev/null +++ b/test/lib/ansible_test/_internal/sanity/yamllint.py @@ -0,0 +1,136 @@ +"""Sanity test using yamllint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + +from .. import types as t + +from ..import ansible_util + +from ..sanity import ( + SanitySingleVersion, + SanityMessage, + SanityFailure, + SanitySkipped, + SanitySuccess, + SANITY_ROOT, +) + +from ..target import ( + TestTarget, +) + +from ..util import ( + SubprocessError, + display, + is_subdir, + find_python, +) + +from ..util_common import ( + run_command, +) + +from ..config import ( + SanityConfig, +) + +from ..data import ( + data_context, +) + + +class YamllintTest(SanitySingleVersion): + """Sanity test using yamllint.""" + @property + def error_code(self): # type: () -> t.Optional[str] + """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" + return 'ansible-test' + + def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] + """Return the given list of test targets, filtered to include only those relevant for the test.""" + yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')] + + for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()): + if plugin_type == 'module_utils': + continue + + yaml_targets.extend([target for target in targets if + os.path.splitext(target.path)[1] == '.py' and + os.path.basename(target.path) != '__init__.py' and + is_subdir(target.path, plugin_path)]) + + return yaml_targets + + def test(self, args, targets, python_version): + """ + :type args: SanityConfig + :type targets: SanityTargets + :type python_version: str + :rtype: TestResult + """ + pyyaml_presence = ansible_util.check_pyyaml(args, python_version, quiet=True) + if not pyyaml_presence['cloader']: + display.warning("Skipping sanity test '%s' due to missing libyaml support in PyYAML." 
+ % self.name) + return SanitySkipped(self.name) + + settings = self.load_processor(args) + + paths = [target.path for target in targets.include] + + python = find_python(python_version) + + results = self.test_paths(args, paths, python) + results = settings.process_errors(results, paths) + + if results: + return SanityFailure(self.name, messages=results) + + return SanitySuccess(self.name) + + @staticmethod + def test_paths(args, paths, python): + """ + :type args: SanityConfig + :type paths: list[str] + :type python: str + :rtype: list[SanityMessage] + """ + cmd = [ + python, + os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'), + ] + + data = '\n'.join(paths) + + display.info(data, verbosity=4) + + try: + stdout, stderr = run_command(args, cmd, data=data, capture=True) + status = 0 + except SubprocessError as ex: + stdout = ex.stdout + stderr = ex.stderr + status = ex.status + + if stderr: + raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) + + if args.explain: + return [] + + results = json.loads(stdout)['messages'] + + results = [SanityMessage( + code=r['code'], + message=r['message'], + path=r['path'], + line=int(r['line']), + column=int(r['column']), + level=r['level'], + ) for r in results] + + return results diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py new file mode 100644 index 00000000..7bafd717 --- /dev/null +++ b/test/lib/ansible_test/_internal/target.py @@ -0,0 +1,694 @@ +"""Test target identification, iteration and inclusion/exclusion.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import collections +import os +import re +import itertools +import abc + +from . import types as t + +from .encoding import ( + to_bytes, + to_text, +) + +from .io import ( + read_text_file, +) + +from .util import ( + ApplicationError, + display, + read_lines_without_comments, + is_subdir, +) + +from .data import ( + data_context, +) + +MODULE_EXTENSIONS = '.py', '.ps1' + +try: + TCompletionTarget = t.TypeVar('TCompletionTarget', bound='CompletionTarget') +except AttributeError: + TCompletionTarget = None # pylint: disable=invalid-name + +try: + TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound='IntegrationTarget') +except AttributeError: + TIntegrationTarget = None # pylint: disable=invalid-name + + +def find_target_completion(target_func, prefix): + """ + :type target_func: () -> collections.Iterable[CompletionTarget] + :type prefix: unicode + :rtype: list[str] + """ + try: + targets = target_func() + short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash + matches = walk_completion_targets(targets, prefix, short) + return matches + except Exception as ex: # pylint: disable=locally-disabled, broad-except + return [u'%s' % ex] + + +def walk_completion_targets(targets, prefix, short=False): + """ + :type targets: collections.Iterable[CompletionTarget] + :type prefix: str + :type short: bool + :rtype: tuple[str] + """ + aliases = set(alias for target in targets for alias in target.aliases) + + if prefix.endswith('/') and prefix in aliases: + aliases.remove(prefix) + + matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]] + + if short: + offset = len(os.path.dirname(prefix)) + if offset: + offset += 1 + relative_matches = [match[offset:] for match in matches if len(match) > offset] + if len(relative_matches) > 1: + matches = relative_matches + + return tuple(sorted(matches)) + + +def 
walk_internal_targets(targets, includes=None, excludes=None, requires=None): + """ + :type targets: collections.Iterable[T <= CompletionTarget] + :type includes: list[str] + :type excludes: list[str] + :type requires: list[str] + :rtype: tuple[T <= CompletionTarget] + """ + targets = tuple(targets) + + include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda include_target: include_target.name) + + if requires: + require_targets = set(filter_targets(targets, requires, errors=True, directories=False)) + include_targets = [require_target for require_target in include_targets if require_target in require_targets] + + if excludes: + list(filter_targets(targets, excludes, errors=True, include=False, directories=False)) + + internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False)) + return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name)) + + +def filter_targets(targets, # type: t.Iterable[TCompletionTarget] + patterns, # type: t.List[str] + include=True, # type: bool + directories=True, # type: bool + errors=True, # type: bool + ): # type: (...) -> t.Iterable[TCompletionTarget] + """Iterate over the given targets and filter them based on the supplied arguments.""" + unmatched = set(patterns or ()) + compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None + + for target in targets: + matched_directories = set() + match = False + + if patterns: + for alias in target.aliases: + for pattern in patterns: + if compiled_patterns[pattern].match(alias): + match = True + + try: + unmatched.remove(pattern) + except KeyError: + pass + + if alias.endswith('/'): + if target.base_path and len(target.base_path) > len(alias): + matched_directories.add(target.base_path) + else: + matched_directories.add(alias) + elif include: + match = True + if not target.base_path: + matched_directories.add('.') + for alias in target.aliases: + if alias.endswith('/'): + if target.base_path and len(target.base_path) > len(alias): + matched_directories.add(target.base_path) + else: + matched_directories.add(alias) + + if match != include: + continue + + if directories and matched_directories: + yield DirectoryTarget(to_text(sorted(matched_directories, key=len)[0]), target.modules) + else: + yield target + + if errors: + if unmatched: + raise TargetPatternsNotMatched(unmatched) + + +def walk_module_targets(): + """ + :rtype: collections.Iterable[TestTarget] + """ + for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS): + if not target.module: + continue + + yield target + + +def walk_units_targets(): + """ + :rtype: collections.Iterable[TestTarget] + """ + return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_') + + +def walk_compile_targets(include_symlinks=True): + """ + :type include_symlinks: bool + :rtype: collections.Iterable[TestTarget] + """ + return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks) + + +def walk_powershell_targets(include_symlinks=True): + """ + :rtype: collections.Iterable[TestTarget] + """ + return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks) + + +def walk_sanity_targets(): + 
""" + :rtype: collections.Iterable[TestTarget] + """ + return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True) + + +def walk_posix_integration_targets(include_hidden=False): + """ + :type include_hidden: bool + :rtype: collections.Iterable[IntegrationTarget] + """ + for target in walk_integration_targets(): + if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases): + yield target + + +def walk_network_integration_targets(include_hidden=False): + """ + :type include_hidden: bool + :rtype: collections.Iterable[IntegrationTarget] + """ + for target in walk_integration_targets(): + if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases): + yield target + + +def walk_windows_integration_targets(include_hidden=False): + """ + :type include_hidden: bool + :rtype: collections.Iterable[IntegrationTarget] + """ + for target in walk_integration_targets(): + if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases): + yield target + + +def walk_integration_targets(): + """ + :rtype: collections.Iterable[IntegrationTarget] + """ + path = data_context().content.integration_targets_path + modules = frozenset(target.module for target in walk_module_targets()) + paths = data_context().content.walk_files(path) + prefixes = load_integration_prefixes() + targets_path_tuple = tuple(path.split(os.path.sep)) + + entry_dirs = ( + 'defaults', + 'files', + 'handlers', + 'meta', + 'tasks', + 'templates', + 'vars', + ) + + entry_files = ( + 'main.yml', + 'main.yaml', + ) + + entry_points = [] + + for entry_dir in entry_dirs: + for entry_file in entry_files: + entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file)) + + # any directory with at least one file is a target + path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep)) + for p in paths) + + # also detect targets which are ansible roles, looking for standard entry points + path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep)) + for p in paths if any(p.endswith(entry_point) for entry_point in entry_points)) + + # remove the top-level directory if it was included + if targets_path_tuple in path_tuples: + path_tuples.remove(targets_path_tuple) + + previous_path_tuple = None + paths = [] + + for path_tuple in sorted(path_tuples): + if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]: + # ignore nested directories + continue + + previous_path_tuple = path_tuple + paths.append(os.path.sep.join(path_tuple)) + + for path in paths: + yield IntegrationTarget(to_text(path), modules, prefixes) + + +def load_integration_prefixes(): + """ + :rtype: dict[str, str] + """ + path = data_context().content.integration_path + file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes') + prefixes = {} + + for file_path in file_paths: + prefix = os.path.splitext(file_path)[1][1:] + prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines())) + + return prefixes + + +def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None, include_symlinks=False, include_symlinked_directories=False): + """ + :type path: str | None + :type module_path: str | None + :type extensions: tuple[str] | None + :type prefix: str | None + :type extra_dirs: tuple[str] | None + :type include_symlinks: bool + :type 
include_symlinked_directories: bool + :rtype: collections.Iterable[TestTarget] + """ + if path: + file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories) + else: + file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories) + + for file_path in file_paths: + name, ext = os.path.splitext(os.path.basename(file_path)) + + if extensions and ext not in extensions: + continue + + if prefix and not name.startswith(prefix): + continue + + symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep))) + + if symlink and not include_symlinks: + continue + + yield TestTarget(to_text(file_path), module_path, prefix, path, symlink) + + file_paths = [] + + if extra_dirs: + for extra_dir in extra_dirs: + for file_path in data_context().content.get_files(extra_dir): + file_paths.append(file_path) + + for file_path in file_paths: + symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep))) + + if symlink and not include_symlinks: + continue + + yield TestTarget(file_path, module_path, prefix, path, symlink) + + +def analyze_integration_target_dependencies(integration_targets): + """ + :type integration_targets: list[IntegrationTarget] + :rtype: dict[str,set[str]] + """ + real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/' + + role_targets = [target for target in integration_targets if target.type == 'role'] + hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases) + + dependencies = collections.defaultdict(set) + + # handle setup dependencies + for target in integration_targets: + for setup_target_name in target.setup_always + target.setup_once: + dependencies[setup_target_name].add(target.name) + + # handle target dependencies + for target in integration_targets: + for need_target in target.needs_target: + dependencies[need_target].add(target.name) + + # handle symlink dependencies between targets + # this use case is supported, but discouraged + for target in integration_targets: + for path in data_context().content.walk_files(target.path): + if not os.path.islink(to_bytes(path.rstrip(os.path.sep))): + continue + + real_link_path = os.path.realpath(path) + + if not real_link_path.startswith(real_target_root): + continue + + link_target = real_link_path[len(real_target_root):].split('/')[0] + + if link_target == target.name: + continue + + dependencies[link_target].add(target.name) + + # intentionally primitive analysis of role meta to avoid a dependency on pyyaml + # script based targets are scanned as they may execute a playbook with role dependencies + for target in integration_targets: + meta_dir = os.path.join(target.path, 'meta') + + if not os.path.isdir(meta_dir): + continue + + meta_paths = data_context().content.get_files(meta_dir) + + for meta_path in meta_paths: + if os.path.exists(meta_path): + # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file) + try: + meta_lines = read_text_file(meta_path).splitlines() + except UnicodeDecodeError: + continue + + for meta_line in meta_lines: + if re.search(r'^ *#.*$', meta_line): + continue + + if not meta_line.strip(): + continue + + for hidden_target_name in hidden_role_target_names: + if hidden_target_name in meta_line: + dependencies[hidden_target_name].add(target.name) + + while True: + changes = 0 + + for dummy, dependent_target_names in dependencies.items(): + for dependent_target_name in 
list(dependent_target_names): + new_target_names = dependencies.get(dependent_target_name) + + if new_target_names: + for new_target_name in new_target_names: + if new_target_name not in dependent_target_names: + dependent_target_names.add(new_target_name) + changes += 1 + + if not changes: + break + + for target_name in sorted(dependencies): + consumers = dependencies[target_name] + + if not consumers: + continue + + display.info('%s:' % target_name, verbosity=4) + + for consumer in sorted(consumers): + display.info(' %s' % consumer, verbosity=4) + + return dependencies + + +class CompletionTarget: + """Command-line argument completion target base class.""" + __metaclass__ = abc.ABCMeta + + def __init__(self): + self.name = None + self.path = None + self.base_path = None + self.modules = tuple() + self.aliases = tuple() + + def __eq__(self, other): + if isinstance(other, CompletionTarget): + return self.__repr__() == other.__repr__() + + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + return self.name.__lt__(other.name) + + def __gt__(self, other): + return self.name.__gt__(other.name) + + def __hash__(self): + return hash(self.__repr__()) + + def __repr__(self): + if self.modules: + return '%s (%s)' % (self.name, ', '.join(self.modules)) + + return self.name + + +class DirectoryTarget(CompletionTarget): + """Directory target.""" + def __init__(self, path, modules): + """ + :type path: str + :type modules: tuple[str] + """ + super(DirectoryTarget, self).__init__() + + self.name = path + self.path = path + self.modules = modules + + +class TestTarget(CompletionTarget): + """Generic test target.""" + def __init__(self, path, module_path, module_prefix, base_path, symlink=None): + """ + :type path: str + :type module_path: str | None + :type module_prefix: str | None + :type base_path: str + :type symlink: bool | None + """ + super(TestTarget, self).__init__() + + if symlink is None: + symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep))) + + self.name = path + self.path = path + self.base_path = base_path + '/' if base_path else None + self.symlink = symlink + + name, ext = os.path.splitext(os.path.basename(self.path)) + + if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS: + self.module = name[len(module_prefix or ''):].lstrip('_') + self.modules = (self.module,) + else: + self.module = None + self.modules = tuple() + + aliases = [self.path, self.module] + parts = self.path.split('/') + + for i in range(1, len(parts)): + alias = '%s/' % '/'.join(parts[:i]) + aliases.append(alias) + + aliases = [a for a in aliases if a] + + self.aliases = tuple(sorted(aliases)) + + +class IntegrationTarget(CompletionTarget): + """Integration test target.""" + non_posix = frozenset(( + 'network', + 'windows', + )) + + categories = frozenset(non_posix | frozenset(( + 'posix', + 'module', + 'needs', + 'skip', + ))) + + def __init__(self, path, modules, prefixes): + """ + :type path: str + :type modules: frozenset[str] + :type prefixes: dict[str, str] + """ + super(IntegrationTarget, self).__init__() + + self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path) + self.name = self.relative_path.replace(os.path.sep, '.') + self.path = path + + # script_path and type + + file_paths = data_context().content.get_files(path) + runme_path = os.path.join(path, 'runme.sh') + + if runme_path in file_paths: + self.type = 'script' + self.script_path = runme_path + else: + 
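The while loop in analyze_integration_target_dependencies above is a fixed-point computation: each pass copies the consumers of a dependency's own consumers into its set, and iteration stops once a full pass adds nothing, so indirect consumers end up recorded directly. A standalone sketch with a hypothetical three-target chain (the names are invented):

    import collections

    # hypothetical chain: use_pkg needs install_pkg, which needs setup_pkg
    dependencies = collections.defaultdict(set)
    dependencies['setup_pkg'].add('install_pkg')
    dependencies['install_pkg'].add('use_pkg')

    while True:
        changes = 0

        for dummy, dependent_target_names in dependencies.items():
            for dependent_target_name in list(dependent_target_names):
                for new_target_name in dependencies.get(dependent_target_name) or ():
                    if new_target_name not in dependent_target_names:
                        dependent_target_names.add(new_target_name)
                        changes += 1

        if not changes:
            break

    print(sorted(dependencies['setup_pkg']))  # ['install_pkg', 'use_pkg']
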
self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well + self.script_path = None + + # static_aliases + + aliases_path = os.path.join(path, 'aliases') + + if aliases_path in file_paths: + static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True)) + else: + static_aliases = tuple() + + # modules + + if self.name in modules: + module_name = self.name + elif self.name.startswith('win_') and self.name[4:] in modules: + module_name = self.name[4:] + else: + module_name = None + + self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules)) + + # groups + + groups = [self.type] + groups += [a for a in static_aliases if a not in modules] + groups += ['module/%s' % m for m in self.modules] + + if not self.modules: + groups.append('non_module') + + if 'destructive' not in groups: + groups.append('non_destructive') + + if '_' in self.name: + prefix = self.name[:self.name.find('_')] + else: + prefix = None + + if prefix in prefixes: + group = prefixes[prefix] + + if group != prefix: + group = '%s/%s' % (group, prefix) + + groups.append(group) + + if self.name.startswith('win_'): + groups.append('windows') + + if self.name.startswith('connection_'): + groups.append('connection') + + if self.name.startswith('setup_') or self.name.startswith('prepare_'): + groups.append('hidden') + + if self.type not in ('script', 'role'): + groups.append('hidden') + + targets_relative_path = data_context().content.integration_targets_path + + # Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions. + self.skips = tuple(g for g in groups if g.startswith('skip/')) + + # Collect file paths before group expansion to avoid including the directories. + # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references. 
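The prefix handling above maps the leading component of a 'prefix_name' style target onto a group taken from the target-prefixes files, nesting the group when the two differ. A standalone sketch of that derivation (the target name and prefix table are invented for illustration):

    name = 'foo_smoke'             # hypothetical target name
    prefixes = {'foo': 'special'}  # hypothetical target-prefixes mapping

    groups = []
    prefix = name[:name.find('_')] if '_' in name else None

    if prefix in prefixes:
        group = prefixes[prefix]

        if group != prefix:
            group = '%s/%s' % (group, prefix)

        groups.append(group)

    print(groups)  # ['special/foo']
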
+ self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if + g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path)))) + + # network platform + networks = [g.split('/')[1] for g in groups if g.startswith('network/')] + self.network_platform = networks[0] if networks else None + + for group in itertools.islice(groups, 0, len(groups)): + if '/' in group: + parts = group.split('/') + for i in range(1, len(parts)): + groups.append('/'.join(parts[:i])) + + if not any(g in self.non_posix for g in groups): + groups.append('posix') + + # aliases + + aliases = [self.name] + \ + ['%s/' % g for g in groups] + \ + ['%s/%s' % (g, self.name) for g in groups if g not in self.categories] + + if 'hidden/' in aliases: + aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')] + + self.aliases = tuple(sorted(set(aliases))) + + # configuration + + self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/')))) + self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/')))) + self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/')))) + + +class TargetPatternsNotMatched(ApplicationError): + """One or more targets were not matched when a match was required.""" + def __init__(self, patterns): + """ + :type patterns: set[str] + """ + self.patterns = sorted(patterns) + + if len(patterns) > 1: + message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns) + else: + message = 'Target pattern not matched: %s' % self.patterns[0] + + super(TargetPatternsNotMatched, self).__init__(message) diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py new file mode 100644 index 00000000..8d9629a9 --- /dev/null +++ b/test/lib/ansible_test/_internal/test.py @@ -0,0 +1,524 @@ +"""Classes for storing and processing test results.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import re + +from . 
import types as t
+
+from .util import (
+    display,
+    get_ansible_version,
+)
+
+from .util_common import (
+    write_text_test_results,
+    write_json_test_results,
+    ResultType,
+)
+
+from .config import (
+    TestConfig,
+)
+
+
+def calculate_best_confidence(choices, metadata):
+    """
+    :type choices: tuple[tuple[str, int]]
+    :type metadata: Metadata
+    :rtype: int
+    """
+    best_confidence = 0
+
+    for path, line in choices:
+        confidence = calculate_confidence(path, line, metadata)
+        best_confidence = max(confidence, best_confidence)
+
+    return best_confidence
+
+
+def calculate_confidence(path, line, metadata):
+    """
+    :type path: str
+    :type line: int
+    :type metadata: Metadata
+    :rtype: int
+    """
+    ranges = metadata.changes.get(path)
+
+    # no changes were made to the file
+    if not ranges:
+        return 0
+
+    # changes were made to the same file and line
+    if any(r[0] <= line <= r[1] for r in ranges):
+        return 100
+
+    # changes were made to the same file and the line number is unknown
+    if line == 0:
+        return 75
+
+    # changes were made to the same file and the line number is different
+    return 50
+
+
+class TestResult:
+    """Base class for test results."""
+    def __init__(self, command, test, python_version=None):
+        """
+        :type command: str
+        :type test: str
+        :type python_version: str
+        """
+        self.command = command
+        self.test = test
+        self.python_version = python_version
+        self.name = self.test or self.command
+
+        if self.python_version:
+            self.name += '-python-%s' % self.python_version
+
+        try:
+            import junit_xml
+        except ImportError:
+            junit_xml = None
+
+        self.junit = junit_xml
+
+    def write(self, args):
+        """
+        :type args: TestConfig
+        """
+        self.write_console()
+        self.write_bot(args)
+
+        if args.lint:
+            self.write_lint()
+
+        if args.junit:
+            if self.junit:
+                self.write_junit(args)
+            else:
+                display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
+
+    def write_console(self):
+        """Write results to console."""
+
+    def write_lint(self):
+        """Write lint results to stdout."""
+
+    def write_bot(self, args):
+        """
+        :type args: TestConfig
+        """
+
+    def write_junit(self, args):
+        """
+        :type args: TestConfig
+        """
+
+    def create_result_name(self, extension):
+        """
+        :type extension: str
+        :rtype: str
+        """
+        name = 'ansible-test-%s' % self.command
+
+        if self.test:
+            name += '-%s' % self.test
+
+        if self.python_version:
+            name += '-python-%s' % self.python_version
+
+        name += extension
+
+        return name
+
+    def save_junit(self, args, test_case, properties=None):
+        """
+        :type args: TestConfig
+        :type test_case: junit_xml.TestCase
+        :type properties: dict[str, str] | None
+        :rtype: str | None
+        """
+        test_suites = [
+            self.junit.TestSuite(
+                name='ansible-test',
+                test_cases=[test_case],
+                timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
+                properties=properties,
+            ),
+        ]
+
+        # the junit_xml API is changing in version 2.0.0
+        # TestSuite.to_xml_string is being replaced with to_xml_report_string
+        # see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
+        try:
+            to_xml_string = self.junit.to_xml_report_string
+        except AttributeError:
+            to_xml_string = self.junit.TestSuite.to_xml_string
+
+        report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
+
+        if args.explain:
+            return
+
+        write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
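To make the naming convention used by create_result_name above concrete, here is how a result file name is composed; the command, test, and version values are hypothetical:

    command, test, python_version = 'sanity', 'pep8', '3.6'  # hypothetical inputs

    name = 'ansible-test-%s' % command

    if test:
        name += '-%s' % test

    if python_version:
        name += '-python-%s' % python_version

    print(name + '.xml')  # ansible-test-sanity-pep8-python-3.6.xml
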
+class TestTimeout(TestResult):
+    """Test timeout."""
+    def __init__(self, timeout_duration):
+        """
+        :type timeout_duration: int
+        """
+        super(TestTimeout, self).__init__(command='timeout', test='')
+
+        self.timeout_duration = timeout_duration
+
+    def write(self, args):
+        """
+        :type args: TestConfig
+        """
+        message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
+
+        # Include a leading newline to improve readability on Shippable "Tests" tab.
+        # Without this, the first line becomes indented.
+        output = '''
+One or more of the following situations may be responsible:
+
+- Code changes have resulted in tests that hang or run for an excessive amount of time.
+- Tests have been added which exceed the time limit when combined with existing tests.
+- Test infrastructure and/or external dependencies are operating slower than normal.'''
+
+        if args.coverage:
+            output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
+
+        output += '\n\nConsult the console log for additional details on where the timeout occurred.'
+
+        timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+        # hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
+        xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="timeout" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+        write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
+
+
+class TestSuccess(TestResult):
+    """Test success."""
+    def write_junit(self, args):
+        """
+        :type args: TestConfig
+        """
+        test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+        self.save_junit(args, test_case)
+
+
+class TestSkipped(TestResult):
+    """Test skipped."""
+    def write_console(self):
+        """Write results to console."""
+        display.info('No tests applicable.', verbosity=1)
+
+    def write_junit(self, args):
+        """
+        :type args: TestConfig
+        """
+        test_case = self.junit.TestCase(classname=self.command, name=self.name)
+        test_case.add_skipped_info('No tests applicable.')
+
+        self.save_junit(args, test_case)
+
+
+class TestFailure(TestResult):
+    """Test failure."""
+    def __init__(self, command, test, python_version=None, messages=None, summary=None):
+        """
+        :type command: str
+        :type test: str
+        :type python_version: str | None
+        :type messages: list[TestMessage] | None
+        :type summary: unicode | None
+        """
+        super(TestFailure, self).__init__(command, test, python_version)
+
+        if messages:
+            messages = sorted(messages)
+        else:
+            messages = []
+
+        self.messages = messages
+        self.summary = summary
+
+    def write(self, args):
+        """
+        :type args: TestConfig
+        """
+        if args.metadata.changes:
+            self.populate_confidence(args.metadata)
+
+        super(TestFailure, self).write(args)
+
+    def write_console(self):
+        """Write results to console."""
+        if self.summary:
+            display.error(self.summary)
+        else:
+            if self.python_version:
+                specifier = ' on python %s' % self.python_version
+            else:
+                specifier = ''
+
+            display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
+
+            for message in self.messages:
+                display.error(message.format(show_confidence=True))
+
+            doc_url = self.find_docs()
+            if doc_url:
+                display.info('See documentation for help: %s' % doc_url)
+
+    def write_lint(self):
+        """Write lint results to stdout."""
+        if self.summary:
+            command = self.format_command()
+            message = 'The test `%s` failed. See stderr output for details.'
% command + path = '' + message = TestMessage(message, path) + print(message) + else: + for message in self.messages: + print(message) + + def write_junit(self, args): + """ + :type args: TestConfig + """ + title = self.format_title() + output = self.format_block() + + test_case = self.junit.TestCase(classname=self.command, name=self.name) + + # Include a leading newline to improve readability on Shippable "Tests" tab. + # Without this, the first line becomes indented. + test_case.add_failure_info(message=title, output='\n%s' % output) + + self.save_junit(args, test_case) + + def write_bot(self, args): + """ + :type args: TestConfig + """ + docs = self.find_docs() + message = self.format_title(help_link=docs) + output = self.format_block() + + if self.messages: + verified = all((m.confidence or 0) >= 50 for m in self.messages) + else: + verified = False + + bot_data = dict( + verified=verified, + docs=docs, + results=[ + dict( + message=message, + output=output, + ), + ], + ) + + if args.explain: + return + + write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data) + + def populate_confidence(self, metadata): + """ + :type metadata: Metadata + """ + for message in self.messages: + if message.confidence is None: + message.confidence = calculate_confidence(message.path, message.line, metadata) + + def format_command(self): + """ + :rtype: str + """ + command = 'ansible-test %s' % self.command + + if self.test: + command += ' --test %s' % self.test + + if self.python_version: + command += ' --python %s' % self.python_version + + return command + + def find_docs(self): + """ + :rtype: str + """ + if self.command != 'sanity': + return None # only sanity tests have docs links + + # Use the major.minor version for the URL only if this a release that + # matches the pattern 2.4.0, otherwise, use 'devel' + ansible_version = get_ansible_version() + url_version = 'devel' + if re.search(r'^[0-9.]+$', ansible_version): + url_version = '.'.join(ansible_version.split('.')[:2]) + + testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version + + url = '%s/%s/' % (testing_docs_url, self.command) + + if self.test: + url += '%s.html' % self.test + + return url + + def format_title(self, help_link=None): + """ + :type help_link: str | None + :rtype: str + """ + command = self.format_command() + + if self.summary: + reason = 'the error' + else: + reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages) + + if help_link: + help_link_markup = ' [[explain](%s)]' % help_link + else: + help_link_markup = '' + + title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason) + + return title + + def format_block(self): + """ + :rtype: str + """ + if self.summary: + block = self.summary + else: + block = '\n'.join(m.format() for m in self.messages) + + message = block.strip() + + # Hack to remove ANSI color reset code from SubprocessError messages. 
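The version-to-URL mapping in find_docs above pins the documentation version only for plain numeric release strings; anything else, such as a prerelease tag, falls back to 'devel'. A standalone sketch with hypothetical version strings:

    import re

    for ansible_version in ('2.10.7', '2.11.0b1'):  # hypothetical versions
        url_version = 'devel'

        if re.search(r'^[0-9.]+$', ansible_version):
            url_version = '.'.join(ansible_version.split('.')[:2])

        print('https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version)
    # https://docs.ansible.com/ansible/2.10/dev_guide/testing
    # https://docs.ansible.com/ansible/devel/dev_guide/testing
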
+ message = message.replace(display.clear, '') + + return message + + +class TestMessage: + """Single test message for one file.""" + def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None): + """ + :type message: str + :type path: str + :type line: int + :type column: int + :type level: str + :type code: str | None + :type confidence: int | None + """ + self.__path = path + self.__line = line + self.__column = column + self.__level = level + self.__code = code + self.__message = message + + self.confidence = confidence + + @property + def path(self): # type: () -> str + """Return the path.""" + return self.__path + + @property + def line(self): # type: () -> int + """Return the line number, or 0 if none is available.""" + return self.__line + + @property + def column(self): # type: () -> int + """Return the column number, or 0 if none is available.""" + return self.__column + + @property + def level(self): # type: () -> str + """Return the level.""" + return self.__level + + @property + def code(self): # type: () -> t.Optional[str] + """Return the code, if any.""" + return self.__code + + @property + def message(self): # type: () -> str + """Return the message.""" + return self.__message + + @property + def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str] + """Return a tuple with all the immutable values of this test message.""" + return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message + + def __lt__(self, other): + return self.tuple < other.tuple + + def __le__(self, other): + return self.tuple <= other.tuple + + def __eq__(self, other): + return self.tuple == other.tuple + + def __ne__(self, other): + return self.tuple != other.tuple + + def __gt__(self, other): + return self.tuple > other.tuple + + def __ge__(self, other): + return self.tuple >= other.tuple + + def __hash__(self): + return hash(self.tuple) + + def __str__(self): + return self.format() + + def format(self, show_confidence=False): + """ + :type show_confidence: bool + :rtype: str + """ + if self.__code: + msg = '%s: %s' % (self.__code, self.__message) + else: + msg = self.__message + + if show_confidence and self.confidence is not None: + msg += ' (%d%%)' % self.confidence + + return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg) diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py new file mode 100644 index 00000000..49fbc1ba --- /dev/null +++ b/test/lib/ansible_test/_internal/thread.py @@ -0,0 +1,57 @@ +"""Python threading tools.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import threading +import sys + +try: + # noinspection PyPep8Naming + import Queue as queue +except ImportError: + # noinspection PyUnresolvedReferences + import queue # pylint: disable=locally-disabled, import-error + + +class WrappedThread(threading.Thread): + """Wrapper around Thread which captures results and exceptions.""" + def __init__(self, action): + """ + :type action: () -> any + """ + # noinspection PyOldStyleClasses + super(WrappedThread, self).__init__() + self._result = queue.Queue() + self.action = action + self.result = None + + def run(self): + """ + Run action and capture results or exception. + Do not override. Do not call directly. Executed by the start() method. + """ + # We truly want to catch anything that the worker thread might do including call sys.exit. 
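For reference, TestMessage.format above renders 'path:line:column: message', prefixing the code when one is set and appending the confidence percentage on request. A hypothetical rendering (all values invented):

    # hypothetical values combined the same way format() combines them
    path, line, column = 'plugins/modules/demo.py', 14, 161
    code, message, confidence = 'E501', 'line too long', 100

    msg = '%s: %s' % (code, message) if code else message
    msg += ' (%d%%)' % confidence

    print('%s:%s:%s: %s' % (path, line, column, msg))
    # plugins/modules/demo.py:14:161: E501: line too long (100%)
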
+ # Therefore we catch *everything* (including old-style class exceptions) + # noinspection PyBroadException, PyPep8 + try: + self._result.put((self.action(), None)) + # pylint: disable=locally-disabled, bare-except + except: # noqa + self._result.put((None, sys.exc_info())) + + def wait_for_result(self): + """ + Wait for thread to exit and return the result or raise an exception. + :rtype: any + """ + result, exception = self._result.get() + + if exception: + if sys.version_info[0] > 2: + raise exception[1].with_traceback(exception[2]) + # noinspection PyRedundantParentheses + exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used + + self.result = result + + return result diff --git a/test/lib/ansible_test/_internal/types.py b/test/lib/ansible_test/_internal/types.py new file mode 100644 index 00000000..46ef7066 --- /dev/null +++ b/test/lib/ansible_test/_internal/types.py @@ -0,0 +1,32 @@ +"""Import wrapper for type hints when available.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +TYPE_CHECKING = False + +try: + from typing import ( + Any, + AnyStr, + BinaryIO, + Callable, + Dict, + FrozenSet, + Generator, + IO, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Text, + TextIO, + Tuple, + Type, + TYPE_CHECKING, + TypeVar, + Union, + ) +except ImportError: + pass diff --git a/test/lib/ansible_test/_internal/units/__init__.py b/test/lib/ansible_test/_internal/units/__init__.py new file mode 100644 index 00000000..22145431 --- /dev/null +++ b/test/lib/ansible_test/_internal/units/__init__.py @@ -0,0 +1,159 @@ +"""Execute unit tests using pytest.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + +from ..util import ( + ANSIBLE_TEST_DATA_ROOT, + display, + get_available_python_versions, + is_subdir, + SubprocessError, + REMOTE_ONLY_PYTHON_VERSIONS, +) + +from ..util_common import ( + intercept_command, + ResultType, + handle_layout_messages, +) + +from ..ansible_util import ( + ansible_environment, + check_pyyaml, +) + +from ..target import ( + walk_internal_targets, + walk_units_targets, +) + +from ..config import ( + UnitsConfig, +) + +from ..coverage_util import ( + coverage_context, +) + +from ..data import ( + data_context, +) + +from ..executor import ( + AllTargetsSkipped, + Delegate, + get_changes_filter, + install_command_requirements, + SUPPORTED_PYTHON_VERSIONS, +) + + +def command_units(args): + """ + :type args: UnitsConfig + """ + handle_layout_messages(data_context().content.unit_messages) + + changes = get_changes_filter(args) + require = args.require + changes + include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require) + + paths = [target.path for target in include] + remote_paths = [path for path in paths + if is_subdir(path, data_context().content.unit_module_path) + or is_subdir(path, data_context().content.unit_module_utils_path)] + + if not paths: + raise AllTargetsSkipped() + + if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths: + raise AllTargetsSkipped() + + if args.delegate: + raise Delegate(require=changes, exclude=args.exclude) + + version_commands = [] + + available_versions = sorted(get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS)).keys()) + + for version in SUPPORTED_PYTHON_VERSIONS: + # run all versions unless version given, in which case run only that version + if args.python and version != 
args.python_version: + continue + + if not args.python and version not in available_versions: + display.warning("Skipping unit tests on Python %s due to missing interpreter." % version) + continue + + if args.requirements_mode != 'skip': + install_command_requirements(args, version) + + env = ansible_environment(args) + + cmd = [ + 'pytest', + '--boxed', + '-r', 'a', + '-n', str(args.num_workers) if args.num_workers else 'auto', + '--color', + 'yes' if args.color else 'no', + '-p', 'no:cacheprovider', + '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'), + '--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-units.xml' % version), + ] + + if not data_context().content.collection: + cmd.append('--durations=25') + + if version != '2.6': + # added in pytest 4.5.0, which requires python 2.7+ + cmd.append('--strict-markers') + + plugins = [] + + if args.coverage: + plugins.append('ansible_pytest_coverage') + + if data_context().content.collection: + plugins.append('ansible_pytest_collections') + + if plugins: + env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins') + env['PYTEST_PLUGINS'] = ','.join(plugins) + + if args.collect_only: + cmd.append('--collect-only') + + if args.verbosity: + cmd.append('-' + ('v' * args.verbosity)) + + if version in REMOTE_ONLY_PYTHON_VERSIONS: + test_paths = remote_paths + else: + test_paths = paths + + if not test_paths: + continue + + cmd.extend(test_paths) + + version_commands.append((version, cmd, env)) + + if args.requirements_mode == 'only': + sys.exit() + + for version, command, env in version_commands: + check_pyyaml(args, version) + + display.info('Unit test with Python %s' % version) + + try: + with coverage_context(args): + intercept_command(args, command, target_name='units', env=env, python_version=version) + except SubprocessError as ex: + # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case + if ex.status != 5: + raise diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py new file mode 100644 index 00000000..005c3e05 --- /dev/null +++ b/test/lib/ansible_test/_internal/util.py @@ -0,0 +1,853 @@ +"""Miscellaneous utility functions and classes.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import contextlib +import errno +import fcntl +import inspect +import os +import pkgutil +import random +import re +import shutil +import socket +import stat +import string +import subprocess +import sys +import tempfile +import time +import zipfile + +from struct import unpack, pack +from termios import TIOCGWINSZ + +try: + from abc import ABC +except ImportError: + from abc import ABCMeta + ABC = ABCMeta('ABC', (), {}) + +try: + # noinspection PyCompatibility + from configparser import ConfigParser +except ImportError: + # noinspection PyCompatibility,PyUnresolvedReferences + from ConfigParser import SafeConfigParser as ConfigParser + +try: + # noinspection PyProtectedMember + from shlex import quote as cmd_quote +except ImportError: + # noinspection PyProtectedMember + from pipes import quote as cmd_quote + +from . 
import types as t + +from .encoding import ( + to_bytes, + to_optional_bytes, + to_optional_text, +) + +from .io import ( + open_binary_file, + read_text_file, +) + +try: + C = t.TypeVar('C') +except AttributeError: + C = None + + +PYTHON_PATHS = {} # type: t.Dict[str, str] + +try: + # noinspection PyUnresolvedReferences + MAXFD = subprocess.MAXFD +except AttributeError: + MAXFD = -1 + +COVERAGE_CONFIG_NAME = 'coveragerc' + +ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +# assume running from install +ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT) +ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0])) +ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible') +ANSIBLE_SOURCE_ROOT = None + +if not os.path.exists(ANSIBLE_LIB_ROOT): + # running from source + ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT))) + ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin') + ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible') + ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT + +ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data') +ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config') + +# Modes are set to allow all users the same level of access. +# This permits files to be used in tests that change users. +# The only exception is write access to directories for the user creating them. +# This avoids having to modify the directory permissions a second time. + +MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH + +MODE_FILE = MODE_READ +MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH +MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH + +MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH +MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH + +REMOTE_ONLY_PYTHON_VERSIONS = ( + '2.6', +) + +SUPPORTED_PYTHON_VERSIONS = ( + '2.6', + '2.7', + '3.5', + '3.6', + '3.7', + '3.8', + '3.9', +) + + +def remove_file(path): + """ + :type path: str + """ + if os.path.isfile(path): + os.remove(path) + + +def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str] + """ + Returns lines from the specified text file with comments removed. + Comments are any content from a hash symbol to the end of a line. + Any spaces immediately before a comment are also removed. 
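A standalone sketch of the comment stripping implemented just below, using hypothetical input lines: a hash and everything after it is removed along with any spaces immediately before it, and blank lines can optionally be dropped as well.

    import re

    lines = ['value1  # trailing comment', '# full-line comment', '', 'value2']  # hypothetical file content

    lines = [re.sub(r' *#.*$', '', line) for line in lines]
    lines = [line for line in lines if line]  # remove_blank_lines=True

    print(lines)  # ['value1', 'value2']
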
+ """ + if optional and not os.path.exists(path): + return [] + + lines = read_text_file(path).splitlines() + + lines = [re.sub(r' *#.*$', '', line) for line in lines] + + if remove_blank_lines: + lines = [line for line in lines if line] + + return lines + + +def find_executable(executable, cwd=None, path=None, required=True): + """ + :type executable: str + :type cwd: str + :type path: str + :type required: bool | str + :rtype: str | None + """ + match = None + real_cwd = os.getcwd() + + if not cwd: + cwd = real_cwd + + if os.path.dirname(executable): + target = os.path.join(cwd, executable) + if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK): + match = executable + else: + if path is None: + path = os.environ.get('PATH', os.path.defpath) + + if path: + path_dirs = path.split(os.path.pathsep) + seen_dirs = set() + + for path_dir in path_dirs: + if path_dir in seen_dirs: + continue + + seen_dirs.add(path_dir) + + if os.path.abspath(path_dir) == real_cwd: + path_dir = cwd + + candidate = os.path.join(path_dir, executable) + + if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK): + match = candidate + break + + if not match and required: + message = 'Required program "%s" not found.' % executable + + if required != 'warning': + raise ApplicationError(message) + + display.warning(message) + + return match + + +def find_python(version, path=None, required=True): + """ + :type version: str + :type path: str | None + :type required: bool + :rtype: str + """ + version_info = tuple(int(n) for n in version.split('.')) + + if not path and version_info == sys.version_info[:len(version_info)]: + python_bin = sys.executable + else: + python_bin = find_executable('python%s' % version, path=path, required=required) + + return python_bin + + +def get_ansible_version(): # type: () -> str + """Return the Ansible version.""" + try: + return get_ansible_version.version + except AttributeError: + pass + + # ansible may not be in our sys.path + # avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation + load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release') + + # noinspection PyUnresolvedReferences + from ansible_release import __version__ as ansible_version # pylint: disable=import-error + + get_ansible_version.version = ansible_version + + return ansible_version + + +def get_available_python_versions(versions): # type: (t.List[str]) -> t.Dict[str, str] + """Return a dictionary indicating which of the requested Python versions are available.""" + try: + return get_available_python_versions.result + except AttributeError: + pass + + get_available_python_versions.result = dict((version, path) for version, path in + ((version, find_python(version, required=False)) for version in versions) if path) + + return get_available_python_versions.result + + +def generate_pip_command(python): + """ + :type python: str + :rtype: list[str] + """ + return [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'quiet_pip.py')] + + +def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None, + cmd_verbosity=1, str_errors='strict'): + """ + :type cmd: collections.Iterable[str] + :type capture: bool + :type env: dict[str, str] | None + :type data: str | None + :type cwd: str | None + :type explain: bool + :type stdin: file | None + :type stdout: file | None + :type cmd_verbosity: int + :type str_errors: str + :rtype: str | None, str | None + """ + if not cwd: + cwd = os.getcwd() + + 
if not env: + env = common_environment() + + cmd = list(cmd) + + escaped_cmd = ' '.join(cmd_quote(c) for c in cmd) + + display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True) + display.info('Working directory: %s' % cwd, verbosity=2) + + program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning') + + if program: + display.info('Program found: %s' % program, verbosity=2) + + for key in sorted(env.keys()): + display.info('%s=%s' % (key, env[key]), verbosity=2) + + if explain: + return None, None + + communicate = False + + if stdin is not None: + data = None + communicate = True + elif data is not None: + stdin = subprocess.PIPE + communicate = True + + if stdout: + communicate = True + + if capture: + stdout = stdout or subprocess.PIPE + stderr = subprocess.PIPE + communicate = True + else: + stderr = None + + start = time.time() + process = None + + try: + try: + cmd_bytes = [to_bytes(c) for c in cmd] + env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items()) + process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd) + except OSError as ex: + if ex.errno == errno.ENOENT: + raise ApplicationError('Required program "%s" not found.' % cmd[0]) + raise + + if communicate: + data_bytes = to_optional_bytes(data) + stdout_bytes, stderr_bytes = process.communicate(data_bytes) + stdout_text = to_optional_text(stdout_bytes, str_errors) or u'' + stderr_text = to_optional_text(stderr_bytes, str_errors) or u'' + else: + process.wait() + stdout_text, stderr_text = None, None + finally: + if process and process.returncode is None: + process.kill() + display.info('') # the process we're interrupting may have completed a partial line of output + display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.') + + status = process.returncode + runtime = time.time() - start + + display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4) + + if status == 0: + return stdout_text, stderr_text + + raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime) + + +def common_environment(): + """Common environment used for executing all programs.""" + env = dict( + LC_ALL='en_US.UTF-8', + PATH=os.environ.get('PATH', os.path.defpath), + ) + + required = ( + 'HOME', + ) + + optional = ( + 'HTTPTESTER', + 'LD_LIBRARY_PATH', + 'SSH_AUTH_SOCK', + # MacOS High Sierra Compatibility + # http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html + # Example configuration for macOS: + # export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES + 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY', + 'ANSIBLE_KEEP_REMOTE_FILES', + # MacOS Homebrew Compatibility + # https://cryptography.io/en/latest/installation/#building-cryptography-on-macos + # This may also be required to install pyyaml with libyaml support when installed in non-standard locations. + # Example configuration for brew on macOS: + # export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/" + # export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/" + # However, this is not adequate for PyYAML 3.13, which is the latest version supported on Python 2.6. 
+ # For that version the standard location must be used, or `pip install` must be invoked with additional options: + # --global-option=build_ext --global-option=-L{path_to_lib_dir} + 'LDFLAGS', + 'CFLAGS', + ) + + env.update(pass_vars(required=required, optional=optional)) + + return env + + +def pass_vars(required, optional): + """ + :type required: collections.Iterable[str] + :type optional: collections.Iterable[str] + :rtype: dict[str, str] + """ + env = {} + + for name in required: + if name not in os.environ: + raise MissingEnvironmentVariable(name) + env[name] = os.environ[name] + + for name in optional: + if name not in os.environ: + continue + env[name] = os.environ[name] + + return env + + +def deepest_path(path_a, path_b): + """Return the deepest of two paths, or None if the paths are unrelated. + :type path_a: str + :type path_b: str + :rtype: str | None + """ + if path_a == '.': + path_a = '' + + if path_b == '.': + path_b = '' + + if path_a.startswith(path_b): + return path_a or '.' + + if path_b.startswith(path_a): + return path_b or '.' + + return None + + +def remove_tree(path): + """ + :type path: str + """ + try: + shutil.rmtree(to_bytes(path)) + except OSError as ex: + if ex.errno != errno.ENOENT: + raise + + +def is_binary_file(path): + """ + :type path: str + :rtype: bool + """ + assume_text = set([ + '.cfg', + '.conf', + '.crt', + '.cs', + '.css', + '.html', + '.ini', + '.j2', + '.js', + '.json', + '.md', + '.pem', + '.ps1', + '.psm1', + '.py', + '.rst', + '.sh', + '.txt', + '.xml', + '.yaml', + '.yml', + ]) + + assume_binary = set([ + '.bin', + '.eot', + '.gz', + '.ico', + '.iso', + '.jpg', + '.otf', + '.p12', + '.png', + '.pyc', + '.rpm', + '.ttf', + '.woff', + '.woff2', + '.zip', + ]) + + ext = os.path.splitext(path)[1] + + if ext in assume_text: + return False + + if ext in assume_binary: + return True + + with open_binary_file(path) as path_fd: + # noinspection PyTypeChecker + return b'\0' in path_fd.read(4096) + + +def generate_password(): + """Generate a random password. 
+ :rtype: str + """ + chars = [ + string.ascii_letters, + string.digits, + string.ascii_letters, + string.digits, + '-', + ] * 4 + + password = ''.join([random.choice(char) for char in chars[:-1]]) + + display.sensitive.add(password) + + return password + + +class Display: + """Manages color console output.""" + clear = '\033[0m' + red = '\033[31m' + green = '\033[32m' + yellow = '\033[33m' + blue = '\033[34m' + purple = '\033[35m' + cyan = '\033[36m' + + verbosity_colors = { + 0: None, + 1: green, + 2: blue, + 3: cyan, + } + + def __init__(self): + self.verbosity = 0 + self.color = sys.stdout.isatty() + self.warnings = [] + self.warnings_unique = set() + self.info_stderr = False + self.rows = 0 + self.columns = 0 + self.truncate = 0 + self.redact = True + self.sensitive = set() + + if os.isatty(0): + self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2] + + def __warning(self, message): + """ + :type message: str + """ + self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr) + + def review_warnings(self): + """Review all warnings which previously occurred.""" + if not self.warnings: + return + + self.__warning('Reviewing previous %d warning(s):' % len(self.warnings)) + + for warning in self.warnings: + self.__warning(warning) + + def warning(self, message, unique=False, verbosity=0): + """ + :type message: str + :type unique: bool + :type verbosity: int + """ + if verbosity > self.verbosity: + return + + if unique: + if message in self.warnings_unique: + return + + self.warnings_unique.add(message) + + self.__warning(message) + self.warnings.append(message) + + def notice(self, message): + """ + :type message: str + """ + self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr) + + def error(self, message): + """ + :type message: str + """ + self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr) + + def info(self, message, verbosity=0, truncate=False): + """ + :type message: str + :type verbosity: int + :type truncate: bool + """ + if self.verbosity >= verbosity: + color = self.verbosity_colors.get(verbosity, self.yellow) + self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate) + + def print_message(self, message, color=None, fd=sys.stdout, truncate=False): # pylint: disable=locally-disabled, invalid-name + """ + :type message: str + :type color: str | None + :type fd: file + :type truncate: bool + """ + if self.redact and self.sensitive: + for item in self.sensitive: + if not item: + continue + + message = message.replace(item, '*' * len(item)) + + if truncate: + if len(message) > self.truncate > 5: + message = message[:self.truncate - 5] + ' ...' 
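The ordering in print_message above matters: sensitive values are masked before the truncation limit is applied, so a cut-off message never exposes part of a secret that redaction would otherwise have caught. A standalone sketch with hypothetical values:

    sensitive = set(['s3cr3t'])  # hypothetical secret value
    truncate = 20
    message = 'token=s3cr3t and more trailing output'

    for item in sensitive:
        message = message.replace(item, '*' * len(item))

    if len(message) > truncate > 5:
        message = message[:truncate - 5] + ' ...'

    print(message)  # token=****** an ...
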
+ + if color and self.color: + # convert color resets in message to desired color + message = message.replace(self.clear, color) + message = '%s%s%s' % (color, message, self.clear) + + if sys.version_info[0] == 2: + message = to_bytes(message) + + print(message, file=fd) + fd.flush() + + +class ApplicationError(Exception): + """General application error.""" + + +class ApplicationWarning(Exception): + """General application warning which interrupts normal program flow.""" + + +class SubprocessError(ApplicationError): + """Error resulting from failed subprocess execution.""" + def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None): + """ + :type cmd: list[str] + :type status: int + :type stdout: str | None + :type stderr: str | None + :type runtime: float | None + """ + message = 'Command "%s" returned exit status %s.\n' % (' '.join(cmd_quote(c) for c in cmd), status) + + if stderr: + message += '>>> Standard Error\n' + message += '%s%s\n' % (stderr.strip(), Display.clear) + + if stdout: + message += '>>> Standard Output\n' + message += '%s%s\n' % (stdout.strip(), Display.clear) + + message = message.strip() + + super(SubprocessError, self).__init__(message) + + self.cmd = cmd + self.message = message + self.status = status + self.stdout = stdout + self.stderr = stderr + self.runtime = runtime + + +class MissingEnvironmentVariable(ApplicationError): + """Error caused by missing environment variable.""" + def __init__(self, name): + """ + :type name: str + """ + super(MissingEnvironmentVariable, self).__init__('Missing environment variable: %s' % name) + + self.name = name + + +def parse_to_list_of_dict(pattern, value): + """ + :type pattern: str + :type value: str + :return: list[dict[str, str]] + """ + matched = [] + unmatched = [] + + for line in value.splitlines(): + match = re.search(pattern, line) + + if match: + matched.append(match.groupdict()) + else: + unmatched.append(line) + + if unmatched: + raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched))) + + return matched + + +def get_available_port(): + """ + :rtype: int + """ + # this relies on the kernel not reusing previously assigned ports immediately + socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + with contextlib.closing(socket_fd): + socket_fd.bind(('', 0)) + return socket_fd.getsockname()[1] + + +def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]] + """Returns the set of types that are concrete subclasses of the given type.""" + subclasses = set() # type: t.Set[t.Type[C]] + queue = [class_type] # type: t.List[t.Type[C]] + + while queue: + parent = queue.pop() + + for child in parent.__subclasses__(): + if child not in subclasses: + if not inspect.isabstract(child): + subclasses.add(child) + queue.append(child) + + return subclasses + + +def is_subdir(candidate_path, path): # type: (str, str) -> bool + """Returns true if candidate_path is path or a subdirectory of path.""" + if not path.endswith(os.path.sep): + path += os.path.sep + + if not candidate_path.endswith(os.path.sep): + candidate_path += os.path.sep + + return candidate_path.startswith(path) + + +def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str] + """Returns a list of directories extracted from the given list of paths.""" + dir_names = set() + + for path in paths: + while True: + path = os.path.dirname(path) + + if not path or path == os.path.sep: + break + + dir_names.add(path + os.path.sep) + + return sorted(dir_names) + + +def str_to_version(version): # type: 
(str) -> t.Tuple[int, ...] + """Return a version tuple from a version string.""" + return tuple(int(n) for n in version.split('.')) + + +def version_to_str(version): # type: (t.Tuple[int, ...]) -> str + """Return a version string from a version tuple.""" + return '.'.join(str(n) for n in version) + + +def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None + """ + Import plugins from the given directory relative to the given root. + If the root is not provided, the 'lib' directory for the test runner will be used. + """ + if root is None: + root = os.path.dirname(__file__) + + path = os.path.join(root, directory) + package = __name__.rsplit('.', 1)[0] + prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.')) + + for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix): + module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py') + load_module(module_path, name) + + +def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None + """ + Load plugins of the specified type and track them in the specified database. + Only plugins which have already been imported will be loaded. + """ + plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]] + + for plugin in plugins: + database[plugin] = plugins[plugin] + + +def load_module(path, name): # type: (str, str) -> None + """Load a Python module using the given name and path.""" + if name in sys.modules: + return + + if sys.version_info >= (3, 4): + # noinspection PyUnresolvedReferences + import importlib.util + + # noinspection PyUnresolvedReferences + spec = importlib.util.spec_from_file_location(name, path) + # noinspection PyUnresolvedReferences + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + sys.modules[name] = module + else: + # noinspection PyDeprecation + import imp + + # load_source (and thus load_module) require a file opened with `open` in text mode + with open(to_bytes(path)) as module_file: + # noinspection PyDeprecation + imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE)) + + +@contextlib.contextmanager +def tempdir(): # type: () -> str + """Creates a temporary directory that is deleted outside the context scope.""" + temp_path = tempfile.mkdtemp() + yield temp_path + shutil.rmtree(temp_path) + + +@contextlib.contextmanager +def open_zipfile(path, mode='r'): + """Opens a zip file and closes the file automatically.""" + zib_obj = zipfile.ZipFile(path, mode=mode) + yield zib_obj + zib_obj.close() + + +display = Display() # pylint: disable=locally-disabled, invalid-name diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py new file mode 100644 index 00000000..1ac2e60d --- /dev/null +++ b/test/lib/ansible_test/_internal/util_common.py @@ -0,0 +1,487 @@ +"""Common utility code that depends on CommonConfig.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import atexit +import contextlib +import os +import shutil +import sys +import tempfile +import textwrap + +from . 
import types as t + +from .encoding import ( + to_bytes, +) + +from .util import ( + common_environment, + COVERAGE_CONFIG_NAME, + display, + find_python, + remove_tree, + MODE_DIRECTORY, + MODE_FILE_EXECUTE, + PYTHON_PATHS, + raw_command, + read_lines_without_comments, + ANSIBLE_TEST_DATA_ROOT, + ApplicationError, +) + +from .io import ( + write_text_file, + write_json_file, +) + +from .data import ( + data_context, +) + +from .provider.layout import ( + LayoutMessages, +) + +DOCKER_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]] +REMOTE_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]] +NETWORK_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]] + + +class ResultType: + """Test result type.""" + BOT = None # type: ResultType + COVERAGE = None # type: ResultType + DATA = None # type: ResultType + JUNIT = None # type: ResultType + LOGS = None # type: ResultType + REPORTS = None # type: ResultType + TMP = None # type: ResultType + + @staticmethod + def _populate(): + ResultType.BOT = ResultType('bot') + ResultType.COVERAGE = ResultType('coverage') + ResultType.DATA = ResultType('data') + ResultType.JUNIT = ResultType('junit') + ResultType.LOGS = ResultType('logs') + ResultType.REPORTS = ResultType('reports') + ResultType.TMP = ResultType('.tmp') + + def __init__(self, name): # type: (str) -> None + self.name = name + + @property + def relative_path(self): # type: () -> str + """The content relative path to the results.""" + return os.path.join(data_context().content.results_path, self.name) + + @property + def path(self): # type: () -> str + """The absolute path to the results.""" + return os.path.join(data_context().content.root, self.relative_path) + + def __str__(self): # type: () -> str + return self.name + + +# noinspection PyProtectedMember +ResultType._populate() # pylint: disable=protected-access + + +class CommonConfig: + """Configuration common to all commands.""" + def __init__(self, args, command): + """ + :type args: any + :type command: str + """ + self.command = command + + self.color = args.color # type: bool + self.explain = args.explain # type: bool + self.verbosity = args.verbosity # type: int + self.debug = args.debug # type: bool + self.truncate = args.truncate # type: int + self.redact = args.redact # type: bool + + self.info_stderr = False # type: bool + + self.cache = {} + + def get_ansible_config(self): # type: () -> str + """Return the path to the Ansible config for the given config.""" + return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg') + + +class NetworkPlatformSettings: + """Settings required for provisioning a network platform.""" + def __init__(self, collection, inventory_vars): # type: (str, t.Type[str, str]) -> None + self.collection = collection + self.inventory_vars = inventory_vars + + +def get_docker_completion(): + """ + :rtype: dict[str, dict[str, str]] + """ + return get_parameterized_completion(DOCKER_COMPLETION, 'docker') + + +def get_remote_completion(): + """ + :rtype: dict[str, dict[str, str]] + """ + return get_parameterized_completion(REMOTE_COMPLETION, 'remote') + + +def get_network_completion(): + """ + :rtype: dict[str, dict[str, str]] + """ + return get_parameterized_completion(NETWORK_COMPLETION, 'network') + + +def get_parameterized_completion(cache, name): + """ + :type cache: dict[str, dict[str, str]] + :type name: str + :rtype: dict[str, dict[str, str]] + """ + if not cache: + if data_context().content.collection: + context = 'collection' + else: + context = 'ansible-base' + + images = 
read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True) + + cache.update(dict(kvp for kvp in [parse_parameterized_completion(i) for i in images] if kvp and kvp[1].get('context', context) == context)) + + return cache + + +def parse_parameterized_completion(value): # type: (str) -> t.Optional[t.Tuple[str, t.Dict[str, str]]] + """Parse the given completion entry, returning the entry name and a dictionary of key/value settings.""" + values = value.split() + + if not values: + return None + + name = values[0] + data = dict((kvp[0], kvp[1] if len(kvp) > 1 else '') for kvp in [item.split('=', 1) for item in values[1:]]) + + return name, data + + +def docker_qualify_image(name): + """ + :type name: str + :rtype: str + """ + config = get_docker_completion().get(name, {}) + + return config.get('name', name) + + +def get_network_settings(args, platform, version): # type: (NetworkIntegrationConfig, str, str) -> NetworkPlatformSettings + """Returns settings for the given network platform and version.""" + platform_version = '%s/%s' % (platform, version) + completion = get_network_completion().get(platform_version, {}) + collection = args.platform_collection.get(platform, completion.get('collection')) + + settings = NetworkPlatformSettings( + collection, + dict( + ansible_connection=args.platform_connection.get(platform, completion.get('connection')), + ansible_network_os='%s.%s' % (collection, platform) if collection else platform, + ) + ) + + return settings + + +def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None + """Display the given layout messages.""" + if not messages: + return + + for message in messages.info: + display.info(message, verbosity=1) + + for message in messages.warning: + display.warning(message) + + if messages.error: + raise ApplicationError('\n'.join(messages.error)) + + +@contextlib.contextmanager +def named_temporary_file(args, prefix, suffix, directory, content): + """ + :param args: CommonConfig + :param prefix: str + :param suffix: str + :param directory: str + :param content: str | bytes | unicode + :rtype: str + """ + if args.explain: + yield os.path.join(directory, '%stemp%s' % (prefix, suffix)) + else: + with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd: + tempfile_fd.write(to_bytes(content)) + tempfile_fd.flush() + + yield tempfile_fd.name + + +def write_json_test_results(category, # type: ResultType + name, # type: str + content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]] + formatted=True, # type: bool + encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] + ): # type: (...) 
-> None + """Write the given json content to the specified test results path, creating directories as needed.""" + path = os.path.join(category.path, name) + write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder) + + +def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None + """Write the given text content to the specified test results path, creating directories as needed.""" + path = os.path.join(category.path, name) + write_text_file(path, content, create_directories=True) + + +def get_python_path(args, interpreter): + """ + :type args: TestConfig + :type interpreter: str + :rtype: str + """ + python_path = PYTHON_PATHS.get(interpreter) + + if python_path: + return python_path + + prefix = 'python-' + suffix = '-ansible' + + root_temp_dir = '/tmp' + + if args.explain: + return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix))) + + python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir) + injected_interpreter = os.path.join(python_path, 'python') + + # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments. + # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter. + use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter) + + if use_symlink: + display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1) + + os.symlink(interpreter, injected_interpreter) + else: + display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1) + + create_interpreter_wrapper(interpreter, injected_interpreter) + + os.chmod(python_path, MODE_DIRECTORY) + + if not PYTHON_PATHS: + atexit.register(cleanup_python_paths) + + PYTHON_PATHS[interpreter] = python_path + + return python_path + + +def create_temp_dir(prefix=None, suffix=None, base_dir=None): # type: (t.Optional[str], t.Optional[str], t.Optional[str]) -> str + """Create a temporary directory that persists until the current process exits.""" + temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir) + atexit.register(remove_tree, temp_path) + return temp_path + + +def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str, str) -> None + """Create a wrapper for the given Python interpreter at the specified path.""" + # sys.executable is used for the shebang to guarantee it is a binary instead of a script + # injected_interpreter could be a script from the system or our own wrapper created for the --venv option + shebang_interpreter = sys.executable + + code = textwrap.dedent(''' + #!%s + + from __future__ import absolute_import + + from os import execv + from sys import argv + + python = '%s' + + execv(python, [python] + argv[1:]) + ''' % (shebang_interpreter, interpreter)).lstrip() + + write_text_file(injected_interpreter, code) + + os.chmod(injected_interpreter, MODE_FILE_EXECUTE) + + +def cleanup_python_paths(): + """Clean up all temporary python directories.""" + for path in sorted(PYTHON_PATHS.values()): + display.info('Cleaning up temporary python directory: %s' % path, verbosity=2) + shutil.rmtree(path) + + +def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None): + """ + :type args: TestConfig + :type target_name: str + :type version: str + :type temp_path: str + :type module_coverage: bool + :type 
remote_temp_path: str | None + :rtype: dict[str, str] + """ + if temp_path: + # integration tests (both localhost and the optional testhost) + # config and results are in a temporary directory + coverage_config_base_path = temp_path + coverage_output_base_path = temp_path + elif args.coverage_config_base_path: + # unit tests, sanity tests and other special cases (localhost only) + # config is in a temporary directory + # results are in the source tree + coverage_config_base_path = args.coverage_config_base_path + coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path) + else: + raise Exception('No temp path and no coverage config base path. Check for missing coverage_context usage.') + + config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME) + coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % ( + args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version)) + + if not args.explain and not os.path.exists(config_file): + raise Exception('Missing coverage config file: %s' % config_file) + + if args.coverage_check: + # cause the 'coverage' module to be found, but not imported or enabled + coverage_file = '' + + # Enable code coverage collection on local Python programs (this does not include Ansible modules). + # Used by the injectors to support code coverage. + # Used by the pytest unit test plugin to support code coverage. + # The COVERAGE_FILE variable is also used directly by the 'coverage' module. + env = dict( + COVERAGE_CONF=config_file, + COVERAGE_FILE=coverage_file, + ) + + if module_coverage: + # Enable code coverage collection on Ansible modules (both local and remote). + # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage. + env.update(dict( + _ANSIBLE_COVERAGE_CONFIG=config_file, + _ANSIBLE_COVERAGE_OUTPUT=coverage_file, + )) + + if remote_temp_path: + # Include the command, target and label so the remote host can create a filename with that info. 
The remote + # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}' + env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % ( + args.command, target_name, args.coverage_label or 'remote')) + env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*') + + return env + + +def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True, + virtualenv=None, disable_coverage=False, remote_temp_path=None): + """ + :type args: TestConfig + :type cmd: collections.Iterable[str] + :type target_name: str + :type env: dict[str, str] + :type capture: bool + :type data: str | None + :type cwd: str | None + :type python_version: str | None + :type temp_path: str | None + :type module_coverage: bool + :type virtualenv: str | None + :type disable_coverage: bool + :type remote_temp_path: str | None + :rtype: str | None, str | None + """ + if not env: + env = common_environment() + else: + env = env.copy() + + cmd = list(cmd) + version = python_version or args.python_version + interpreter = virtualenv or find_python(version) + inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector') + + if not virtualenv: + # injection of python into the path is required when not activating a virtualenv + # otherwise scripts may find the wrong interpreter or possibly no interpreter + python_path = get_python_path(args, interpreter) + inject_path = python_path + os.path.pathsep + inject_path + + env['PATH'] = inject_path + os.path.pathsep + env['PATH'] + env['ANSIBLE_TEST_PYTHON_VERSION'] = version + env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter + + if args.coverage and not disable_coverage: + # add the necessary environment variables to enable code coverage collection + env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage, + remote_temp_path=remote_temp_path)) + + return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd) + + +def resolve_csharp_ps_util(import_name, path): + """ + :type import_name: str + :type path: str + """ + if data_context().content.is_ansible or not import_name.startswith('.'): + # We don't support relative paths for builtin utils, there's no point. 
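+ # Editorial note, not part of the original change: the resolution performed below
+ # (after this early return) can be illustrated with a hypothetical collection util.
+ # Given content prefix 'ns.col.', a util at path 'plugins/module_utils/net/tools.psm1'
+ # importing '..common' splits into ['', '', 'common']; each leading empty segment
+ # strips one trailing path component (first the file name, then 'net'), yielding
+ # 'ansible_collections.ns.col.plugins.module_utils.common'.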
+ return import_name + + packages = import_name.split('.') + module_packages = path.split(os.path.sep) + + for package in packages: + if not module_packages or package: + break + del module_packages[-1] + + return 'ansible_collections.%s%s' % (data_context().content.prefix, + '.'.join(module_packages + [p for p in packages if p])) + + +def run_command(args, cmd, capture=False, env=None, data=None, cwd=None, always=False, stdin=None, stdout=None, + cmd_verbosity=1, str_errors='strict'): + """ + :type args: CommonConfig + :type cmd: collections.Iterable[str] + :type capture: bool + :type env: dict[str, str] | None + :type data: str | None + :type cwd: str | None + :type always: bool + :type stdin: file | None + :type stdout: file | None + :type cmd_verbosity: int + :type str_errors: str + :rtype: str | None, str | None + """ + explain = args.explain and not always + return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, + cmd_verbosity=cmd_verbosity, str_errors=str_errors) diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py new file mode 100644 index 00000000..37eef367 --- /dev/null +++ b/test/lib/ansible_test/_internal/venv.py @@ -0,0 +1,227 @@ +"""Virtual environment management.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import sys + +from . import types as t + +from .config import ( + EnvironmentConfig, +) + +from .util import ( + find_python, + SubprocessError, + get_available_python_versions, + SUPPORTED_PYTHON_VERSIONS, + ANSIBLE_TEST_DATA_ROOT, + display, + remove_tree, +) + +from .util_common import ( + run_command, +) + + +def create_virtual_environment(args, # type: EnvironmentConfig + version, # type: str + path, # type: str + system_site_packages=False, # type: bool + pip=True, # type: bool + ): # type: (...) 
-> bool + """Create a virtual environment using venv or virtualenv for the requested Python version.""" + if os.path.isdir(path): + display.info('Using existing Python %s virtual environment: %s' % (version, path), verbosity=1) + return True + + python = find_python(version, required=False) + python_version = tuple(int(v) for v in version.split('.')) + + if not python: + # the requested python version could not be found + return False + + if python_version >= (3, 0): + # use the built-in 'venv' module on Python 3.x + # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results + # in a copy of the original virtual environment instead of creation of a new one + # avoid this issue by only using "real" python interpreters to invoke 'venv' + for real_python in iterate_real_pythons(args, version): + if run_venv(args, real_python, system_site_packages, pip, path): + display.info('Created Python %s virtual environment using "venv": %s' % (version, path), verbosity=1) + return True + + # something went wrong, most likely the package maintainer for the Python installation removed ensurepip + # which will prevent creation of a virtual environment without installation of other OS packages + + # use the installed 'virtualenv' module on the Python requested version + if run_virtualenv(args, python, python, system_site_packages, pip, path): + display.info('Created Python %s virtual environment using "virtualenv": %s' % (version, path), verbosity=1) + return True + + available_pythons = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS) + + for available_python_version, available_python_interpreter in sorted(available_pythons.items()): + virtualenv_version = get_virtualenv_version(args, available_python_interpreter) + + if not virtualenv_version: + # virtualenv not available for this Python or we were unable to detect the version + continue + + if python_version == (2, 6) and virtualenv_version >= (16, 0, 0): + # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16 + continue + + # try using 'virtualenv' from another Python to setup the desired version + if run_virtualenv(args, available_python_interpreter, python, system_site_packages, pip, path): + display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (version, available_python_version, path), verbosity=1) + return True + + # no suitable 'virtualenv' available + return False + + +def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str] + """ + Iterate through available real python interpreters of the requested version. + The current interpreter will be checked and then the path will be searched. 
+ """ + version_info = tuple(int(n) for n in version.split('.')) + current_python = None + + if version_info == sys.version_info[:len(version_info)]: + current_python = sys.executable + real_prefix = get_python_real_prefix(args, current_python) + + if real_prefix: + current_python = find_python(version, os.path.join(real_prefix, 'bin')) + + if current_python: + yield current_python + + path = os.environ.get('PATH', os.path.defpath) + + if not path: + return + + found_python = find_python(version, path) + + if not found_python: + return + + if found_python == current_python: + return + + real_prefix = get_python_real_prefix(args, found_python) + + if real_prefix: + found_python = find_python(version, os.path.join(real_prefix, 'bin')) + + if found_python: + yield found_python + + +def get_python_real_prefix(args, path): # type: (EnvironmentConfig, str) -> t.Optional[str] + """ + Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'. + """ + cmd = [path, os.path.join(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'virtualenvcheck.py'))] + check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0]) + real_prefix = check_result['real_prefix'] + return real_prefix + + +def run_venv(args, # type: EnvironmentConfig + run_python, # type: str + system_site_packages, # type: bool + pip, # type: bool + path, # type: str + ): # type: (...) -> bool + """Create a virtual environment using the 'venv' module. Not available on Python 2.x.""" + cmd = [run_python, '-m', 'venv'] + + if system_site_packages: + cmd.append('--system-site-packages') + + if not pip: + cmd.append('--without-pip') + + cmd.append(path) + + try: + run_command(args, cmd, capture=True) + except SubprocessError as ex: + remove_tree(path) + + if args.verbosity > 1: + display.error(ex) + + return False + + return True + + +def run_virtualenv(args, # type: EnvironmentConfig + run_python, # type: str + env_python, # type: str + system_site_packages, # type: bool + pip, # type: bool + path, # type: str + ): # type: (...) 
-> bool + """Create a virtual environment using the 'virtualenv' module.""" + # always specify --python to guarantee the desired interpreter is provided + # otherwise virtualenv may select a different interpreter than the one running virtualenv + cmd = [run_python, '-m', 'virtualenv', '--python', env_python] + + if system_site_packages: + cmd.append('--system-site-packages') + + if not pip: + cmd.append('--no-pip') + + cmd.append(path) + + try: + run_command(args, cmd, capture=True) + except SubprocessError as ex: + remove_tree(path) + + if args.verbosity > 1: + display.error(ex) + + return False + + return True + + +def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]] + """Get the virtualenv version for the given python intepreter, if available.""" + try: + return get_virtualenv_version.result + except AttributeError: + pass + + get_virtualenv_version.result = None + + cmd = [python, '-m', 'virtualenv', '--version'] + + try: + stdout = run_command(args, cmd, capture=True)[0] + except SubprocessError as ex: + if args.verbosity > 1: + display.error(ex) + + stdout = '' + + if stdout: + # noinspection PyBroadException + try: + get_virtualenv_version.result = tuple(int(v) for v in stdout.strip().split('.')) + except Exception: # pylint: disable=broad-except + pass + + return get_virtualenv_version.result diff --git a/test/lib/ansible_test/config/cloud-config-aws.ini.template b/test/lib/ansible_test/config/cloud-config-aws.ini.template new file mode 100644 index 00000000..88b9fea6 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-aws.ini.template @@ -0,0 +1,26 @@ +# This is the configuration template for ansible-test AWS integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned AWS credentials in ansible-test. +# +# If you do not want to use the automatically provisioned temporary AWS credentials, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration instead of temporary credentials. +# +# NOTE: Automatic provisioning of AWS credentials requires an ansible-core-ci API key. + +[default] +aws_access_key: @ACCESS_KEY +aws_secret_key: @SECRET_KEY +security_token: @SECURITY_TOKEN +aws_region: @REGION +# aws_cleanup controls whether the environment is cleaned up after tests have completed +# This only applies to tests that have a cleanup stage +# Defaults to true when using this template +# aws_cleanup: true +# aliases for backwards compatibility with older integration test playbooks +ec2_access_key: {{ aws_access_key }} +ec2_secret_key: {{ aws_secret_key }} +ec2_region: {{ aws_region }} diff --git a/test/lib/ansible_test/config/cloud-config-azure.ini.template b/test/lib/ansible_test/config/cloud-config-azure.ini.template new file mode 100644 index 00000000..ac5266ba --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-azure.ini.template @@ -0,0 +1,32 @@ +# This is the configuration template for ansible-test Azure integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned Azure credentials in ansible-test. +# +# If you do not want to use the automatically provisioned temporary Azure credentials, +# fill in the values below and save this file without the .template extension. 
+# This will cause ansible-test to use the given configuration instead of temporary credentials. +# +# NOTE: Automatic provisioning of Azure credentials requires one of: +# 1) ansible-core-ci API key in ~/.ansible-core-ci.key +# 2) Sherlock URL (including API key) in ~/.ansible-sherlock-ci.cfg + +[default] +# Provide either Service Principal or Active Directory credentials below. + +# Service Principal +AZURE_CLIENT_ID: +AZURE_SECRET: +AZURE_SUBSCRIPTION_ID: +AZURE_TENANT: + +# Active Directory +AZURE_AD_USER: +AZURE_PASSWORD: +AZURE_SUBSCRIPTION_ID: + +# Resource Groups +RESOURCE_GROUP: +RESOURCE_GROUP_SECONDARY: diff --git a/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template new file mode 100644 index 00000000..1c99e9b8 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template @@ -0,0 +1,9 @@ +# This is the configuration template for ansible-test cloudscale integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# + +[default] +cloudscale_api_token = @API_TOKEN diff --git a/test/lib/ansible_test/config/cloud-config-cs.ini.template b/test/lib/ansible_test/config/cloud-config-cs.ini.template new file mode 100644 index 00000000..f8d8a915 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-cs.ini.template @@ -0,0 +1,18 @@ +# This is the configuration template for ansible-test CloudStack integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned cloudstack-sim docker container in ansible-test. +# +# If you do not want to use the automatically provided CloudStack simulator, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration and not launch the simulator. +# +# It is recommended that you DO NOT use this template unless you cannot use the simulator. + +[default] +endpoint = http://@HOST:@PORT/client/api +key = @KEY +secret = @SECRET +timeout = 60 diff --git a/test/lib/ansible_test/config/cloud-config-gcp.ini.template b/test/lib/ansible_test/config/cloud-config-gcp.ini.template new file mode 100644 index 00000000..00a20971 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-gcp.ini.template @@ -0,0 +1,18 @@ +# This is the configuration template for ansible-test GCP integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned GCP simulator in ansible-test. +# +# If you do not want to use the automatically provided GCP simulator, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration and not launch the simulator. +# +# It is recommended that you DO NOT use this template unless you cannot use the simulator.
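+#
+# Example with purely illustrative values (editorial addition, not authoritative):
+#
+# gcp_project: my-test-project
+# gcp_cred_file: /path/to/service-account.json
+# gcp_cred_kind: serviceaccount
+# gcp_cred_email: tester@my-test-project.iam.gserviceaccount.com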
+ + [default] + gcp_project: @PROJECT + gcp_cred_file: @CRED_FILE + gcp_cred_kind: @CRED_KIND + gcp_cred_email: @CRED_EMAIL diff --git a/test/lib/ansible_test/config/cloud-config-hcloud.ini.template b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template new file mode 100644 index 00000000..8db658db --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template @@ -0,0 +1,15 @@ +# This is the configuration template for ansible-test Hetzner Cloud integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned Hetzner Cloud credentials in ansible-test. +# +# If you do not want to use the automatically provisioned temporary Hetzner Cloud credentials, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration instead of temporary credentials. +# +# NOTE: Automatic provisioning of Hetzner Cloud credentials requires an ansible-core-ci API key. + +[default] +hcloud_api_token = @TOKEN diff --git a/test/lib/ansible_test/config/cloud-config-opennebula.ini.template b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template new file mode 100644 index 00000000..00c56db1 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template @@ -0,0 +1,20 @@ +# This is the configuration template for ansible-test OpenNebula integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Running integration tests against previously recorded XMLRPC fixtures. +# +# If you want to test against a live OpenNebula platform, +# fill in the values below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration. +# +# If you run with @FIXTURES enabled (true) then you can decide if you want to +# run in @REPLAY mode (true) or record mode (false). + +[default] +opennebula_url: @URL +opennebula_username: @USERNAME +opennebula_password: @PASSWORD +opennebula_test_fixture: @FIXTURES +opennebula_test_fixture_replay: @REPLAY \ No newline at end of file diff --git a/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template new file mode 100644 index 00000000..0a10f23b --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template @@ -0,0 +1,12 @@ +# This is the configuration template for ansible-test OpenShift integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned openshift-origin docker container in ansible-test. +# +# If you do not want to use the automatically provided OpenShift container, +# place your kubeconfig file next to this file, with the same name, but without the .template extension. +# This will cause ansible-test to use the given configuration and not launch the automatically provided container. +# +# It is recommended that you DO NOT use this template unless you cannot use the automatically provided container.
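+#
+# Example (editorial addition): if you already have a kubeconfig at the default
+# location, copying it into place might look like:
+#
+# cp ~/.kube/config test/lib/ansible_test/config/cloud-config-openshift.kubeconfig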
diff --git a/test/lib/ansible_test/config/cloud-config-scaleway.ini.template b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template new file mode 100644 index 00000000..f10419e0 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template @@ -0,0 +1,13 @@ +# This is the configuration template for ansible-test Scaleway integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# +# If you want to test against the Scaleway public API, +# fill in the values below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration. + +[default] +key = @KEY +org = @ORG diff --git a/test/lib/ansible_test/config/cloud-config-tower.ini.template b/test/lib/ansible_test/config/cloud-config-tower.ini.template new file mode 100644 index 00000000..c76740ab --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-tower.ini.template @@ -0,0 +1,18 @@ +# This is the configuration template for ansible-test Tower integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned Tower credentials in ansible-test. +# +# If you do not want to use the automatically provisioned temporary Tower credentials, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration instead of temporary credentials. +# +# NOTE: Automatic provisioning of Tower credentials requires an ansible-core-ci API key. + +[default] +version=@VERSION +host=@HOST +username=@USERNAME +password=@PASSWORD diff --git a/test/lib/ansible_test/config/cloud-config-vcenter.ini.template b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template new file mode 100644 index 00000000..eff8bf74 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template @@ -0,0 +1,26 @@ +# This is the configuration template for ansible-test VMware integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the automatically provisioned VMware credentials in ansible-test. +# +# If you do not want to use the automatically provisioned temporary VMware credentials, +# fill in the @VAR placeholders below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration instead of temporary credentials. +# +# NOTE: Automatic provisioning of VMware credentials requires an ansible-core-ci API key. + +[DEFAULT] +vcenter_username: @VMWARE_USERNAME +vcenter_password: @VMWARE_PASSWORD +vcenter_hostname: @VMWARE_HOSTNAME +vmware_validate_certs: @VMWARE_VALIDATE_CERTS +esxi1_username: @ESXI1_USERNAME +esxi1_hostname: @ESXI1_HOSTNAME +esxi1_password: @ESXI1_PASSWORD +esxi2_username: @ESXI2_USERNAME +esxi2_hostname: @ESXI2_HOSTNAME +esxi2_password: @ESXI2_PASSWORD +vmware_proxy_host: @VMWARE_PROXY_HOST +vmware_proxy_port: @VMWARE_PROXY_PORT diff --git a/test/lib/ansible_test/config/cloud-config-vultr.ini.template b/test/lib/ansible_test/config/cloud-config-vultr.ini.template new file mode 100644 index 00000000..48b82108 --- /dev/null +++ b/test/lib/ansible_test/config/cloud-config-vultr.ini.template @@ -0,0 +1,12 @@ +# This is the configuration template for ansible-test Vultr integration tests.
+# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# +# If you want to test against the Vultr public API, +# fill in the values below and save this file without the .template extension. +# This will cause ansible-test to use the given configuration. + +[default] +key = @KEY diff --git a/test/lib/ansible_test/config/inventory.networking.template b/test/lib/ansible_test/config/inventory.networking.template new file mode 100644 index 00000000..a1545684 --- /dev/null +++ b/test/lib/ansible_test/config/inventory.networking.template @@ -0,0 +1,42 @@ +# This is the configuration template for ansible-test network-integration tests. +# +# You do not need this template if you are: +# +# 1) Running integration tests without using ansible-test. +# 2) Using the `--platform` option to provision temporary network instances on EC2. +# +# If you do not want to use the automatically provisioned temporary network instances, +# fill in the @VAR placeholders below and save this file without the .template extension. +# +# NOTE: Automatic provisioning of network instances on EC2 requires an ansible-core-ci API key. + +[@OS] +@NAME ansible_connection="local" ansible_host=@HOST ansible_network_os="@OS" ansible_user="@USER" ansible_ssh_private_key_file="@KEY_FILE" + +[aci:vars] +aci_hostname=your-apic-1 +aci_username=admin +aci_password=your-apic-password +aci_validate_certs=no +aci_use_ssl=yes +aci_use_proxy=no + +[aci] +localhost ansible_ssh_host=127.0.0.1 ansible_connection=local + +[mso:vars] +mso_hostname=your-mso-1 +mso_username=admin +mso_password=your-mso-password +mso_validate_certs=no +mso_use_ssl=yes +mso_use_proxy=no + +[mso] +localhost ansible_ssh_host=127.0.0.1 ansible_connection=local + +### +# Example +# +# [vyos] +# vyos01.example.net ansible_connection=local ansible_network_os="vyos" ansible_user=admin ansible_ssh_pass=mypassword diff --git a/test/lib/ansible_test/config/inventory.winrm.template b/test/lib/ansible_test/config/inventory.winrm.template new file mode 100644 index 00000000..34bbee2d --- /dev/null +++ b/test/lib/ansible_test/config/inventory.winrm.template @@ -0,0 +1,28 @@ +# This is the configuration template for ansible-test windows-integration tests. +# It can also be used with the legacy `make` based method of running tests. +# +# You do not need this template if you are: +# +# 1) Using the `--windows` option to provision temporary Windows instances on EC2. +# +# If you do not want to use the automatically provisioned temporary Windows instances, +# fill in the @VAR placeholders below and save this file without the .template extension. +# +# NOTE: Automatic provisioning of Windows instances on EC2 requires an ansible-core-ci API key. +# +# REMINDER: Standard ports for winrm are 5985 (HTTP) and 5986 (HTTPS). 
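+#
+# Example with purely illustrative values (editorial addition):
+#
+# win2016 ansible_host=203.0.113.10 ansible_user=Administrator ansible_password=ExamplePass123 ansible_port=5986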
+ +[windows] +@NAME ansible_host=@HOST ansible_user=@USER ansible_password=@PASSWORD ansible_port=@PORT + +[windows:vars] +ansible_connection=winrm +ansible_winrm_server_cert_validation=ignore + +# support winrm connection tests (temporary solution, does not support testing enable/disable of pipelining) +[winrm:children] +windows + +# support tests that target testhost +[testhost:children] +windows diff --git a/test/sanity/code-smell/configure-remoting-ps1.json b/test/sanity/code-smell/configure-remoting-ps1.json new file mode 100644 index 00000000..593b765d --- /dev/null +++ b/test/sanity/code-smell/configure-remoting-ps1.json @@ -0,0 +1,4 @@ +{ + "no_targets": true, + "output": "path-message" +} diff --git a/test/sanity/code-smell/configure-remoting-ps1.py b/test/sanity/code-smell/configure-remoting-ps1.py new file mode 100755 index 00000000..51dff20c --- /dev/null +++ b/test/sanity/code-smell/configure-remoting-ps1.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + + +def main(): + # required by external automated processes and should not be moved, renamed or converted to a symbolic link + original = 'examples/scripts/ConfigureRemotingForAnsible.ps1' + # required to be packaged with ansible-test and must match the original file, but cannot be a symbolic link + # the packaged version is needed to run tests when ansible-test has been installed + # keeping the packaged version identical to the original makes sure tests cover both files + packaged = 'test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1' + + copy_valid = False + + if os.path.isfile(original) and os.path.isfile(packaged): + with open(original, 'rb') as original_file: + original_content = original_file.read() + + with open(packaged, 'rb') as packaged_file: + packaged_content = packaged_file.read() + + if original_content == packaged_content: + copy_valid = True + + if not copy_valid: + print('%s: must be an exact copy of "%s"' % (packaged, original)) + + for path in [original, packaged]: + directory = path + + while True: + directory = os.path.dirname(directory) + + if not directory: + break + + if not os.path.isdir(directory): + print('%s: must be a directory' % directory) + + if os.path.islink(directory): + print('%s: cannot be a symbolic link' % directory) + + if not os.path.isfile(path): + print('%s: must be a file' % path) + + if os.path.islink(path): + print('%s: cannot be a symbolic link' % path) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/deprecated-config.json b/test/sanity/code-smell/deprecated-config.json new file mode 100644 index 00000000..4a884860 --- /dev/null +++ b/test/sanity/code-smell/deprecated-config.json @@ -0,0 +1,10 @@ +{ + "all_targets": true, + "output": "path-message", + "extensions": [ + ".py" + ], + "prefixes": [ + "lib/ansible/" + ] +} diff --git a/test/sanity/code-smell/deprecated-config.py b/test/sanity/code-smell/deprecated-config.py new file mode 100755 index 00000000..08e93c36 --- /dev/null +++ b/test/sanity/code-smell/deprecated-config.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# (c) 2018, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import mmap +import os +import re +import sys + +from distutils.version import StrictVersion + +import yaml + +import ansible.config + +from ansible.plugins.loader import fragment_loader +from ansible.release import __version__ as ansible_version +from ansible.utils.plugin_docs import get_docstring + +DOC_RE = re.compile(b'^DOCUMENTATION', flags=re.M) +ANSIBLE_MAJOR = StrictVersion('.'.join(ansible_version.split('.')[:2])) + + +def find_deprecations(obj, path=None): + if not isinstance(obj, (list, dict)): + return + + try: + items = obj.items() + except AttributeError: + items = enumerate(obj) + + for key, value in items: + if path is None: + this_path = [] + else: + this_path = path[:] + + this_path.append(key) + + if key != 'deprecated': + for result in find_deprecations(value, path=this_path): + yield result + else: + try: + version = value['version'] + this_path.append('version') + except KeyError: + version = value['removed_in'] + this_path.append('removed_in') + if StrictVersion(version) <= ANSIBLE_MAJOR: + yield (this_path, version) + + +def main(): + plugins = [] + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'rb') as f: + try: + mm_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: + continue + if DOC_RE.search(mm_file): + plugins.append(path) + mm_file.close() + + for plugin in plugins: + data = {} + data['doc'], data['examples'], data['return'], data['metadata'] = get_docstring(plugin, fragment_loader) + for result in find_deprecations(data['doc']): + print( + '%s: %s is scheduled for removal in %s' % (plugin, '.'.join(str(i) for i in result[0][:-2]), result[1]) + ) + + base = os.path.join(os.path.dirname(ansible.config.__file__), 'base.yml') + with open(base) as f: + data = yaml.safe_load(f) + + for result in find_deprecations(data): + print('%s: %s is scheduled for removal in %s' % (base, '.'.join(str(i) for i in result[0][:-2]), result[1])) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/deprecated-config.requirements.txt b/test/sanity/code-smell/deprecated-config.requirements.txt new file mode 100644 index 00000000..cfefdeec --- /dev/null +++ b/test/sanity/code-smell/deprecated-config.requirements.txt @@ -0,0 +1,2 @@ +jinja2 # ansible-base requirement +pyyaml diff --git a/test/sanity/code-smell/docs-build.json b/test/sanity/code-smell/docs-build.json new file mode 100644 index 00000000..0218bfc5 --- /dev/null +++ b/test/sanity/code-smell/docs-build.json @@ -0,0 +1,6 @@ +{ + "intercept": true, + "disabled": true, + "no_targets": true, + "output": "path-line-column-message" +} diff --git a/test/sanity/code-smell/docs-build.py b/test/sanity/code-smell/docs-build.py new file mode 100755 index 00000000..80eca15f --- /dev/null +++ b/test/sanity/code-smell/docs-build.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import shutil +import subprocess +import sys +import tempfile + + +def main(): + base_dir = os.getcwd() + os.path.sep + docs_dir = 
os.path.abspath('docs/docsite') + + # TODO: Remove this temporary hack to constrain 'cryptography' when we have + # a better story for dealing with it. + tmpfd, tmp = tempfile.mkstemp() + requirements_txt = os.path.join(base_dir, 'requirements.txt') + shutil.copy2(requirements_txt, tmp) + lines = [] + with open(requirements_txt, 'r') as f: + for line in f.readlines(): + if line.strip() == 'cryptography': + line = 'cryptography < 3.4\n' + lines.append(line) + + with open(requirements_txt, 'w') as f: + f.writelines(lines) + + try: + cmd = ['make', 'core_singlehtmldocs'] + sphinx = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=docs_dir) + stdout, stderr = sphinx.communicate() + finally: + shutil.move(tmp, requirements_txt) + + stdout = stdout.decode('utf-8') + stderr = stderr.decode('utf-8') + + if sphinx.returncode != 0: + sys.stderr.write("Command '%s' failed with status code: %d\n" % (' '.join(cmd), sphinx.returncode)) + + if stdout.strip(): + stdout = simplify_stdout(stdout) + + sys.stderr.write("--> Standard Output\n") + sys.stderr.write("%s\n" % stdout.strip()) + + if stderr.strip(): + sys.stderr.write("--> Standard Error\n") + sys.stderr.write("%s\n" % stderr.strip()) + + sys.exit(1) + + with open('docs/docsite/rst_warnings', 'r') as warnings_fd: + output = warnings_fd.read().strip() + lines = output.splitlines() + + known_warnings = { + 'block-quote-missing-blank-line': r'^Block quote ends without a blank line; unexpected unindent.$', + 'literal-block-lex-error': r'^Could not lex literal_block as "[^"]*". Highlighting skipped.$', + 'duplicate-label': r'^duplicate label ', + 'undefined-label': r'undefined label: ', + 'unknown-document': r'unknown document: ', + 'toc-tree-missing-document': r'toctree contains reference to nonexisting document ', + 'reference-target-not-found': r'[^ ]* reference target not found: ', + 'not-in-toc-tree': r"document isn't included in any toctree$", + 'unexpected-indentation': r'^Unexpected indentation.$', + 'definition-list-missing-blank-line': r'^Definition list ends without a blank line; unexpected unindent.$', + 'explicit-markup-missing-blank-line': r'Explicit markup ends without a blank line; unexpected unindent.$', + 'toc-tree-glob-pattern-no-match': r"^toctree glob pattern '[^']*' didn't match any documents$", + 'unknown-interpreted-text-role': '^Unknown interpreted text role "[^"]*".$', + } + + for line in lines: + match = re.search('^(?P[^:]+):((?P[0-9]+):)?((?P[0-9]+):)? (?PWARNING|ERROR): (?P.*)$', line) + + if not match: + path = 'docs/docsite/rst/index.rst' + lineno = 0 + column = 0 + code = 'unknown' + message = line + + # surface unknown lines while filtering out known lines to avoid excessive output + print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message)) + continue + + path = match.group('path') + lineno = int(match.group('line') or 0) + column = int(match.group('column') or 0) + level = match.group('level').lower() + message = match.group('message') + + path = os.path.abspath(path) + + if path.startswith(base_dir): + path = path[len(base_dir):] + + if path.startswith('rst/'): + path = 'docs/docsite/' + path # fix up paths reported relative to `docs/docsite/` + + if level == 'warning': + code = 'warning' + + for label, pattern in known_warnings.items(): + if re.search(pattern, message): + code = label + break + else: + code = 'error' + + print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message)) + + +def simplify_stdout(value): + """Simplify output by omitting earlier 'rendering: ...' 
messages.""" + lines = value.strip().splitlines() + + rendering = [] + keep = [] + + def truncate_rendering(): + """Keep last rendering line (if any) with a message about omitted lines as needed.""" + if not rendering: + return + + notice = rendering[-1] + + if len(rendering) > 1: + notice += ' (%d previous rendering line(s) omitted)' % (len(rendering) - 1) + + keep.append(notice) + # Could change to rendering.clear() if we do not support python2 + rendering[:] = [] + + for line in lines: + if line.startswith('rendering: '): + rendering.append(line) + continue + + truncate_rendering() + keep.append(line) + + truncate_rendering() + + result = '\n'.join(keep) + + return result + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/docs-build.requirements.txt b/test/sanity/code-smell/docs-build.requirements.txt new file mode 100644 index 00000000..5e458795 --- /dev/null +++ b/test/sanity/code-smell/docs-build.requirements.txt @@ -0,0 +1,6 @@ +jinja2 +pyyaml +sphinx +sphinx-notfound-page +straight.plugin +antsibull diff --git a/test/sanity/code-smell/no-unwanted-files.json b/test/sanity/code-smell/no-unwanted-files.json new file mode 100644 index 00000000..7a89ebbe --- /dev/null +++ b/test/sanity/code-smell/no-unwanted-files.json @@ -0,0 +1,7 @@ +{ + "include_symlinks": true, + "prefixes": [ + "lib/" + ], + "output": "path-message" +} diff --git a/test/sanity/code-smell/no-unwanted-files.py b/test/sanity/code-smell/no-unwanted-files.py new file mode 100755 index 00000000..bff09152 --- /dev/null +++ b/test/sanity/code-smell/no-unwanted-files.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +"""Prevent unwanted files from being added to the source tree.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + allowed_extensions = ( + '.cs', + '.ps1', + '.psm1', + '.py', + ) + + skip_paths = set([ + 'lib/ansible/config/ansible_builtin_runtime.yml', # not included in the sanity ignore file since it won't exist until after migration + ]) + + skip_directories = ( + 'lib/ansible/galaxy/data/', + ) + + for path in paths: + if path in skip_paths: + continue + + if any(path.startswith(skip_directory) for skip_directory in skip_directories): + continue + + if path.startswith('lib/') and not path.startswith('lib/ansible/'): + print('%s: all "lib" content must reside in the "lib/ansible" directory' % path) + continue + + ext = os.path.splitext(path)[1] + + if ext not in allowed_extensions: + print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions))) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/obsolete-files.json b/test/sanity/code-smell/obsolete-files.json new file mode 100644 index 00000000..02d39204 --- /dev/null +++ b/test/sanity/code-smell/obsolete-files.json @@ -0,0 +1,17 @@ +{ + "include_symlinks": true, + "prefixes": [ + "test/runner/", + "test/sanity/ansible-doc/", + "test/sanity/compile/", + "test/sanity/import/", + "test/sanity/pep8/", + "test/sanity/pslint/", + "test/sanity/pylint/", + "test/sanity/rstcheck/", + "test/sanity/shellcheck/", + "test/sanity/validate-modules/", + "test/sanity/yamllint/" + ], + "output": "path-message" +} diff --git a/test/sanity/code-smell/obsolete-files.py b/test/sanity/code-smell/obsolete-files.py new file mode 100755 index 00000000..e9ddc8a5 --- /dev/null +++ b/test/sanity/code-smell/obsolete-files.py @@ -0,0 +1,19 @@ 
+#!/usr/bin/env python +"""Prevent files from being added to directories that are now obsolete.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + for path in paths: + print('%s: directory "%s/" is obsolete and should not contain any files' % (path, os.path.dirname(path))) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/package-data.json b/test/sanity/code-smell/package-data.json new file mode 100644 index 00000000..2b8a5326 --- /dev/null +++ b/test/sanity/code-smell/package-data.json @@ -0,0 +1,6 @@ +{ + "intercept": true, + "disabled": true, + "all_targets": true, + "output": "path-message" +} diff --git a/test/sanity/code-smell/package-data.py b/test/sanity/code-smell/package-data.py new file mode 100755 index 00000000..ca5f5ef5 --- /dev/null +++ b/test/sanity/code-smell/package-data.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import contextlib +import fnmatch +import glob +import os +import re +import shutil +import subprocess +import sys +import tarfile +import tempfile + + +def assemble_files_to_ship(complete_file_list): + """ + This looks for all files which should be shipped in the sdist + """ + # All files which are in the repository except these: + ignore_patterns = ( + # Developer-only tools + '.azure-pipelines/*', + '.github/*', + '.github/*/*', + 'changelogs/fragments/*', + 'hacking/backport/*', + 'hacking/shippable/*', + 'hacking/tests/*', + 'hacking/ticket_stubs/*', + 'test/sanity/code-smell/botmeta.*', + 'test/utils/*', + 'test/utils/*/*', + 'test/utils/*/*/*', + '.git*', + ) + ignore_files = frozenset(( + # Developer-only tools + 'changelogs/config.yaml', + 'hacking/README.md', + 'hacking/ansible-profile', + 'hacking/cgroup_perf_recap_graph.py', + 'hacking/create_deprecated_issues.py', + 'hacking/deprecated_issue_template.md', + 'hacking/fix_test_syntax.py', + 'hacking/get_library.py', + 'hacking/metadata-tool.py', + 'hacking/report.py', + 'hacking/return_skeleton_generator.py', + 'hacking/test-module', + 'hacking/test-module.py', + 'test/support/README.md', + '.cherry_picker.toml', + '.mailmap', + # Generated as part of a build step + 'docs/docsite/rst/conf.py', + 'docs/docsite/rst/index.rst', + # Possibly should be included + 'examples/scripts/uptime.py', + 'examples/scripts/my_test.py', + 'examples/scripts/my_test_info.py', + 'examples/scripts/my_test_facts.py', + 'examples/DOCUMENTATION.yml', + 'examples/play.yml', + 'examples/hosts.yaml', + 'examples/hosts.yml', + 'examples/inventory_script_schema.json', + 'examples/plugin_filters.yml', + 'hacking/env-setup', + 'hacking/env-setup.fish', + 'MANIFEST', + )) + + # These files are generated and then intentionally added to the sdist + + # Manpages + manpages = ['docs/man/man1/ansible.1'] + for dirname, dummy, files in os.walk('bin'): + for filename in files: + path = os.path.join(dirname, filename) + if os.path.islink(path): + if os.readlink(path) == 'ansible': + manpages.append('docs/man/man1/%s.1' % filename) + + # Misc + misc_generated_files = [ + 'SYMLINK_CACHE.json', + 'PKG-INFO', + ] + + shipped_files = manpages + misc_generated_files + + for path in complete_file_list: + if path not in ignore_files: + for ignore in ignore_patterns: + if fnmatch.fnmatch(path, ignore): + break + else: + shipped_files.append(path) + + return 
shipped_files + + +def assemble_files_to_install(complete_file_list): + """ + This looks for all of the files which should show up in an installation of ansible + """ + ignore_patterns = tuple() + + pkg_data_files = [] + for path in complete_file_list: + + if path.startswith("lib/ansible"): + prefix = 'lib' + elif path.startswith("test/lib/ansible_test"): + prefix = 'test/lib' + else: + continue + + for ignore in ignore_patterns: + if fnmatch.fnmatch(path, ignore): + break + else: + pkg_data_files.append(os.path.relpath(path, prefix)) + + return pkg_data_files + + +@contextlib.contextmanager +def clean_repository(file_list): + """Copy the repository to clean it of artifacts""" + # Create a tempdir that will be the clean repo + with tempfile.TemporaryDirectory() as repo_root: + directories = set((repo_root + os.path.sep,)) + + for filename in file_list: + # Determine if we need to create the directory + directory = os.path.dirname(filename) + dest_dir = os.path.join(repo_root, directory) + if dest_dir not in directories: + os.makedirs(dest_dir) + + # Keep track of all the directories that now exist + path_components = directory.split(os.path.sep) + path = repo_root + for component in path_components: + path = os.path.join(path, component) + if path not in directories: + directories.add(path) + + # Copy the file + shutil.copy2(filename, dest_dir, follow_symlinks=False) + + yield repo_root + + +def create_sdist(tmp_dir): + """Create an sdist in the repository""" + create = subprocess.Popen( + ['make', 'snapshot', 'SDIST_DIR=%s' % tmp_dir], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + + stderr = create.communicate()[1] + + if create.returncode != 0: + raise Exception('make snapshot failed:\n%s' % stderr) + + # Determine path to sdist + tmp_dir_files = os.listdir(tmp_dir) + + if not tmp_dir_files: + raise Exception('sdist was not created in the temp dir') + elif len(tmp_dir_files) > 1: + raise Exception('Unexpected extra files in the temp dir') + + return os.path.join(tmp_dir, tmp_dir_files[0]) + + +def extract_sdist(sdist_path, tmp_dir): + """Untar the sdist""" + # Untar the sdist from the tmp_dir + with tarfile.open(os.path.join(tmp_dir, sdist_path), 'r|*') as sdist: + sdist.extractall(path=tmp_dir) + + # Determine the sdist directory name + sdist_filename = os.path.basename(sdist_path) + tmp_dir_files = os.listdir(tmp_dir) + try: + tmp_dir_files.remove(sdist_filename) + except ValueError: + # Unexpected: could not find the original sdist in the temp dir + raise + + if len(tmp_dir_files) > 1: + raise Exception('Unexpected extra files in the temp dir') + elif len(tmp_dir_files) < 1: + raise Exception('sdist extraction did not occur in the temp dir') + + return os.path.join(tmp_dir, tmp_dir_files[0]) + + +def install_sdist(tmp_dir, sdist_dir): + """Install the extracted sdist into the temporary directory""" + install = subprocess.Popen( + ['python', 'setup.py', 'install', '--root=%s' % tmp_dir], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + cwd=os.path.join(tmp_dir, sdist_dir), + ) + + stdout, stderr = install.communicate() + + if install.returncode != 0: + raise Exception('sdist install failed:\n%s' % stderr) + + # Determine the prefix for the installed files + match = re.search('^creating (%s/.*?/(?:site|dist)-packages)/ansible$' % + tmp_dir, stdout, flags=re.M) + return match.group(1) + + +def check_sdist_contains_expected(sdist_dir, to_ship_files): + """Check that the files we expect to ship are present in the sdist""" +
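+ # Editorial note (illustrative, not part of the original change): a failure from
+ # this check is reported as, e.g., 'lib/ansible/modules/ping.py: File was not added to sdist'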
results = [] + for filename in to_ship_files: + path = os.path.join(sdist_dir, filename) + if not os.path.exists(path): + results.append('%s: File was not added to sdist' % filename) + + # Also changelog + changelog_files = glob.glob(os.path.join(sdist_dir, 'changelogs/CHANGELOG-v2.[0-9]*.rst')) + if not changelog_files: + results.append('changelogs/CHANGELOG-v2.*.rst: Changelog file was not added to the sdist') + elif len(changelog_files) > 1: + results.append('changelogs/CHANGELOG-v2.*.rst: Too many changelog files: %s' + % changelog_files) + + return results + + +def check_sdist_files_are_wanted(sdist_dir, to_ship_files): + """Check that all files in the sdist are desired""" + results = [] + for dirname, dummy, files in os.walk(sdist_dir): + dirname = os.path.relpath(dirname, start=sdist_dir) + if dirname == '.': + dirname = '' + + for filename in files: + path = os.path.join(dirname, filename) + if path not in to_ship_files: + if fnmatch.fnmatch(path, 'changelogs/CHANGELOG-v2.[0-9]*.rst'): + # changelog files are expected + continue + + # FIXME: ansible-test doesn't pass the paths of symlinks to us so we aren't + # checking those + if not os.path.islink(os.path.join(sdist_dir, path)): + results.append('%s: File in sdist was not in the repository' % path) + + return results + + +def check_installed_contains_expected(install_dir, to_install_files): + """Check that all the files we expect to be installed are present""" + results = [] + for filename in to_install_files: + path = os.path.join(install_dir, filename) + if not os.path.exists(path): + results.append('%s: File not installed' % os.path.join('lib', filename)) + + return results + + +EGG_RE = re.compile('ansible[^/]+\\.egg-info/(PKG-INFO|SOURCES.txt|' + 'dependency_links.txt|not-zip-safe|requires.txt|top_level.txt)$') + + +def check_installed_files_are_wanted(install_dir, to_install_files): + """Check that all installed files were desired""" + results = [] + + for dirname, dummy, files in os.walk(install_dir): + dirname = os.path.relpath(dirname, start=install_dir) + if dirname == '.': + dirname = '' + + for filename in files: + # If this is a byte code cache, look for the python file's name + directory = dirname + if filename.endswith('.pyc') or filename.endswith('.pyo'): + # Remove the trailing "o" or "c" + filename = filename[:-1] + + if directory.endswith('%s__pycache__' % os.path.sep): + # Python3 byte code cache, look for the basename of + # __pycache__/__init__.cpython-36.py + segments = filename.rsplit('.', 2) + if len(segments) >= 3: + filename = '.'.join((segments[0], segments[2])) + directory = os.path.dirname(directory) + + path = os.path.join(directory, filename) + + # Test that the file was listed for installation + if path not in to_install_files: + # FIXME: ansible-test doesn't pass the paths of symlinks to us so we + # aren't checking those + if not os.path.islink(os.path.join(install_dir, path)): + if not EGG_RE.match(path): + results.append('%s: File was installed but was not supposed to be' % path) + + return results + + +def _find_symlinks(): + symlink_list = [] + for dirname, directories, filenames in os.walk('.'): + for filename in filenames: + path = os.path.join(dirname, filename) + # Strip off "./" from the front + path = path[2:] + if os.path.islink(path): + symlink_list.append(path) + + return symlink_list + + +def main(): + """Main entry point.""" + # All of the files in the repository + complete_file_list = [] + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + complete_file_list.append(path) + + # ansible-test isn't
+
+
+def main():
+    """Assemble the file lists, build and install an sdist, and verify the results"""
+    complete_file_list = []
+    for path in sys.argv[1:] or sys.stdin.read().splitlines():
+        complete_file_list.append(path)
+
+    # ansible-test isn't currently passing symlinks to us so construct those ourselves for now
+    for filename in _find_symlinks():
+        if filename not in complete_file_list:
+            # For some reason ansible-test is passing us lib/ansible/module_utils/ansible_release.py
+            # which is a symlink even though it doesn't pass any others
+            complete_file_list.append(filename)
+
+    # We may run this after docs sanity tests so get a clean repository to run in
+    with clean_repository(complete_file_list) as clean_repo_dir:
+        os.chdir(clean_repo_dir)
+
+        to_ship_files = assemble_files_to_ship(complete_file_list)
+        to_install_files = assemble_files_to_install(complete_file_list)
+
+        results = []
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            sdist_path = create_sdist(tmp_dir)
+            sdist_dir = extract_sdist(sdist_path, tmp_dir)
+
+            # Check that the files that are supposed to be in the sdist are there
+            results.extend(check_sdist_contains_expected(sdist_dir, to_ship_files))
+
+            # Check that the files that are in the sdist are in the repository
+            results.extend(check_sdist_files_are_wanted(sdist_dir, to_ship_files))
+
+            # install the sdist
+            install_dir = install_sdist(tmp_dir, sdist_dir)
+
+            # Check that the files that are supposed to be installed are there
+            results.extend(check_installed_contains_expected(install_dir, to_install_files))
+
+            # Check that the files that are installed are supposed to be installed
+            results.extend(check_installed_files_are_wanted(install_dir, to_install_files))
+
+    for message in results:
+        print(message)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/sanity/code-smell/package-data.requirements.txt b/test/sanity/code-smell/package-data.requirements.txt
new file mode 100644
index 00000000..5d74c715
--- /dev/null
+++ b/test/sanity/code-smell/package-data.requirements.txt
@@ -0,0 +1,10 @@
+docutils
+jinja2
+packaging
+pyyaml # ansible-base requirement
+rstcheck
+setuptools > 39.2
+straight.plugin
+
+# changelog build requires python 3.6+
+antsibull-changelog ; python_version >= '3.6'
diff --git a/test/sanity/code-smell/release-names.json b/test/sanity/code-smell/release-names.json
new file mode 100644
index 00000000..593b765d
--- /dev/null
+++ b/test/sanity/code-smell/release-names.json
@@ -0,0 +1,4 @@
+{
+    "no_targets": true,
+    "output": "path-message"
+}
diff --git a/test/sanity/code-smell/release-names.py b/test/sanity/code-smell/release-names.py
new file mode 100755
index 00000000..f8003320
--- /dev/null
+++ b/test/sanity/code-smell/release-names.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2019, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+""" +Test that the release name is present in the list of used up release names +""" + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from yaml import safe_load + +from ansible.release import __codename__ + + +def main(): + """Entrypoint to the script""" + + with open('.github/RELEASE_NAMES.yml') as f: + releases = safe_load(f.read()) + + # Why this format? The file's sole purpose is to be read by a human when they need to know + # which release names have already been used. So: + # 1) It's easier for a human to find the release names when there's one on each line + # 2) It helps keep other people from using the file and then asking for new features in it + for name in (r.split(maxsplit=1)[1] for r in releases): + if __codename__ == name: + break + else: + print('.github/RELEASE_NAMES.yml: Current codename was not present in the file') + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/release-names.requirements.txt b/test/sanity/code-smell/release-names.requirements.txt new file mode 100644 index 00000000..c3726e8b --- /dev/null +++ b/test/sanity/code-smell/release-names.requirements.txt @@ -0,0 +1 @@ +pyyaml diff --git a/test/sanity/code-smell/required-and-default-attributes.json b/test/sanity/code-smell/required-and-default-attributes.json new file mode 100644 index 00000000..dd9ac7b1 --- /dev/null +++ b/test/sanity/code-smell/required-and-default-attributes.json @@ -0,0 +1,9 @@ +{ + "prefixes": [ + "lib/ansible/" + ], + "extensions": [ + ".py" + ], + "output": "path-line-column-message" +} diff --git a/test/sanity/code-smell/required-and-default-attributes.py b/test/sanity/code-smell/required-and-default-attributes.py new file mode 100755 index 00000000..5ef410bd --- /dev/null +++ b/test/sanity/code-smell/required-and-default-attributes.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'(FieldAttribute.*(default|required).*(default|required))', text) + + if match: + print('%s:%d:%d: use only one of `default` or `required` with `FieldAttribute`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/skip.txt b/test/sanity/code-smell/skip.txt new file mode 100644 index 00000000..6fb327b3 --- /dev/null +++ b/test/sanity/code-smell/skip.txt @@ -0,0 +1,2 @@ +deprecated-config.py # disabled by default, to be enabled by the release manager after branching +update-bundled.py # disabled by default, to be enabled by the release manager after branching diff --git a/test/sanity/code-smell/test-constraints.json b/test/sanity/code-smell/test-constraints.json new file mode 100644 index 00000000..69b07bf3 --- /dev/null +++ b/test/sanity/code-smell/test-constraints.json @@ -0,0 +1,9 @@ +{ + "prefixes": [ + "test/lib/ansible_test/_data/requirements/" + ], + "extensions": [ + ".txt" + ], + "output": "path-line-column-message" +} diff --git a/test/sanity/code-smell/test-constraints.py b/test/sanity/code-smell/test-constraints.py new file mode 100755 index 00000000..e8b9c795 --- /dev/null +++ b/test/sanity/code-smell/test-constraints.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re 
+import sys + + +def main(): + for path in sys.argv[1:] or sys.stdin.read().splitlines(): + with open(path, 'r') as path_fd: + for line, text in enumerate(path_fd.readlines()): + match = re.search(r'^[^;#]*?([<>=])(?!.*sanity_ok.*)', text) + + if match: + print('%s:%d:%d: put constraints in `test/lib/ansible_test/_data/requirements/constraints.txt`' % ( + path, line + 1, match.start(1) + 1)) + + +if __name__ == '__main__': + main() diff --git a/test/sanity/code-smell/update-bundled.json b/test/sanity/code-smell/update-bundled.json new file mode 100644 index 00000000..379bf4d7 --- /dev/null +++ b/test/sanity/code-smell/update-bundled.json @@ -0,0 +1,8 @@ +{ + "all_targets": true, + "ignore_self": true, + "extensions": [ + ".py" + ], + "output": "path-message" +} diff --git a/test/sanity/code-smell/update-bundled.py b/test/sanity/code-smell/update-bundled.py new file mode 100755 index 00000000..121e225f --- /dev/null +++ b/test/sanity/code-smell/update-bundled.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# (c) 2018, Ansible Project +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +""" +This test checks whether the libraries we're bundling are out of date and need to be synced with +a newer upstream release. +""" + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import fnmatch +import json +import re +import sys +from distutils.version import LooseVersion + +import packaging.specifiers + +from ansible.module_utils.urls import open_url + + +BUNDLED_RE = re.compile(b'\\b_BUNDLED_METADATA\\b') + + +def get_bundled_libs(paths): + """ + Return the set of known bundled libraries + + :arg paths: The paths which the test has been instructed to check + :returns: The list of all files which we know to contain bundled libraries. If a bundled + library consists of multiple files, this should be the file which has metadata included. 
+ """ + bundled_libs = set() + for filename in fnmatch.filter(paths, 'lib/ansible/compat/*/__init__.py'): + bundled_libs.add(filename) + + bundled_libs.add('lib/ansible/module_utils/distro/__init__.py') + bundled_libs.add('lib/ansible/module_utils/six/__init__.py') + bundled_libs.add('lib/ansible/module_utils/compat/ipaddress.py') + # backports.ssl_match_hostname should be moved to its own file in the future + bundled_libs.add('lib/ansible/module_utils/urls.py') + + return bundled_libs + + +def get_files_with_bundled_metadata(paths): + """ + Search for any files which have bundled metadata inside of them + + :arg paths: Iterable of filenames to search for metadata inside of + :returns: A set of pathnames which contained metadata + """ + + with_metadata = set() + for path in paths: + with open(path, 'rb') as f: + body = f.read() + + if BUNDLED_RE.search(body): + with_metadata.add(path) + + return with_metadata + + +def get_bundled_metadata(filename): + """ + Retrieve the metadata about a bundled library from a python file + + :arg filename: The filename to look inside for the metadata + :raises ValueError: If we're unable to extract metadata from the file + :returns: The metadata from the python file + """ + with open(filename, 'r') as module: + for line in module: + if line.strip().startswith('_BUNDLED_METADATA'): + data = line[line.index('{'):].strip() + break + else: + raise ValueError('Unable to check bundled library for update. Please add' + ' _BUNDLED_METADATA dictionary to the library file with' + ' information on pypi name and bundled version.') + metadata = json.loads(data) + return metadata + + +def get_latest_applicable_version(pypi_data, constraints=None): + """Get the latest pypi version of the package that we allow + + :arg pypi_data: Pypi information about the data as returned by + ``https://pypi.org/pypi/{pkg_name}/json`` + :kwarg constraints: version constraints on what we're allowed to use as specified by + the bundled metadata + :returns: The most recent version on pypi that are allowed by ``constraints`` + """ + latest_version = "0" + if constraints: + version_specification = packaging.specifiers.SpecifierSet(constraints) + for version in pypi_data['releases']: + if version in version_specification: + if LooseVersion(version) > LooseVersion(latest_version): + latest_version = version + else: + latest_version = pypi_data['info']['version'] + + return latest_version + + +def main(): + """Entrypoint to the script""" + + paths = sys.argv[1:] or sys.stdin.read().splitlines() + + bundled_libs = get_bundled_libs(paths) + files_with_bundled_metadata = get_files_with_bundled_metadata(paths) + + for filename in files_with_bundled_metadata.difference(bundled_libs): + print('{0}: ERROR: File contains _BUNDLED_METADATA but needs to be added to' + ' test/sanity/code-smell/update-bundled.py'.format(filename)) + + for filename in bundled_libs: + try: + metadata = get_bundled_metadata(filename) + except ValueError as e: + print('{0}: ERROR: {1}'.format(filename, e)) + continue + except (IOError, OSError) as e: + if e.errno == 2: + print('{0}: ERROR: {1}. 
+
+
+def main():
+    """Entrypoint to the script"""
+
+    paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+    bundled_libs = get_bundled_libs(paths)
+    files_with_bundled_metadata = get_files_with_bundled_metadata(paths)
+
+    for filename in files_with_bundled_metadata.difference(bundled_libs):
+        print('{0}: ERROR: File contains _BUNDLED_METADATA but needs to be added to'
+              ' test/sanity/code-smell/update-bundled.py'.format(filename))
+
+    for filename in bundled_libs:
+        try:
+            metadata = get_bundled_metadata(filename)
+        except ValueError as e:
+            print('{0}: ERROR: {1}'.format(filename, e))
+            continue
+        except (IOError, OSError) as e:
+            if e.errno == 2:
+                print('{0}: ERROR: {1}. Perhaps the bundled library has been removed'
+                      ' or moved and the bundled library test needs to be modified as'
+                      ' well?'.format(filename, e))
+                # Nothing further can be checked for this file; without this the
+                # loop would fall through and reference an unbound `metadata` below.
+                continue
+            raise
+
+        pypi_fh = open_url('https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name']))
+        pypi_data = json.loads(pypi_fh.read().decode('utf-8'))
+
+        constraints = metadata.get('version_constraints', None)
+        latest_version = get_latest_applicable_version(pypi_data, constraints)
+
+        if LooseVersion(metadata['version']) < LooseVersion(latest_version):
+            print('{0}: UPDATE {1} from {2} to {3} {4}'.format(
+                filename,
+                metadata['pypi_name'],
+                metadata['version'],
+                latest_version,
+                'https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name'])))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/sanity/code-smell/update-bundled.requirements.txt b/test/sanity/code-smell/update-bundled.requirements.txt
new file mode 100644
index 00000000..748809f7
--- /dev/null
+++ b/test/sanity/code-smell/update-bundled.requirements.txt
@@ -0,0 +1 @@
+packaging
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
new file mode 100644
index 00000000..5275f076
--- /dev/null
+++ b/test/sanity/ignore.txt
@@ -0,0 +1,427 @@
+docs/bin/find-plugin-refs.py future-import-boilerplate
+docs/bin/find-plugin-refs.py metaclass-boilerplate
+docs/docsite/_extensions/pygments_lexer.py future-import-boilerplate
+docs/docsite/_extensions/pygments_lexer.py metaclass-boilerplate
+docs/docsite/_themes/sphinx_rtd_theme/__init__.py future-import-boilerplate
+docs/docsite/_themes/sphinx_rtd_theme/__init__.py metaclass-boilerplate
+docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst no-smart-quotes
+examples/play.yml shebang
+examples/scripts/my_test.py shebang # example module but not in a normal module location
+examples/scripts/my_test_facts.py shebang # example module but not in a normal module location
+examples/scripts/my_test_info.py shebang # example module but not in a normal module location
+examples/scripts/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
+examples/scripts/upgrade_to_ps3.ps1 pslint:PSCustomUseLiteralPath
+examples/scripts/upgrade_to_ps3.ps1 pslint:PSUseApprovedVerbs
+hacking/build-ansible.py shebang # only run by release engineers, Python 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-2.6!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-2.7!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-3.5!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-2.6!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-2.7!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-3.5!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-2.6!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-2.7!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-3.5!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-2.6!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-2.7!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-3.5!skip # docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/generate_man.py compile-2.6!skip # docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/generate_man.py compile-2.7!skip # docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/generate_man.py compile-3.5!skip # docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-2.6!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-2.7!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-3.5!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-2.6!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-2.7!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-3.5!skip # release process only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-2.6!skip # release process and docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-2.7!skip # release process and docs build only, 3.6+ required +hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-3.5!skip # release process and docs build only, 3.6+ required +hacking/fix_test_syntax.py future-import-boilerplate +hacking/fix_test_syntax.py metaclass-boilerplate +hacking/get_library.py future-import-boilerplate +hacking/get_library.py metaclass-boilerplate +hacking/report.py future-import-boilerplate +hacking/report.py metaclass-boilerplate +hacking/return_skeleton_generator.py future-import-boilerplate +hacking/return_skeleton_generator.py metaclass-boilerplate +hacking/test-module.py future-import-boilerplate +hacking/test-module.py metaclass-boilerplate +hacking/tests/gen_distribution_version_testcase.py future-import-boilerplate +hacking/tests/gen_distribution_version_testcase.py metaclass-boilerplate +lib/ansible/cli/console.py pylint:blacklisted-name +lib/ansible/cli/scripts/ansible_cli_stub.py shebang +lib/ansible/cli/scripts/ansible_connection_cli_stub.py shebang +lib/ansible/config/base.yml no-unwanted-files +lib/ansible/executor/playbook_executor.py pylint:blacklisted-name +lib/ansible/executor/powershell/async_watchdog.ps1 pslint:PSCustomUseLiteralPath +lib/ansible/executor/powershell/async_wrapper.ps1 pslint:PSCustomUseLiteralPath +lib/ansible/executor/powershell/exec_wrapper.ps1 pslint:PSCustomUseLiteralPath +lib/ansible/executor/task_queue_manager.py pylint:blacklisted-name +lib/ansible/galaxy/collection.py compile-2.6!skip # 'ansible-galaxy collection' requires 2.7+ +lib/ansible/module_utils/compat/_selectors2.py future-import-boilerplate # ignore bundled +lib/ansible/module_utils/compat/_selectors2.py metaclass-boilerplate # ignore bundled +lib/ansible/module_utils/compat/_selectors2.py pylint:blacklisted-name +lib/ansible/module_utils/distro/__init__.py empty-init # breaks namespacing, bundled, do not override +lib/ansible/module_utils/distro/_distro.py future-import-boilerplate # ignore bundled +lib/ansible/module_utils/distro/_distro.py metaclass-boilerplate # ignore bundled 
+lib/ansible/module_utils/distro/_distro.py no-assert +lib/ansible/module_utils/distro/_distro.py pep8!skip # bundled code we don't want to modify +lib/ansible/module_utils/facts/__init__.py empty-init # breaks namespacing, deprecate and eventually remove +lib/ansible/module_utils/facts/network/linux.py pylint:blacklisted-name +lib/ansible/module_utils/facts/system/distribution.py pylint:ansible-bad-function +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 pslint:PSUseApprovedVerbs +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSProvideCommentHelp # need to agree on best format for comment location +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSUseApprovedVerbs +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSCustomUseLiteralPath +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSProvideCommentHelp +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSCustomUseLiteralPath +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSUseApprovedVerbs +lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 pslint:PSUseApprovedVerbs +lib/ansible/module_utils/pycompat24.py no-get-exception +lib/ansible/module_utils/six/__init__.py empty-init # breaks namespacing, bundled, do not override +lib/ansible/module_utils/six/__init__.py future-import-boilerplate # ignore bundled +lib/ansible/module_utils/six/__init__.py metaclass-boilerplate # ignore bundled +lib/ansible/module_utils/six/__init__.py no-basestring +lib/ansible/module_utils/six/__init__.py no-dict-iteritems +lib/ansible/module_utils/six/__init__.py no-dict-iterkeys +lib/ansible/module_utils/six/__init__.py no-dict-itervalues +lib/ansible/module_utils/six/__init__.py replace-urlopen +lib/ansible/module_utils/urls.py pylint:blacklisted-name +lib/ansible/module_utils/urls.py replace-urlopen +lib/ansible/modules/command.py validate-modules:doc-default-does-not-match-spec # _uses_shell is undocumented +lib/ansible/modules/command.py validate-modules:doc-missing-type +lib/ansible/modules/command.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/command.py validate-modules:parameter-list-no-elements +lib/ansible/modules/command.py validate-modules:undocumented-parameter +lib/ansible/modules/expect.py validate-modules:doc-missing-type +lib/ansible/modules/assemble.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/blockinfile.py validate-modules:doc-choices-do-not-match-spec +lib/ansible/modules/blockinfile.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/copy.py pylint:blacklisted-name +lib/ansible/modules/copy.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/copy.py validate-modules:doc-type-does-not-match-spec +lib/ansible/modules/copy.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/copy.py validate-modules:undocumented-parameter +lib/ansible/modules/file.py pylint:ansible-bad-function +lib/ansible/modules/file.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/file.py validate-modules:undocumented-parameter +lib/ansible/modules/find.py use-argspec-type-path # fix needed +lib/ansible/modules/lineinfile.py validate-modules:doc-choices-do-not-match-spec +lib/ansible/modules/lineinfile.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/lineinfile.py 
validate-modules:nonexistent-parameter-documented +lib/ansible/modules/replace.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/stat.py validate-modules:doc-default-does-not-match-spec # get_md5 is undocumented +lib/ansible/modules/stat.py validate-modules:parameter-invalid +lib/ansible/modules/stat.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/stat.py validate-modules:undocumented-parameter +lib/ansible/modules/unarchive.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/unarchive.py validate-modules:parameter-list-no-elements +lib/ansible/modules/get_url.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/uri.py pylint:blacklisted-name +lib/ansible/modules/uri.py validate-modules:doc-required-mismatch +lib/ansible/modules/uri.py validate-modules:parameter-list-no-elements +lib/ansible/modules/uri.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/pip.py pylint:blacklisted-name +lib/ansible/modules/pip.py validate-modules:doc-elements-mismatch +lib/ansible/modules/pip.py validate-modules:invalid-ansiblemodule-schema +lib/ansible/modules/apt.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/apt.py validate-modules:parameter-invalid +lib/ansible/modules/apt.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/apt.py validate-modules:undocumented-parameter +lib/ansible/modules/apt_key.py validate-modules:mutually_exclusive-unknown +lib/ansible/modules/apt_key.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/apt_key.py validate-modules:undocumented-parameter +lib/ansible/modules/apt_repository.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/apt_repository.py validate-modules:parameter-invalid +lib/ansible/modules/apt_repository.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/apt_repository.py validate-modules:undocumented-parameter +lib/ansible/modules/dnf.py validate-modules:doc-missing-type +lib/ansible/modules/dnf.py validate-modules:doc-required-mismatch +lib/ansible/modules/dnf.py validate-modules:parameter-invalid +lib/ansible/modules/dnf.py validate-modules:parameter-list-no-elements +lib/ansible/modules/dnf.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/dpkg_selections.py validate-modules:doc-missing-type +lib/ansible/modules/dpkg_selections.py validate-modules:doc-required-mismatch +lib/ansible/modules/package_facts.py validate-modules:doc-choices-do-not-match-spec +lib/ansible/modules/package_facts.py validate-modules:doc-missing-type +lib/ansible/modules/package_facts.py validate-modules:parameter-list-no-elements +lib/ansible/modules/rpm_key.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/yum.py pylint:blacklisted-name +lib/ansible/modules/yum.py validate-modules:doc-missing-type +lib/ansible/modules/yum.py validate-modules:parameter-invalid +lib/ansible/modules/yum.py validate-modules:parameter-list-no-elements +lib/ansible/modules/yum.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/yum_repository.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/yum_repository.py validate-modules:doc-missing-type +lib/ansible/modules/yum_repository.py validate-modules:parameter-list-no-elements +lib/ansible/modules/yum_repository.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/yum_repository.py validate-modules:undocumented-parameter +lib/ansible/modules/git.py pylint:blacklisted-name 
+lib/ansible/modules/git.py use-argspec-type-path +lib/ansible/modules/git.py validate-modules:doc-missing-type +lib/ansible/modules/git.py validate-modules:doc-required-mismatch +lib/ansible/modules/git.py validate-modules:parameter-list-no-elements +lib/ansible/modules/git.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/subversion.py validate-modules:doc-required-mismatch +lib/ansible/modules/subversion.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/subversion.py validate-modules:undocumented-parameter +lib/ansible/modules/getent.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/hostname.py validate-modules:invalid-ansiblemodule-schema +lib/ansible/modules/hostname.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/iptables.py pylint:blacklisted-name +lib/ansible/modules/iptables.py validate-modules:parameter-list-no-elements +lib/ansible/modules/service.py validate-modules:nonexistent-parameter-documented +lib/ansible/modules/service.py validate-modules:use-run-command-not-popen +lib/ansible/modules/setup.py validate-modules:doc-missing-type +lib/ansible/modules/setup.py validate-modules:parameter-list-no-elements +lib/ansible/modules/setup.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/systemd.py validate-modules:parameter-invalid +lib/ansible/modules/systemd.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/systemd.py validate-modules:return-syntax-error +lib/ansible/modules/sysvinit.py validate-modules:parameter-list-no-elements +lib/ansible/modules/sysvinit.py validate-modules:parameter-type-not-in-doc +lib/ansible/modules/sysvinit.py validate-modules:return-syntax-error +lib/ansible/modules/user.py validate-modules:doc-default-does-not-match-spec +lib/ansible/modules/user.py validate-modules:doc-default-incompatible-type +lib/ansible/modules/user.py validate-modules:parameter-list-no-elements +lib/ansible/modules/user.py validate-modules:use-run-command-not-popen +lib/ansible/modules/async_status.py use-argspec-type-path +lib/ansible/modules/async_status.py validate-modules!skip +lib/ansible/modules/async_wrapper.py ansible-doc!skip # not an actual module +lib/ansible/modules/async_wrapper.py pylint:ansible-bad-function +lib/ansible/modules/async_wrapper.py use-argspec-type-path +lib/ansible/modules/wait_for.py validate-modules:parameter-list-no-elements +lib/ansible/parsing/vault/__init__.py pylint:blacklisted-name +lib/ansible/playbook/base.py pylint:blacklisted-name +lib/ansible/playbook/collectionsearch.py required-and-default-attributes # https://github.com/ansible/ansible/issues/61460 +lib/ansible/playbook/helpers.py pylint:blacklisted-name +lib/ansible/playbook/role/__init__.py pylint:blacklisted-name +lib/ansible/plugins/action/normal.py action-plugin-docs # default action plugin for modules without a dedicated action plugin +lib/ansible/plugins/cache/base.py ansible-doc!skip # not a plugin, but a stub for backwards compatibility +lib/ansible/plugins/lookup/sequence.py pylint:blacklisted-name +lib/ansible/plugins/strategy/__init__.py pylint:blacklisted-name +lib/ansible/plugins/strategy/linear.py pylint:blacklisted-name +lib/ansible/vars/hostvars.py pylint:blacklisted-name +test/integration/targets/ansible-runner/files/adhoc_example1.py future-import-boilerplate +test/integration/targets/ansible-runner/files/adhoc_example1.py metaclass-boilerplate +test/integration/targets/ansible-runner/files/playbook_example1.py future-import-boilerplate 
+test/integration/targets/ansible-runner/files/playbook_example1.py metaclass-boilerplate +test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import +test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import-from +test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-function +test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/filter/check_pylint.py pylint:blacklisted-name +test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level +test/integration/targets/async_fail/library/async_test.py future-import-boilerplate +test/integration/targets/async_fail/library/async_test.py metaclass-boilerplate +test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py future-import-boilerplate +test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py pylint:relative-beyond-top-level +test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py pylint:relative-beyond-top-level +test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py pylint:relative-beyond-top-level +test/integration/targets/expect/files/test_command.py future-import-boilerplate +test/integration/targets/expect/files/test_command.py metaclass-boilerplate +test/integration/targets/gathering_facts/library/bogus_facts shebang +test/integration/targets/gathering_facts/library/facts_one shebang +test/integration/targets/gathering_facts/library/facts_two shebang +test/integration/targets/get_url/files/testserver.py future-import-boilerplate +test/integration/targets/get_url/files/testserver.py metaclass-boilerplate +test/integration/targets/group/files/gidget.py future-import-boilerplate +test/integration/targets/group/files/gidget.py metaclass-boilerplate +test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py future-import-boilerplate +test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py metaclass-boilerplate +test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py future-import-boilerplate +test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py metaclass-boilerplate +test/integration/targets/json_cleanup/library/bad_json shebang 
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 pslint!skip +test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 pslint!skip +test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 pslint!skip +test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 pslint!skip +test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 pslint!skip +test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 pslint!skip +test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 pslint!skip +test/integration/targets/lookup_ini/lookup-8859-15.ini no-smart-quotes +test/integration/targets/module_precedence/lib_with_extension/a.ini shebang +test/integration/targets/module_precedence/lib_with_extension/ping.ini shebang +test/integration/targets/module_precedence/lib_with_extension/ping.py future-import-boilerplate +test/integration/targets/module_precedence/lib_with_extension/ping.py metaclass-boilerplate +test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py future-import-boilerplate +test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py metaclass-boilerplate +test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py future-import-boilerplate +test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py metaclass-boilerplate +test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini shebang +test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini shebang +test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py future-import-boilerplate +test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py metaclass-boilerplate +test/integration/targets/module_utils/library/test.py future-import-boilerplate +test/integration/targets/module_utils/library/test.py metaclass-boilerplate +test/integration/targets/module_utils/library/test_env_override.py future-import-boilerplate +test/integration/targets/module_utils/library/test_env_override.py metaclass-boilerplate +test/integration/targets/module_utils/library/test_failure.py future-import-boilerplate +test/integration/targets/module_utils/library/test_failure.py metaclass-boilerplate +test/integration/targets/module_utils/library/test_override.py future-import-boilerplate +test/integration/targets/module_utils/library/test_override.py metaclass-boilerplate +test/integration/targets/module_utils/module_utils/bar0/foo.py pylint:blacklisted-name +test/integration/targets/module_utils/module_utils/foo.py pylint:blacklisted-name +test/integration/targets/module_utils/module_utils/sub/bar/__init__.py pylint:blacklisted-name +test/integration/targets/module_utils/module_utils/sub/bar/bar.py pylint:blacklisted-name +test/integration/targets/module_utils/module_utils/yak/zebra/foo.py pylint:blacklisted-name +test/integration/targets/old_style_modules_posix/library/helloworld.sh shebang +test/integration/targets/pause/test-pause.py future-import-boilerplate +test/integration/targets/pause/test-pause.py metaclass-boilerplate +test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py future-import-boilerplate +test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py 
metaclass-boilerplate
+test/integration/targets/pip/files/setup.py future-import-boilerplate
+test/integration/targets/pip/files/setup.py metaclass-boilerplate
+test/integration/targets/run_modules/library/test.py future-import-boilerplate
+test/integration/targets/run_modules/library/test.py metaclass-boilerplate
+test/integration/targets/script/files/no_shebang.py future-import-boilerplate
+test/integration/targets/script/files/no_shebang.py metaclass-boilerplate
+test/integration/targets/service/files/ansible_test_service.py future-import-boilerplate
+test/integration/targets/service/files/ansible_test_service.py metaclass-boilerplate
+test/integration/targets/setup_rpm_repo/files/create-repo.py future-import-boilerplate
+test/integration/targets/setup_rpm_repo/files/create-repo.py metaclass-boilerplate
+test/integration/targets/template/files/encoding_1252_utf-8.expected no-smart-quotes
+test/integration/targets/template/files/encoding_1252_windows-1252.expected no-smart-quotes
+test/integration/targets/template/files/foo.dos.txt line-endings
+test/integration/targets/template/role_filter/filter_plugins/myplugin.py future-import-boilerplate
+test/integration/targets/template/role_filter/filter_plugins/myplugin.py metaclass-boilerplate
+test/integration/targets/template/templates/encoding_1252.j2 no-smart-quotes
+test/integration/targets/infra/library/test.py future-import-boilerplate
+test/integration/targets/infra/library/test.py metaclass-boilerplate
+test/integration/targets/unicode/unicode.yml no-smart-quotes
+test/integration/targets/uri/files/testserver.py future-import-boilerplate
+test/integration/targets/uri/files/testserver.py metaclass-boilerplate
+test/integration/targets/var_precedence/ansible-var-precedence-check.py future-import-boilerplate
+test/integration/targets/var_precedence/ansible-var-precedence-check.py metaclass-boilerplate
+test/integration/targets/builtin_vars_prompt/test-vars_prompt.py future-import-boilerplate
+test/integration/targets/builtin_vars_prompt/test-vars_prompt.py metaclass-boilerplate
+test/integration/targets/vault/test-vault-client.py future-import-boilerplate
+test/integration/targets/vault/test-vault-client.py metaclass-boilerplate
+test/integration/targets/wait_for/files/testserver.py future-import-boilerplate
+test/integration/targets/wait_for/files/testserver.py metaclass-boilerplate
+test/integration/targets/want_json_modules_posix/library/helloworld.py future-import-boilerplate
+test/integration/targets/want_json_modules_posix/library/helloworld.py metaclass-boilerplate
+test/integration/targets/win_exec_wrapper/library/test_fail.ps1 pslint:PSCustomUseLiteralPath
+test/integration/targets/win_exec_wrapper/tasks/main.yml no-smart-quotes # We are explicitly testing smart quote support for env vars
+test/integration/targets/win_fetch/tasks/main.yml no-smart-quotes # We are explicitly testing smart quotes in the file name to fetch
+test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
+test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
+test/integration/targets/win_script/files/test_script.ps1 pslint:PSAvoidUsingWriteHost # Keep
+test/integration/targets/win_script/files/test_script_creates_file.ps1 pslint:PSAvoidUsingCmdletAliases
+test/integration/targets/win_script/files/test_script_removes_file.ps1
pslint:PSCustomUseLiteralPath +test/integration/targets/win_script/files/test_script_with_args.ps1 pslint:PSAvoidUsingWriteHost # Keep +test/integration/targets/win_script/files/test_script_with_splatting.ps1 pslint:PSAvoidUsingWriteHost # Keep +test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1 pslint!skip +test/lib/ansible_test/_data/requirements/constraints.txt test-constraints +test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt test-constraints +test/lib/ansible_test/_data/requirements/sanity.ps1 pslint:PSCustomUseLiteralPath # Uses wildcards on purpose +test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py use-compat-six +test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath +test/lib/ansible_test/_data/setup/windows-httptester.ps1 pslint:PSCustomUseLiteralPath +test/support/integration/plugins/module_utils/azure_rm_common.py future-import-boilerplate +test/support/integration/plugins/module_utils/azure_rm_common.py metaclass-boilerplate +test/support/integration/plugins/module_utils/azure_rm_common_rest.py future-import-boilerplate +test/support/integration/plugins/module_utils/azure_rm_common_rest.py metaclass-boilerplate +test/support/integration/plugins/module_utils/cloud.py future-import-boilerplate +test/support/integration/plugins/module_utils/cloud.py metaclass-boilerplate +test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate +test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate +test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals +test/support/integration/plugins/module_utils/database.py future-import-boilerplate +test/support/integration/plugins/module_utils/database.py metaclass-boilerplate +test/support/integration/plugins/module_utils/mysql.py future-import-boilerplate +test/support/integration/plugins/module_utils/mysql.py metaclass-boilerplate +test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate +test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate +test/support/integration/plugins/module_utils/postgres.py future-import-boilerplate +test/support/integration/plugins/module_utils/postgres.py metaclass-boilerplate +test/support/integration/plugins/modules/lvg.py pylint:blacklisted-name +test/support/integration/plugins/modules/synchronize.py pylint:blacklisted-name +test/support/integration/plugins/modules/timezone.py pylint:blacklisted-name +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate 
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py no-unicode-literals +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py pep8:E203 +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py future-import-boilerplate 
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py pep8:E501 +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pep8:E231 +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pylint:blacklisted-name +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py metaclass-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py future-import-boilerplate +test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py metaclass-boilerplate +test/support/windows-integration/plugins/modules/async_status.ps1 pslint!skip +test/support/windows-integration/plugins/modules/setup.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_copy.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_dsc.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_feature.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_find.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_lineinfile.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_regedit.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_security_policy.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_shell.ps1 pslint!skip +test/support/windows-integration/plugins/modules/win_wait_for.ps1 pslint!skip +test/units/executor/test_play_iterator.py pylint:blacklisted-name +test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-no-version +test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-version +test/units/module_utils/basic/test_run_command.py pylint:blacklisted-name 
+test/units/module_utils/urls/fixtures/multipart.txt line-endings # Fixture for HTTP tests that use CRLF +test/units/module_utils/urls/test_Request.py replace-urlopen +test/units/module_utils/urls/test_fetch_url.py replace-urlopen +test/units/modules/test_apt.py pylint:blacklisted-name +test/units/modules/test_known_hosts.py pylint:ansible-bad-function +test/units/parsing/vault/test_vault.py pylint:blacklisted-name +test/units/playbook/role/test_role.py pylint:blacklisted-name +test/units/plugins/test_plugins.py pylint:blacklisted-name +test/units/template/test_templar.py pylint:blacklisted-name +test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py future-import-boilerplate # test expects no boilerplate +test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py metaclass-boilerplate # test expects no boilerplate +test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py pylint:relative-beyond-top-level +test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py empty-init # testing that collections don't need inits +test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py empty-init # testing that collections don't need inits +test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py empty-init # testing that collections don't need inits +test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py empty-init # testing that collections don't need inits +test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py empty-init # testing that collections don't need inits +test/units/utils/collection_loader/test_collection_loader.py pylint:undefined-variable # magic runtime local var splatting +test/utils/shippable/check_matrix.py replace-urlopen +test/utils/shippable/timing.py shebang diff --git a/test/support/integration/plugins/cache/jsonfile.py b/test/support/integration/plugins/cache/jsonfile.py new file mode 100644 index 00000000..80b16f55 --- /dev/null +++ b/test/support/integration/plugins/cache/jsonfile.py @@ -0,0 +1,63 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + cache: jsonfile + short_description: JSON formatted files. + description: + - This cache uses JSON formatted, per host, files saved to the filesystem. + version_added: "1.9" + author: Ansible Core (@ansible-core) + options: + _uri: + required: True + description: + - Path in which the cache plugin will save the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the JSON files + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. 
Set to 0 to never expire
+        env:
+          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+        ini:
+          - key: fact_caching_timeout
+            section: defaults
+        type: integer
+'''
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+    """
+    A caching module backed by json files.
+    """
+
+    def _load(self, filepath):
+        # Valid JSON is always UTF-8 encoded.
+        with codecs.open(filepath, 'r', encoding='utf-8') as f:
+            return json.load(f, cls=AnsibleJSONDecoder)
+
+    def _dump(self, value, filepath):
+        with codecs.open(filepath, 'w', encoding='utf-8') as f:
+            f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
diff --git a/test/support/integration/plugins/filter/json_query.py b/test/support/integration/plugins/filter/json_query.py
new file mode 100644
index 00000000..d1da71b4
--- /dev/null
+++ b/test/support/integration/plugins/filter/json_query.py
@@ -0,0 +1,53 @@
+# (c) 2015, Filipe Niero Felisbino
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+    import jmespath
+    HAS_LIB = True
+except ImportError:
+    HAS_LIB = False
+
+
+def json_query(data, expr):
+    '''Query data using jmespath query language ( http://jmespath.org ). Example:
+    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+    '''
+    if not HAS_LIB:
+        raise AnsibleError('You need to install "jmespath" prior to running '
+                           'json_query filter')
+
+    try:
+        return jmespath.search(expr, data)
+    except jmespath.exceptions.JMESPathError as e:
+        raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+    except Exception as e:
+        # For older jmespath, we can get ValueError and TypeError without much info.
+        raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+    ''' Query filter '''
+
+    def filters(self):
+        return {
+            'json_query': json_query
+        }
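Since the json_query filter above is a thin wrapper around jmespath.search(), the underlying behaviour is easy to demonstrate in isolation; a minimal sketch, assuming the jmespath package is installed and using made-up data:

import jmespath

# The filter ultimately runs jmespath.search(expr, data)
data = {'instances': [{'id': 'i-1', 'state': 'running'},
                      {'id': 'i-2', 'state': 'stopped'}]}

print(jmespath.search("instances[?state=='running'].id", data))  # ['i-1']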
+        - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
+    notes:
+        - If no credentials are provided and the control node has an associated IAM instance profile then the
+          role will be used for authentication.
+    author:
+        - Sloane Hertel (@s-hertel)
+    options:
+        aws_profile:
+          description: The AWS profile
+          type: str
+          aliases: [ boto_profile ]
+          env:
+            - name: AWS_DEFAULT_PROFILE
+            - name: AWS_PROFILE
+        aws_access_key:
+          description: The AWS access key to use.
+          type: str
+          aliases: [ aws_access_key_id ]
+          env:
+            - name: EC2_ACCESS_KEY
+            - name: AWS_ACCESS_KEY
+            - name: AWS_ACCESS_KEY_ID
+        aws_secret_key:
+          description: The AWS secret key that corresponds to the access key.
+          type: str
+          aliases: [ aws_secret_access_key ]
+          env:
+            - name: EC2_SECRET_KEY
+            - name: AWS_SECRET_KEY
+            - name: AWS_SECRET_ACCESS_KEY
+        aws_security_token:
+          description: The AWS security token if using temporary access and secret keys.
+          type: str
+          env:
+            - name: EC2_SECURITY_TOKEN
+            - name: AWS_SESSION_TOKEN
+            - name: AWS_SECURITY_TOKEN
+        plugin:
+          description: Token that ensures this is a source file for the plugin.
+          required: True
+          choices: ['aws_ec2']
+        iam_role_arn:
+          description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+              credentials with enough privilege to perform the AssumeRole action.
+          version_added: '2.9'
+        regions:
+          description:
+              - A list of regions in which to describe EC2 instances.
+              - If empty (the default), this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+          type: list
+          default: []
+        hostnames:
+          description:
+              - A list in order of precedence for hostname variables.
+              - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+              - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+          type: list
+          default: []
+        filters:
+          description:
+              - A dictionary of filter value pairs.
+              - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+          type: dict
+          default: {}
+        include_extra_api_calls:
+          description:
+              - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+              - Spot instances may be persistent and instances may have associated events.
+          type: bool
+          default: False
+          version_added: '2.8'
+        strict_permissions:
+          description:
+              - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+              - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+          type: bool
+          default: True
+        use_contrib_script_compatible_sanitization:
+          description:
+            - By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
+              This option allows you to override that, in efforts to allow migration from the old inventory script and
+              matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+              To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
+              you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+            - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+              otherwise the core engine will just use the standard sanitization on top.
+ - This is not the default as such names break certain functionality as not all characters are valid Python identifiers + which group names end up being used as. + type: bool + default: False + version_added: '2.8' +''' + +EXAMPLES = ''' +# Minimal example using environment vars or instance role credentials +# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address +plugin: aws_ec2 +regions: + - us-east-1 + +# Example using filters, ignoring permission errors, and specifying the hostname precedence +plugin: aws_ec2 +boto_profile: aws_profile +# Populate inventory with instances in these regions +regions: + - us-east-1 + - us-east-2 +filters: + # All instances with their `Environment` tag set to `dev` + tag:Environment: dev + # All dev and QA hosts + tag:Environment: + - dev + - qa + instance.group-id: sg-xxxxxxxx +# Ignores 403 errors rather than failing +strict_permissions: False +# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying +# inventory_hostname use compose (see example below). +hostnames: + - tag:Name=Tag1,Name=Tag2 # Return specific hosts only + - tag:CustomDNSName + - dns-name + - private-ip-address + +# Example using constructed features to create groups and set ansible_host +plugin: aws_ec2 +regions: + - us-east-1 + - us-west-1 +# keyed_groups may be used to create custom groups +strict: False +keyed_groups: + # Add e.g. x86_64 hosts to an arch_x86_64 group + - prefix: arch + key: 'architecture' + # Add hosts to tag_Name_Value groups for each Name/Value tag pair + - prefix: tag + key: tags + # Add hosts to e.g. instance_type_z3_tiny + - prefix: instance_type + key: instance_type + # Create security_groups_sg_abcd1234 group for each SG + - key: 'security_groups|json_query("[].group_id")' + prefix: 'security_groups' + # Create a group for each value of the Application tag + - key: tags.Application + separator: '' + # Create a group per region e.g. aws_region_us_east_2 + - key: placement.region + prefix: aws_region + # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project" + - key: tags['Role'] + prefix: foo + parent_group: "project" +# Set individual variables with compose +compose: + # Use the private IP address to connect to the host + # (note: this does not modify inventory_hostname, which is set via I(hostnames)) + ansible_host: private_ip_address +''' + +import re + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display +from ansible.module_utils.six import string_types + +try: + import boto3 + import botocore +except ImportError: + raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.') + +display = Display() + +# The mappings give an array of keys to get from the filter name to the value +# returned by boto3's EC2 describe_instances method. 
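+# For illustration only (an editorial sketch, not part of the upstream mapping):
+# the filter name 'availability-zone' maps to the key chain
+# ('Placement', 'AvailabilityZone'), so resolving that filter walks
+# instance['Placement']['AvailabilityZone'] on a describe_instances result,
+# while a single-element chain such as ('OwnerId',) simply reads
+# instance['OwnerId'].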
+ +instance_meta_filter_to_boto_attr = { + 'group-id': ('Groups', 'GroupId'), + 'group-name': ('Groups', 'GroupName'), + 'network-interface.attachment.instance-owner-id': ('OwnerId',), + 'owner-id': ('OwnerId',), + 'requester-id': ('RequesterId',), + 'reservation-id': ('ReservationId',), +} + +instance_data_filter_to_boto_attr = { + 'affinity': ('Placement', 'Affinity'), + 'architecture': ('Architecture',), + 'availability-zone': ('Placement', 'AvailabilityZone'), + 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'), + 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'), + 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'), + 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'), + 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'), + 'client-token': ('ClientToken',), + 'dns-name': ('PublicDnsName',), + 'host-id': ('Placement', 'HostId'), + 'hypervisor': ('Hypervisor',), + 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'), + 'image-id': ('ImageId',), + 'instance-id': ('InstanceId',), + 'instance-lifecycle': ('InstanceLifecycle',), + 'instance-state-code': ('State', 'Code'), + 'instance-state-name': ('State', 'Name'), + 'instance-type': ('InstanceType',), + 'instance.group-id': ('SecurityGroups', 'GroupId'), + 'instance.group-name': ('SecurityGroups', 'GroupName'), + 'ip-address': ('PublicIpAddress',), + 'kernel-id': ('KernelId',), + 'key-name': ('KeyName',), + 'launch-index': ('AmiLaunchIndex',), + 'launch-time': ('LaunchTime',), + 'monitoring-state': ('Monitoring', 'State'), + 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'), + 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'), + 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'), + 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'), + 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'), + 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'), + 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'), + 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'), + 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'), + 'network-interface.attachment.instance-id': ('InstanceId',), + 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'), + 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'), + 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'), + 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'), + 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'), + 'network-interface.description': ('NetworkInterfaces', 'Description'), + 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'), + 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'), + 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'), + 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'), + 
'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'), + 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'), + 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'), + # 'network-interface.requester-id': (), + 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'), + 'network-interface.status': ('NetworkInterfaces', 'Status'), + 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'), + 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'), + 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'), + 'placement-group-name': ('Placement', 'GroupName'), + 'platform': ('Platform',), + 'private-dns-name': ('PrivateDnsName',), + 'private-ip-address': ('PrivateIpAddress',), + 'product-code': ('ProductCodes', 'ProductCodeId'), + 'product-code.type': ('ProductCodes', 'ProductCodeType'), + 'ramdisk-id': ('RamdiskId',), + 'reason': ('StateTransitionReason',), + 'root-device-name': ('RootDeviceName',), + 'root-device-type': ('RootDeviceType',), + 'source-dest-check': ('SourceDestCheck',), + 'spot-instance-request-id': ('SpotInstanceRequestId',), + 'state-reason-code': ('StateReason', 'Code'), + 'state-reason-message': ('StateReason', 'Message'), + 'subnet-id': ('SubnetId',), + 'tag': ('Tags',), + 'tag-key': ('Tags',), + 'tag-value': ('Tags',), + 'tenancy': ('Placement', 'Tenancy'), + 'virtualization-type': ('VirtualizationType',), + 'vpc-id': ('VpcId',), +} + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = 'aws_ec2' + + def __init__(self): + super(InventoryModule, self).__init__() + + self.group_prefix = 'aws_ec2_' + + # credentials + self.boto_profile = None + self.aws_secret_access_key = None + self.aws_access_key_id = None + self.aws_security_token = None + self.iam_role_arn = None + + def _compile_values(self, obj, attr): + ''' + :param obj: A list or dict of instance attributes + :param attr: A key + :return The value(s) found via the attr + ''' + if obj is None: + return + + temp_obj = [] + + if isinstance(obj, list) or isinstance(obj, tuple): + for each in obj: + value = self._compile_values(each, attr) + if value: + temp_obj.append(value) + else: + temp_obj = obj.get(attr) + + has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)]) + if has_indexes and len(temp_obj) == 1: + return temp_obj[0] + + return temp_obj + + def _get_boto_attr_chain(self, filter_name, instance): + ''' + :param filter_name: The filter + :param instance: instance dict returned by boto3 ec2 describe_instances() + ''' + allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys())) + if filter_name not in allowed_filters: + raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." 
% (filter_name, + allowed_filters)) + if filter_name in instance_data_filter_to_boto_attr: + boto_attr_list = instance_data_filter_to_boto_attr[filter_name] + else: + boto_attr_list = instance_meta_filter_to_boto_attr[filter_name] + + instance_value = instance + for attribute in boto_attr_list: + instance_value = self._compile_values(instance_value, attribute) + return instance_value + + def _get_credentials(self): + ''' + :return A dictionary of boto client credentials + ''' + boto_params = {} + for credential in (('aws_access_key_id', self.aws_access_key_id), + ('aws_secret_access_key', self.aws_secret_access_key), + ('aws_session_token', self.aws_security_token)): + if credential[1]: + boto_params[credential[0]] = credential[1] + + return boto_params + + def _get_connection(self, credentials, region='us-east-1'): + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + if self.boto_profile: + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + else: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + return connection + + def _boto3_assume_role(self, credentials, region): + """ + Assume an IAM role passed by iam_role_arn parameter + + :return: a dict containing the credentials of the assumed role + """ + + iam_role_arn = self.iam_role_arn + + try: + sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials) + sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory') + return dict( + aws_access_key_id=sts_session['Credentials']['AccessKeyId'], + aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'], + aws_session_token=sts_session['Credentials']['SessionToken'] + ) + except botocore.exceptions.ClientError as e: + raise AnsibleError("Unable to assume IAM role: %s" % to_native(e)) + + def _boto3_conn(self, regions): + ''' + :param regions: A list of regions to create a boto3 client + + Generator that yields a boto3 client and the region + ''' + + credentials = self._get_credentials() + iam_role_arn = self.iam_role_arn + + if not regions: + try: + # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html + client = self._get_connection(credentials) + resp = client.describe_regions() + regions = [x['RegionName'] for x in resp.get('Regions', [])] + except botocore.exceptions.NoRegionError: + # above seems to fail depending on boto3 version, ignore and lets try something else + pass + + # fallback to local list hardcoded in boto3 if still no regions + if not regions: + session = boto3.Session() + regions = session.get_available_regions('ec2') + + # I give up, now you MUST give me regions + if not regions: + raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.') + + for region in regions: + connection = self._get_connection(credentials, region) + try: + if iam_role_arn is not None: + assumed_credentials = self._boto3_assume_role(credentials, region) + else: + assumed_credentials = credentials + connection = 
boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + if self.boto_profile: + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + else: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + yield connection, region + + def _get_instances_by_region(self, regions, filters, strict_permissions): + ''' + :param regions: a list of regions in which to describe instances + :param filters: a list of boto3 filter dictionaries + :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes + :return A list of instance dictionaries + ''' + all_instances = [] + + for connection, region in self._boto3_conn(regions): + try: + # By default find non-terminated/terminating instances + if not any([f['Name'] == 'instance-state-name' for f in filters]): + filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']}) + paginator = connection.get_paginator('describe_instances') + reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations') + instances = [] + for r in reservations: + new_instances = r['Instances'] + for instance in new_instances: + instance.update(self._get_reservation_details(r)) + if self.get_option('include_extra_api_calls'): + instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId'))) + instances.extend(new_instances) + except botocore.exceptions.ClientError as e: + if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions: + instances = [] + else: + raise AnsibleError("Failed to describe instances: %s" % to_native(e)) + except botocore.exceptions.BotoCoreError as e: + raise AnsibleError("Failed to describe instances: %s" % to_native(e)) + + all_instances.extend(instances) + + return sorted(all_instances, key=lambda x: x['InstanceId']) + + def _get_reservation_details(self, reservation): + return { + 'OwnerId': reservation['OwnerId'], + 'RequesterId': reservation.get('RequesterId', ''), + 'ReservationId': reservation['ReservationId'] + } + + def _get_event_set_and_persistence(self, connection, instance_id, spot_instance): + host_vars = {'Events': '', 'Persistent': False} + try: + kwargs = {'InstanceIds': [instance_id]} + host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not self.get_option('strict_permissions'): + pass + else: + raise AnsibleError("Failed to describe instance status: %s" % to_native(e)) + if spot_instance: + try: + kwargs = {'SpotInstanceRequestIds': [spot_instance]} + host_vars['Persistent'] = bool( + connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent' + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not self.get_option('strict_permissions'): + pass + else: + raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e)) + return host_vars + + def _get_tag_hostname(self, preference, instance): + tag_hostnames = preference.split('tag:', 
1)[1]
+        if ',' in tag_hostnames:
+            tag_hostnames = tag_hostnames.split(',')
+        else:
+            tag_hostnames = [tag_hostnames]
+        tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+        for v in tag_hostnames:
+            if '=' in v:
+                tag_name, tag_value = v.split('=')
+                if tags.get(tag_name) == tag_value:
+                    return to_text(tag_name) + "_" + to_text(tag_value)
+            else:
+                tag_value = tags.get(v)
+                if tag_value:
+                    return to_text(tag_value)
+        return None
+
+    def _get_hostname(self, instance, hostnames):
+        '''
+            :param instance: an instance dict returned by boto3 ec2 describe_instances()
+            :param hostnames: a list of hostname destination variables in order of preference
+            :return the preferred identifier for the host
+        '''
+        if not hostnames:
+            hostnames = ['dns-name', 'private-dns-name']
+
+        hostname = None
+        for preference in hostnames:
+            if 'tag' in preference:
+                if not preference.startswith('tag:'):
+                    raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
+                hostname = self._get_tag_hostname(preference, instance)
+            else:
+                hostname = self._get_boto_attr_chain(preference, instance)
+            if hostname:
+                break
+        if hostname:
+            if ':' in to_text(hostname):
+                return self._sanitize_group_name((to_text(hostname)))
+            else:
+                return to_text(hostname)
+
+    def _query(self, regions, filters, strict_permissions):
+        '''
+            :param regions: a list of regions to query
+            :param filters: a list of boto3 filter dictionaries
+            :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+        '''
+        return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
+
+    def _populate(self, groups, hostnames):
+        for group in groups:
+            group = self.inventory.add_group(group)
+            self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
+            self.inventory.add_child('all', group)
+
+    def _add_hosts(self, hosts, group, hostnames):
+        '''
+            :param hosts: a list of hosts to be added to a group
+            :param group: the name of the group to which the hosts belong
+            :param hostnames: a list of hostname destination variables in order of preference
+        '''
+        for host in hosts:
+            hostname = self._get_hostname(host, hostnames)
+
+            host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+            host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+            # Allow easier grouping by region
+            host['placement']['region'] = host['placement']['availability_zone'][:-1]
+
+            if not hostname:
+                continue
+            self.inventory.add_host(hostname, group=group)
+            for hostvar, hostval in host.items():
+                self.inventory.set_variable(hostname, hostvar, hostval)
+
+            # Use constructed if applicable
+
+            strict = self.get_option('strict')
+
+            # Composed variables
+            self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+            self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+            # Create groups based on variable values and add the corresponding hosts to it
+            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+    def _set_credentials(self):
+        '''
+            :param config_data: contents of the inventory config file
+        '''
+
+        self.boto_profile = self.get_option('aws_profile')
+        self.aws_access_key_id = self.get_option('aws_access_key')
+        self.aws_secret_access_key = self.get_option('aws_secret_key')
+
self.aws_security_token = self.get_option('aws_security_token') + self.iam_role_arn = self.get_option('iam_role_arn') + + if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): + session = botocore.session.get_session() + try: + credentials = session.get_credentials().get_frozen_credentials() + except AttributeError: + pass + else: + self.aws_access_key_id = credentials.access_key + self.aws_secret_access_key = credentials.secret_key + self.aws_security_token = credentials.token + + if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): + raise AnsibleError("Insufficient boto credentials found. Please provide them in your " + "inventory configuration file or set them as environment variables.") + + def verify_file(self, path): + ''' + :param loader: an ansible.parsing.dataloader.DataLoader object + :param path: the path to the inventory config file + :return the contents of the config file + ''' + if super(InventoryModule, self).verify_file(path): + if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')): + return True + display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'") + return False + + def parse(self, inventory, loader, path, cache=True): + + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + + if self.get_option('use_contrib_script_compatible_sanitization'): + self._sanitize_group_name = self._legacy_script_compatible_group_sanitization + + self._set_credentials() + + # get user specifications + regions = self.get_option('regions') + filters = ansible_dict_to_boto3_filter_list(self.get_option('filters')) + hostnames = self.get_option('hostnames') + strict_permissions = self.get_option('strict_permissions') + + cache_key = self.get_cache_key(path) + # false when refresh_cache or --flush-cache is used + if cache: + # get the user-specified directive + cache = self.get_option('cache') + + # Generate inventory + cache_needs_update = False + if cache: + try: + results = self._cache[cache_key] + except KeyError: + # if cache expires or cache file doesn't exist + cache_needs_update = True + + if not cache or cache_needs_update: + results = self._query(regions, filters, strict_permissions) + + self._populate(results, hostnames) + + # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used + # when the user is using caching, update the cached inventory + if cache_needs_update or (not cache and self.get_option('cache')): + self._cache[cache_key] = results + + @staticmethod + def _legacy_script_compatible_group_sanitization(name): + + # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python + regex = re.compile(r"[^A-Za-z0-9\_\-]") + + return regex.sub('_', name) + + +def ansible_dict_to_boto3_filter_list(filters_dict): + + """ Convert an Ansible dict of filters to list of dicts that boto3 can use + Args: + filters_dict (dict): Dict of AWS filters. 
+    Basic Usage:
+        >>> filters = {'some-aws-id': 'i-01234567'}
+        >>> ansible_dict_to_boto3_filter_list(filters)
+        [
+            {
+                'Name': 'some-aws-id',
+                'Values': [
+                    'i-01234567',
+                ]
+            }
+        ]
+    Returns:
+        List: List of AWS filters and their values
+    """
+
+    filters_list = []
+    for k, v in filters_dict.items():
+        filter_dict = {'Name': k}
+        if isinstance(v, string_types):
+            filter_dict['Values'] = [v]
+        else:
+            filter_dict['Values'] = v
+
+        filters_list.append(filter_dict)
+
+    return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+    Args:
+        tags_list (list): List of dicts representing AWS tags.
+        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+    Basic Usage:
+        >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+        >>> boto3_tag_list_to_ansible_dict(tags_list)
+        {
+            'MyTagKey': 'MyTagValue',
+        }
+    Returns:
+        Dict: Dict of key:value pairs representing AWS tags
+    """
+
+    if tag_name_key_name and tag_value_key_name:
+        tag_candidates = {tag_name_key_name: tag_value_key_name}
+    else:
+        tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+    if not tags_list:
+        return {}
+    for k, v in tag_candidates.items():
+        if k in tags_list[0] and v in tags_list[0]:
+            return dict((tag[k], tag[v]) for tag in tags_list)
+    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
diff --git a/test/support/integration/plugins/inventory/docker_swarm.py b/test/support/integration/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..d0a95ca0
--- /dev/null
+++ b/test/support/integration/plugins/inventory/docker_swarm.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    name: docker_swarm
+    plugin_type: inventory
+    version_added: '2.8'
+    author:
+        - Stefan Heitmüller (@morph027)
+    short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+    requirements:
+        - python >= 2.7
+        - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+    extends_documentation_fragment:
+        - constructed
+    description:
+        - Reads inventories from the Docker swarm API.
+        - Uses a YAML configuration file docker_swarm.[yml|yaml].
+        - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+          I(managers) - all manager nodes; I(leader) - the swarm leader node;
+          I(nonleaders) - all nodes except the swarm leader."
+    options:
+        plugin:
+            description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to
+                recognize it as its own.
+            type: str
+            required: true
+            choices: ['docker_swarm']
+        docker_host:
+            description:
+                - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+                - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str + required: true + aliases: [ docker_url ] + verbose_output: + description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS), + C(EngineVersion)) + type: bool + default: yes + tls: + description: Connect using TLS without verifying the authenticity of the Docker host server. + type: bool + default: no + validate_certs: + description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker + host server. + type: bool + default: no + aliases: [ tls_verify ] + client_key: + description: Path to the client's TLS key file. + type: path + aliases: [ tls_client_key, key_path ] + ca_cert: + description: Use a CA certificate when performing server verification by providing the path to a CA + certificate file. + type: path + aliases: [ tls_ca_cert, cacert_path ] + client_cert: + description: Path to the client's TLS certificate file. + type: path + aliases: [ tls_client_cert, cert_path ] + tls_hostname: + description: When verifying the authenticity of the Docker host server, provide the expected name of + the server. + type: str + ssl_version: + description: Provide a valid SSL version number. Default value determined by ssl.py module. + type: str + api_version: + description: + - The version of the Docker API running on the Docker Host. + - Defaults to the latest version of the API supported by docker-py. + type: str + aliases: [ docker_api_version ] + timeout: + description: + - The maximum amount of time in seconds to wait on a response from the API. + - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) + will be used instead. If the environment variable is not set, the default value will be used. + type: int + default: 60 + aliases: [ time_out ] + include_host_uri: + description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the + swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional + modification as value of option I(docker_host) in Docker Swarm modules when connecting via API. + The port always defaults to C(2376). + type: bool + default: no + include_host_uri_port: + description: Override the detected port number included in I(ansible_host_uri) + type: int +''' + +EXAMPLES = ''' +# Minimal example using local docker +plugin: docker_swarm +docker_host: unix://var/run/docker.sock + +# Minimal example using remote docker +plugin: docker_swarm +docker_host: tcp://my-docker-host:2375 + +# Example using remote docker with unverified TLS +plugin: docker_swarm +docker_host: tcp://my-docker-host:2376 +tls: yes + +# Example using remote docker with verified TLS and client certificate verification +plugin: docker_swarm +docker_host: tcp://my-docker-host:2376 +validate_certs: yes +ca_cert: /somewhere/ca.pem +client_key: /somewhere/key.pem +client_cert: /somewhere/cert.pem + +# Example using constructed features to create groups and set ansible_host +plugin: docker_swarm +docker_host: tcp://my-docker-host:2375 +strict: False +keyed_groups: + # add e.g. x86_64 hosts to an arch_x86_64 group + - prefix: arch + key: 'Description.Platform.Architecture' + # add e.g. linux hosts to an os_linux group + - prefix: os + key: 'Description.Platform.OS' + # create a group per node label + # e.g. 
a node labeled w/ "production" ends up in group "label_production"
+  # hint: labels containing special characters will be converted to safe names
+  - key: 'Spec.Labels'
+    prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+    import docker
+    from docker.errors import TLSParameterError
+    from docker.tls import TLSConfig
+    HAS_DOCKER = True
+except ImportError:
+    HAS_DOCKER = False
+
+
+def update_tls_hostname(result):
+    if result['tls_hostname'] is None:
+        # get default machine name from the url
+        parsed_url = urlparse(result['docker_host'])
+        if ':' in parsed_url.netloc:
+            result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+        else:
+            result['tls_hostname'] = parsed_url.netloc
+
+
+def _get_tls_config(fail_function, **kwargs):
+    try:
+        tls_config = TLSConfig(**kwargs)
+        return tls_config
+    except TLSParameterError as exc:
+        fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+    if auth['tls'] or auth['tls_verify']:
+        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+    if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+        # TLS with certs and host verification
+        if auth['cacert_path']:
+            tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                         ca_cert=auth['cacert_path'],
+                                         verify=True,
+                                         assert_hostname=auth['tls_hostname'],
+                                         ssl_version=auth['ssl_version'],
+                                         fail_function=fail_function)
+        else:
+            tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                         verify=True,
+                                         assert_hostname=auth['tls_hostname'],
+                                         ssl_version=auth['ssl_version'],
+                                         fail_function=fail_function)
+
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls_verify'] and auth['cacert_path']:
+        # TLS with cacert only
+        tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+                                     assert_hostname=auth['tls_hostname'],
+                                     verify=True,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls_verify']:
+        # TLS with verify and no certs
+        tls_config = _get_tls_config(verify=True,
+                                     assert_hostname=auth['tls_hostname'],
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls'] and auth['cert_path'] and auth['key_path']:
+        # TLS with certs and no host verification
+        tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+                                     verify=False,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    if auth['tls']:
+        # TLS with no certs and no host verification
+        tls_config = _get_tls_config(verify=False,
+                                     ssl_version=auth['ssl_version'],
+                                     fail_function=fail_function)
+        return dict(base_url=auth['docker_host'],
+                    tls=tls_config,
+                    version=auth['api_version'],
+                    timeout=auth['timeout'])
+
+    # No TLS
+    return dict(base_url=auth['docker_host'],
+                version=auth['api_version'],
+                timeout=auth['timeout'])
+
+
+class
InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Docker swarm as source. ''' + + NAME = 'docker_swarm' + + def _fail(self, msg): + raise AnsibleError(msg) + + def _populate(self): + raw_params = dict( + docker_host=self.get_option('docker_host'), + tls=self.get_option('tls'), + tls_verify=self.get_option('validate_certs'), + key_path=self.get_option('client_key'), + cacert_path=self.get_option('ca_cert'), + cert_path=self.get_option('client_cert'), + tls_hostname=self.get_option('tls_hostname'), + api_version=self.get_option('api_version'), + timeout=self.get_option('timeout'), + ssl_version=self.get_option('ssl_version'), + debug=None, + ) + update_tls_hostname(raw_params) + connect_params = get_connect_params(raw_params, fail_function=self._fail) + self.client = docker.DockerClient(**connect_params) + self.inventory.add_group('all') + self.inventory.add_group('manager') + self.inventory.add_group('worker') + self.inventory.add_group('leader') + self.inventory.add_group('nonleaders') + + if self.get_option('include_host_uri'): + if self.get_option('include_host_uri_port'): + host_uri_port = str(self.get_option('include_host_uri_port')) + elif self.get_option('tls') or self.get_option('validate_certs'): + host_uri_port = '2376' + else: + host_uri_port = '2375' + + try: + self.nodes = self.client.nodes.list() + for self.node in self.nodes: + self.node_attrs = self.client.nodes.get(self.node.id).attrs + self.inventory.add_host(self.node_attrs['ID']) + self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role']) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', + self.node_attrs['Status']['Addr']) + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port) + if self.get_option('verbose_output'): + self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs) + if 'ManagerStatus' in self.node_attrs: + if self.node_attrs['ManagerStatus'].get('Leader'): + # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 + # Check moby/moby#35437 for details + swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \ + self.node_attrs['Status']['Addr'] + if self.get_option('include_host_uri'): + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', + 'tcp://' + swarm_leader_ip + ':' + host_uri_port) + self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip) + self.inventory.add_host(self.node_attrs['ID'], group='leader') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + else: + self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + # Use constructed if applicable + strict = self.get_option('strict') + # Composed variables + self._set_composite_vars(self.get_option('compose'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), + self.node_attrs, + self.node_attrs['ID'], + strict=strict) + except Exception as e: + raise AnsibleError('Unable to 
fetch hosts from Docker swarm API, this was the original exception: %s' %
+                               to_native(e))
+
+    def verify_file(self, path):
+        """Return the possibility of a file being consumable by this plugin."""
+        return (
+            super(InventoryModule, self).verify_file(path) and
+            path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
+
+    def parse(self, inventory, loader, path, cache=True):
+        if not HAS_DOCKER:
+            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+                               'https://github.com/docker/docker-py.')
+        super(InventoryModule, self).parse(inventory, loader, path, cache)
+        self._read_config_data(path)
+        self._populate()
diff --git a/test/support/integration/plugins/inventory/foreman.py b/test/support/integration/plugins/inventory/foreman.py
new file mode 100644
index 00000000..43073f81
--- /dev/null
+++ b/test/support/integration/plugins/inventory/foreman.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther, Daniel Lobato Garcia
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+    name: foreman
+    plugin_type: inventory
+    short_description: foreman inventory source
+    version_added: "2.6"
+    requirements:
+        - requests >= 1.1
+    description:
+        - Get inventory hosts from the foreman service.
+        - "Uses a configuration file as an inventory source, it must end in ``.foreman.yml`` or ``.foreman.yaml`` and has a ``plugin: foreman`` entry."
+    extends_documentation_fragment:
+        - inventory_cache
+        - constructed
+    options:
+      plugin:
+        description: the name of this plugin, it should always be set to 'foreman' for this plugin to recognize it as its own.
+ required: True + choices: ['foreman'] + url: + description: url to foreman + default: 'http://localhost:3000' + env: + - name: FOREMAN_SERVER + version_added: "2.8" + user: + description: foreman authentication user + required: True + env: + - name: FOREMAN_USER + version_added: "2.8" + password: + description: foreman authentication password + required: True + env: + - name: FOREMAN_PASSWORD + version_added: "2.8" + validate_certs: + description: verify SSL certificate if using https + type: boolean + default: False + group_prefix: + description: prefix to apply to foreman groups + default: foreman_ + vars_prefix: + description: prefix to apply to host variables, does not include facts nor params + default: foreman_ + want_facts: + description: Toggle, if True the plugin will retrieve host facts from the server + type: boolean + default: False + want_params: + description: Toggle, if true the inventory will retrieve 'all_parameters' information as host vars + type: boolean + default: False + want_hostcollections: + description: Toggle, if true the plugin will create Ansible groups for host collections + type: boolean + default: False + version_added: '2.10' + want_ansible_ssh_host: + description: Toggle, if true the plugin will populate the ansible_ssh_host variable to explicitly specify the connection target + type: boolean + default: False + version_added: '2.10' + +''' + +EXAMPLES = ''' +# my.foreman.yml +plugin: foreman +url: http://localhost:2222 +user: ansible-tester +password: secure +validate_certs: False +''' + +from distutils.version import LooseVersion + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name, Constructable + +# 3rd party imports +try: + import requests + if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): + raise ImportError +except ImportError: + raise AnsibleError('This script requires python-requests 1.1 as a minimum version') + +from requests.auth import HTTPBasicAuth + + +class InventoryModule(BaseInventoryPlugin, Cacheable, Constructable): + ''' Host inventory parser for ansible using foreman as source. 
'''
+
+    NAME = 'foreman'
+
+    def __init__(self):
+
+        super(InventoryModule, self).__init__()
+
+        # from config
+        self.foreman_url = None
+
+        self.session = None
+        self.cache_key = None
+        self.use_cache = None
+
+    def verify_file(self, path):
+
+        valid = False
+        if super(InventoryModule, self).verify_file(path):
+            if path.endswith(('foreman.yaml', 'foreman.yml')):
+                valid = True
+            else:
+                self.display.vvv('Skipping due to inventory source not ending in "foreman.yaml" nor "foreman.yml"')
+        return valid
+
+    def _get_session(self):
+        if not self.session:
+            self.session = requests.session()
+            self.session.auth = HTTPBasicAuth(self.get_option('user'), to_bytes(self.get_option('password')))
+            self.session.verify = self.get_option('validate_certs')
+        return self.session
+
+    def _get_json(self, url, ignore_errors=None):
+
+        if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+            if self.cache_key not in self._cache:
+                self._cache[self.cache_key] = {url: ''}
+
+            results = []
+            s = self._get_session()
+            params = {'page': 1, 'per_page': 250}
+            while True:
+                ret = s.get(url, params=params)
+                if ignore_errors and ret.status_code in ignore_errors:
+                    break
+                ret.raise_for_status()
+                json = ret.json()
+
+                # process results
+                # FIXME: This assumes 'return type' matches a specific query,
+                #        it will break if we expand the queries and they don't have different types
+                if 'results' not in json:
+                    # /hosts/:id does not have a 'results' key
+                    results = json
+                    break
+                elif isinstance(json['results'], MutableMapping):
+                    # /facts are returned as dict in 'results'
+                    results = json['results']
+                    break
+                else:
+                    # /hosts' 'results' is a list of all hosts; the response is paginated
+                    results = results + json['results']
+
+                    # check for end of paging
+                    if len(results) >= json['subtotal']:
+                        break
+                    if len(json['results']) == 0:
+                        self.display.warning("Did not make any progress during loop.
expected %d got %d" % (json['subtotal'], len(results))) + break + + # get next page + params['page'] += 1 + + self._cache[self.cache_key][url] = results + + return self._cache[self.cache_key][url] + + def _get_hosts(self): + return self._get_json("%s/api/v2/hosts" % self.foreman_url) + + def _get_all_params_by_id(self, hid): + url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) + ret = self._get_json(url, [404]) + if not ret or not isinstance(ret, MutableMapping) or not ret.get('all_parameters', False): + return {} + return ret.get('all_parameters') + + def _get_facts_by_id(self, hid): + url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) + return self._get_json(url) + + def _get_host_data_by_id(self, hid): + url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) + return self._get_json(url) + + def _get_facts(self, host): + """Fetch all host facts of the host""" + + ret = self._get_facts_by_id(host['id']) + if len(ret.values()) == 0: + facts = {} + elif len(ret.values()) == 1: + facts = list(ret.values())[0] + else: + raise ValueError("More than one set of facts returned for '%s'" % host) + return facts + + def _populate(self): + + for host in self._get_hosts(): + + if host.get('name'): + host_name = self.inventory.add_host(host['name']) + + # create directly mapped groups + group_name = host.get('hostgroup_title', host.get('hostgroup_name')) + if group_name: + group_name = to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group_name.lower().replace(" ", ""))) + group_name = self.inventory.add_group(group_name) + self.inventory.add_child(group_name, host_name) + + # set host vars from host info + try: + for k, v in host.items(): + if k not in ('name', 'hostgroup_title', 'hostgroup_name'): + try: + self.inventory.set_variable(host_name, self.get_option('vars_prefix') + k, v) + except ValueError as e: + self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_text(e))) + except ValueError as e: + self.display.warning("Could not get host info for %s, skipping: %s" % (host_name, to_text(e))) + + # set host vars from params + if self.get_option('want_params'): + for p in self._get_all_params_by_id(host['id']): + try: + self.inventory.set_variable(host_name, p['name'], p['value']) + except ValueError as e: + self.display.warning("Could not set hostvar %s to '%s' for the '%s' host, skipping: %s" % + (p['name'], to_native(p['value']), host, to_native(e))) + + # set host vars from facts + if self.get_option('want_facts'): + self.inventory.set_variable(host_name, 'foreman_facts', self._get_facts(host)) + + # create group for host collections + if self.get_option('want_hostcollections'): + host_data = self._get_host_data_by_id(host['id']) + hostcollections = host_data.get('host_collections') + if hostcollections: + # Create Ansible groups for host collections + for hostcollection in hostcollections: + try: + hostcollection_group = to_safe_group_name('%shostcollection_%s' % (self.get_option('group_prefix'), + hostcollection['name'].lower().replace(" ", ""))) + hostcollection_group = self.inventory.add_group(hostcollection_group) + self.inventory.add_child(hostcollection_group, host_name) + except ValueError as e: + self.display.warning("Could not create groups for host collections for %s, skipping: %s" % (host_name, to_text(e))) + + # put ansible_ssh_host as hostvar + if self.get_option('want_ansible_ssh_host'): + for key in ('ip', 'ipv4', 'ipv6'): + if host.get(key): + try: + self.inventory.set_variable(host_name, 'ansible_ssh_host', host[key]) 
+ break + except ValueError as e: + self.display.warning("Could not set hostvar ansible_ssh_host to '%s' for the '%s' host, skipping: %s" % + (host[key], host_name, to_text(e))) + + strict = self.get_option('strict') + + hostvars = self.inventory.get_host(host_name).get_vars() + self._set_composite_vars(self.get_option('compose'), hostvars, host_name, strict) + self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host_name, strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host_name, strict) + + def parse(self, inventory, loader, path, cache=True): + + super(InventoryModule, self).parse(inventory, loader, path) + + # read config from file, this sets 'options' + self._read_config_data(path) + + # get connection host + self.foreman_url = self.get_option('url') + self.cache_key = self.get_cache_key(path) + self.use_cache = cache and self.get_option('cache') + + # actually populate inventory + self._populate() diff --git a/test/support/integration/plugins/lookup/rabbitmq.py b/test/support/integration/plugins/lookup/rabbitmq.py new file mode 100644 index 00000000..7c2745f4 --- /dev/null +++ b/test/support/integration/plugins/lookup/rabbitmq.py @@ -0,0 +1,190 @@ +# (c) 2018, John Imison +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + lookup: rabbitmq + author: John Imison <@Im0> + version_added: "2.8" + short_description: Retrieve messages from an AMQP/AMQPS RabbitMQ queue. + description: + - This lookup uses a basic get to retrieve all, or a limited number C(count), messages from a RabbitMQ queue. + options: + url: + description: + - An URI connection string to connect to the AMQP/AMQPS RabbitMQ server. + - For more information refer to the URI spec U(https://www.rabbitmq.com/uri-spec.html). + required: True + queue: + description: + - The queue to get messages from. + required: True + count: + description: + - How many messages to collect from the queue. + - If not set, defaults to retrieving all the messages from the queue. + requirements: + - The python pika package U(https://pypi.org/project/pika/). + notes: + - This lookup implements BlockingChannel.basic_get to get messages from a RabbitMQ server. + - After retrieving a message from the server, receipt of the message is acknowledged and the message on the server is deleted. + - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library. + - More information about pika can be found at U(https://pika.readthedocs.io/en/stable/). + - This plugin is tested against RabbitMQ. Other AMQP 0.9.1 protocol based servers may work but not tested/guaranteed. + - Assigning the return messages to a variable under C(vars) may result in unexpected results as the lookup is evaluated every time the + variable is referenced. + - Currently this plugin only handles text based messages from a queue. Unexpected results may occur when retrieving binary data. +""" + + +EXAMPLES = """ +- name: Get all messages off a queue + debug: + msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello') }}" + + +# If you are intending on using the returned messages as a variable in more than +# one task (eg. debug, template), it is recommended to set_fact. 
+
+- name: Get 2 messages off a queue and set a fact for re-use
+  set_fact:
+    messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello', count=2) }}"
+
+- name: Dump out contents of the messages
+  debug:
+    var: messages
+
+"""
+
+RETURN = """
+  _list:
+    description:
+      - A list of dictionaries with keys and value from the queue.
+    type: list
+    contains:
+      content_type:
+        description: The content_type on the message in the queue.
+        type: str
+      delivery_mode:
+        description: The delivery_mode on the message in the queue.
+        type: str
+      delivery_tag:
+        description: The delivery_tag on the message in the queue.
+        type: str
+      exchange:
+        description: The exchange the message came from.
+        type: str
+      message_count:
+        description: The message_count for the message on the queue.
+        type: str
+      msg:
+        description: The content of the message.
+        type: str
+      redelivered:
+        description: The redelivered flag. True if the message has been delivered before.
+        type: bool
+      routing_key:
+        description: The routing_key on the message in the queue.
+        type: str
+      headers:
+        description: The headers for the message returned from the queue.
+        type: dict
+      json:
+        description: If application/json is specified in content_type, json will be loaded into variables.
+        type: dict
+
+"""
+
+import json
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+    import pika
+    from pika import spec
+    HAS_PIKA = True
+except ImportError:
+    HAS_PIKA = False
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+    def run(self, terms, variables=None, url=None, queue=None, count=None):
+        if not HAS_PIKA:
+            raise AnsibleError('pika python package is required for rabbitmq lookup.')
+        if not url:
+            raise AnsibleError('URL is required for rabbitmq lookup.')
+        if not queue:
+            raise AnsibleError('Queue is required for rabbitmq lookup.')
+
+        display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))
+
+        try:
+            parameters = pika.URLParameters(url)
+        except Exception as e:
+            raise AnsibleError("URL malformed: %s" % to_native(e))
+
+        try:
+            connection = pika.BlockingConnection(parameters)
+        except Exception as e:
+            raise AnsibleError("Connection issue: %s" % to_native(e))
+
+        try:
+            conn_channel = connection.channel()
+        except pika.exceptions.AMQPChannelError as e:
+            try:
+                connection.close()
+            except pika.exceptions.AMQPConnectionError as ie:
+                raise AnsibleError("Channel and connection closing issues: %s / %s" % (to_native(e), to_native(ie)))
+            raise AnsibleError("Channel issue: %s" % to_native(e))
+
+        ret = []
+        idx = 0
+
+        while True:
+            method_frame, properties, body = conn_channel.basic_get(queue=queue)
+            if method_frame:
+                display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))
+
+                # TODO: In the future consider checking content_type and handle text/binary data differently.
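+                # (Editorial note, not upstream code.) basic_get() returns a
+                # (method, properties, body) triple: method_frame is a
+                # pika.spec.Basic.GetOk carrying queue metadata such as
+                # delivery_tag and message_count, while properties is a
+                # pika.spec.BasicProperties whose content_type drives the
+                # JSON-decoding branch below.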
+ msg_details = dict({ + 'msg': to_text(body), + 'message_count': method_frame.message_count, + 'routing_key': method_frame.routing_key, + 'delivery_tag': method_frame.delivery_tag, + 'redelivered': method_frame.redelivered, + 'exchange': method_frame.exchange, + 'delivery_mode': properties.delivery_mode, + 'content_type': properties.content_type, + 'headers': properties.headers + }) + if properties.content_type == 'application/json': + try: + msg_details['json'] = json.loads(msg_details['msg']) + except ValueError as e: + raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e))) + + ret.append(msg_details) + conn_channel.basic_ack(method_frame.delivery_tag) + idx += 1 + if method_frame.message_count == 0 or idx == count: + break + # If we didn't get a method_frame, exit. + else: + break + + if connection.is_closed: + return [ret] + else: + try: + connection.close() + except pika.exceptions.AMQPConnectionError: + pass + return [ret] diff --git a/test/support/integration/plugins/module_utils/aws/__init__.py b/test/support/integration/plugins/module_utils/aws/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/aws/core.py b/test/support/integration/plugins/module_utils/aws/core.py new file mode 100644 index 00000000..c4527b6d --- /dev/null +++ b/test/support/integration/plugins/module_utils/aws/core.py @@ -0,0 +1,335 @@ +# +# Copyright 2017 Michael De La Rue | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +"""This module adds shared support for generic Amazon AWS modules + +**This code is not yet ready for use in user modules. As of 2017** +**and through to 2018, the interface is likely to change** +**aggressively as the exact correct interface for ansible AWS modules** +**is identified. In particular, until this notice goes away or is** +**changed, methods may disappear from the interface. Please don't** +**publish modules using this except directly to the main Ansible** +**development repository.** + +In order to use this module, include it as part of a custom +module as shown below. + + from ansible.module_utils.aws import AnsibleAWSModule + module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean + mutually_exclusive=list1, required_together=list2) + +The 'AnsibleAWSModule' module provides similar, but more restricted, +interfaces to the normal Ansible module. It also includes the +additional methods for connecting to AWS using the standard module arguments + + m.resource('lambda') # - get an AWS connection as a boto3 resource. + +or + + m.client('sts') # - get an AWS connection as a boto3 client. + +To make use of AWSRetry easier, it can now be wrapped around any call from a +module-created client. 
To add retries to a client, create the client with a retry decorator:
+
+    m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any call made through that client can then opt in to the retry decorator by
+passing the `aws_retry` argument. By default, no retries are used.
+
+    ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+    ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import logging
+import traceback
+from functools import wraps
+from distutils.version import LooseVersion
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    # Python 3
+    from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
+
+
+class AnsibleAWSModule(object):
+    """An ansible module class for AWS modules
+
+    AnsibleAWSModule provides a class for building modules which
+    connect to Amazon Web Services. The interface is currently more
+    restricted than the basic module class with the aim that later the
+    basic module class can be reduced. If you find that any key
+    feature is missing please contact the author/Ansible AWS team
+    (available on #ansible-aws on IRC) to request the additional
+    features needed.
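+
+    A minimal usage sketch (hypothetical module code; the argument_spec and the
+    EC2 call are illustrative only, and AWSRetry is assumed to be imported from
+    ansible.module_utils.ec2):
+
+        module = AnsibleAWSModule(argument_spec=dict(name=dict(type='str')),
+                                  supports_check_mode=True)
+        ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+        try:
+            reservations = ec2.describe_instances(aws_retry=True)
+        except Exception as e:
+            # fail_json_aws turns botocore exceptions into a clean module failure
+            module.fail_json_aws(e, msg="Failed to describe instances")
+        module.exit_json(changed=False, reservations=camel_dict_to_snake_dict(reservations))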
+ """ + default_settings = { + "default_args": True, + "check_boto3": True, + "auto_retry": True, + "module_class": AnsibleModule + } + + def __init__(self, **kwargs): + local_settings = {} + for key in AnsibleAWSModule.default_settings: + try: + local_settings[key] = kwargs.pop(key) + except KeyError: + local_settings[key] = AnsibleAWSModule.default_settings[key] + self.settings = local_settings + + if local_settings["default_args"]: + # ec2_argument_spec contains the region so we use that; there's a patch coming which + # will add it to aws_argument_spec so if that's accepted then later we should change + # over + argument_spec_full = ec2_argument_spec() + try: + argument_spec_full.update(kwargs["argument_spec"]) + except (TypeError, NameError): + pass + kwargs["argument_spec"] = argument_spec_full + + self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs) + + if local_settings["check_boto3"] and not HAS_BOTO3: + self._module.fail_json( + msg=missing_required_lib('botocore or boto3')) + + self.check_mode = self._module.check_mode + self._diff = self._module._diff + self._name = self._module._name + + self._botocore_endpoint_log_stream = StringIO() + self.logger = None + if self.params.get('debug_botocore_endpoint_logs'): + self.logger = logging.getLogger('botocore.endpoint') + self.logger.setLevel(logging.DEBUG) + self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream)) + + @property + def params(self): + return self._module.params + + def _get_resource_action_list(self): + actions = [] + for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'): + ln = ln.strip() + if not ln: + continue + found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln) + if found_operational_request: + operation_request = found_operational_request.group(0)[20:-1] + resource = re.search(r"https://.*?\.", ln).group(0)[8:-1] + actions.append("{0}:{1}".format(resource, operation_request)) + return list(set(actions)) + + def exit_json(self, *args, **kwargs): + if self.params.get('debug_botocore_endpoint_logs'): + kwargs['resource_actions'] = self._get_resource_action_list() + return self._module.exit_json(*args, **kwargs) + + def fail_json(self, *args, **kwargs): + if self.params.get('debug_botocore_endpoint_logs'): + kwargs['resource_actions'] = self._get_resource_action_list() + return self._module.fail_json(*args, **kwargs) + + def debug(self, *args, **kwargs): + return self._module.debug(*args, **kwargs) + + def warn(self, *args, **kwargs): + return self._module.warn(*args, **kwargs) + + def deprecate(self, *args, **kwargs): + return self._module.deprecate(*args, **kwargs) + + def boolean(self, *args, **kwargs): + return self._module.boolean(*args, **kwargs) + + def md5(self, *args, **kwargs): + return self._module.md5(*args, **kwargs) + + def client(self, service, retry_decorator=None): + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) + conn = boto3_conn(self, conn_type='client', resource=service, + region=region, endpoint=ec2_url, **aws_connect_kwargs) + return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator) + + def resource(self, service): + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) + return boto3_conn(self, conn_type='resource', resource=service, + region=region, endpoint=ec2_url, **aws_connect_kwargs) + + @property + def region(self, boto3=True): + return get_aws_region(self, boto3) + + def fail_json_aws(self, exception, 
msg=None): + """call fail_json with processed exception + + function for converting exceptions thrown by AWS SDK modules, + botocore, boto3 and boto, into nice error messages. + """ + last_traceback = traceback.format_exc() + + # to_native is trusted to handle exceptions that str() could + # convert to text. + try: + except_msg = to_native(exception.message) + except AttributeError: + except_msg = to_native(exception) + + if msg is not None: + message = '{0}: {1}'.format(msg, except_msg) + else: + message = except_msg + + try: + response = exception.response + except AttributeError: + response = None + + failure = dict( + msg=message, + exception=last_traceback, + **self._gather_versions() + ) + + if response is not None: + failure.update(**camel_dict_to_snake_dict(response)) + + self.fail_json(**failure) + + def _gather_versions(self): + """Gather AWS SDK (boto3 and botocore) dependency versions + + Returns {'boto3_version': str, 'botocore_version': str} + Returns {} if neither are installed + """ + if not HAS_BOTO3: + return {} + import boto3 + import botocore + return dict(boto3_version=boto3.__version__, + botocore_version=botocore.__version__) + + def boto3_at_least(self, desired): + """Check if the available boto3 version is greater than or equal to a desired version. + + Usage: + if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'): + # conditionally fail on old boto3 versions if a specific feature is not supported + module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.") + """ + existing = self._gather_versions() + return LooseVersion(existing['boto3_version']) >= LooseVersion(desired) + + def botocore_at_least(self, desired): + """Check if the available botocore version is greater than or equal to a desired version. + + Usage: + if not module.botocore_at_least('1.2.3'): + module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3') + if not module.botocore_at_least('1.5.3'): + module.warn('Botocore did not include waiters for Service X before 1.5.3. ' + 'To wait until Service X resources are fully available, update botocore.') + """ + existing = self._gather_versions() + return LooseVersion(existing['botocore_version']) >= LooseVersion(desired) + + +class _RetryingBotoClientWrapper(object): + __never_wait = ( + 'get_paginator', 'can_paginate', + 'get_waiter', 'generate_presigned_url', + ) + + def __init__(self, client, retry): + self.client = client + self.retry = retry + + def _create_optional_retry_wrapper_function(self, unwrapped): + retrying_wrapper = self.retry(unwrapped) + + @wraps(unwrapped) + def deciding_wrapper(aws_retry=False, *args, **kwargs): + if aws_retry: + return retrying_wrapper(*args, **kwargs) + else: + return unwrapped(*args, **kwargs) + return deciding_wrapper + + def __getattr__(self, name): + unwrapped = getattr(self.client, name) + if name in self.__never_wait: + return unwrapped + elif callable(unwrapped): + wrapped = self._create_optional_retry_wrapper_function(unwrapped) + setattr(self, name, wrapped) + return wrapped + else: + return unwrapped + + +def is_boto3_error_code(code, e=None): + """Check if the botocore exception is raised by a specific error code. 
+
+    Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+    Example:
+    try:
+        ec2.describe_instances(InstanceIds=['potato'])
+    except is_boto3_error_code('InvalidInstanceID.Malformed'):
+        # handle the error for that code case
+    except botocore.exceptions.ClientError as e:
+        # handle the generic error case for all other codes
+    """
+    from botocore.exceptions import ClientError
+    if e is None:
+        import sys
+        dummy, e, dummy = sys.exc_info()
+    if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
+        return ClientError
+    return type('NeverEverRaisedException', (Exception,), {})
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
+    op = client.meta.method_to_api_mapping.get(method_name)
+    input_shape = client._service_model.operation_model(op).input_shape
+    if not input_shape:
+        parameters = []
+    elif required:
+        parameters = list(input_shape.required_members)
+    else:
+        parameters = list(input_shape.members.keys())
+    return parameters
diff --git a/test/support/integration/plugins/module_utils/aws/iam.py b/test/support/integration/plugins/module_utils/aws/iam.py
new file mode 100644
index 00000000..f05999aa
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/iam.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import traceback
+
+try:
+    from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+    pass  # caught by HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+
+def get_aws_account_id(module):
+    """ Given an AnsibleAWSModule instance, get the active AWS account ID
+
+    get_aws_account_id tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy so we try in
+    several different ways. Giving either IAM or STS privileges to
+    the account should be enough to permit this.
+    """
+    account_id = None
+    try:
+        sts_client = module.client('sts')
+        account_id = sts_client.get_caller_identity().get('Account')
+    # non-STS sessions may also get NoCredentialsError from this STS call, so
+    # we must catch that too and try the IAM version
+    except (ClientError, NoCredentialsError):
+        try:
+            iam_client = module.client('iam')
+            account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
+        except ClientError as e:
+            if (e.response['Error']['Code'] == 'AccessDenied'):
+                except_msg = to_native(e)
+                # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
+                account_id = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
+            if account_id is None:
+                module.fail_json_aws(e, msg="Could not get AWS account information")
+        except Exception as e:
+            module.fail_json(
+                msg="Failed to get AWS account information. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
+                exception=traceback.format_exc()
+            )
+    if not account_id:
+        module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
+    return to_native(account_id)
diff --git a/test/support/integration/plugins/module_utils/aws/s3.py b/test/support/integration/plugins/module_utils/aws/s3.py
new file mode 100644
index 00000000..2185869d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/s3.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by the calling module + +HAS_MD5 = True +try: + from hashlib import md5 +except ImportError: + try: + from md5 import md5 + except ImportError: + HAS_MD5 = False + + +def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): + if not HAS_MD5: + return None + + if '-' in etag: + # Multi-part ETag; a hash of the hashes of each part. + parts = int(etag[1:-1].split('-')[1]) + digests = [] + + s3_kwargs = dict( + Bucket=bucket, + Key=obj, + ) + if version: + s3_kwargs['VersionId'] = version + + with open(filename, 'rb') as f: + for part_num in range(1, parts + 1): + s3_kwargs['PartNumber'] = part_num + try: + head = s3.head_object(**s3_kwargs) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") + digests.append(md5(f.read(int(head['ContentLength'])))) + + digest_squared = md5(b''.join(m.digest() for m in digests)) + return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + else: # Compute the MD5 sum normally + return '"{0}"'.format(module.md5(filename)) diff --git a/test/support/integration/plugins/module_utils/aws/waiters.py b/test/support/integration/plugins/module_utils/aws/waiters.py new file mode 100644 index 00000000..25db598b --- /dev/null +++ b/test/support/integration/plugins/module_utils/aws/waiters.py @@ -0,0 +1,405 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + import botocore.waiter as core_waiter +except ImportError: + pass # caught by HAS_BOTO3 + + +ec2_data = { + "version": 2, + "waiters": { + "InternetGatewayExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInternetGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(InternetGateways) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInternetGatewayID.NotFound", + "state": "retry" + }, + ] + }, + "RouteTableExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeRouteTables", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(RouteTables[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidRouteTableID.NotFound", + "state": "retry" + }, + ] + }, + "SecurityGroupExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSecurityGroups", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(SecurityGroups[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidGroup.NotFound", + "state": "retry" + }, + ] + }, + "SubnetExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(Subnets[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidSubnetID.NotFound", + "state": "retry" + }, + ] + }, + "SubnetHasMapPublic": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": True, + "argument": "Subnets[].MapPublicIpOnLaunch", + "state": 
"success" + }, + ] + }, + "SubnetNoMapPublic": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": False, + "argument": "Subnets[].MapPublicIpOnLaunch", + "state": "success" + }, + ] + }, + "SubnetHasAssignIpv6": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": True, + "argument": "Subnets[].AssignIpv6AddressOnCreation", + "state": "success" + }, + ] + }, + "SubnetNoAssignIpv6": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": False, + "argument": "Subnets[].AssignIpv6AddressOnCreation", + "state": "success" + }, + ] + }, + "SubnetDeleted": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(Subnets[]) > `0`", + "state": "retry" + }, + { + "matcher": "error", + "expected": "InvalidSubnetID.NotFound", + "state": "success" + }, + ] + }, + "VpnGatewayExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeVpnGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(VpnGateways[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpnGatewayID.NotFound", + "state": "retry" + }, + ] + }, + "VpnGatewayDetached": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeVpnGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "VpnGateways[0].State == 'available'", + "state": "success" + }, + ] + }, + } +} + + +waf_data = { + "version": 2, + "waiters": { + "ChangeTokenInSync": { + "delay": 20, + "maxAttempts": 60, + "operation": "GetChangeTokenStatus", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "ChangeTokenStatus == 'INSYNC'", + "state": "success" + }, + { + "matcher": "error", + "expected": "WAFInternalErrorException", + "state": "retry" + } + ] + } + } +} + +eks_data = { + "version": 2, + "waiters": { + "ClusterActive": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeCluster", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "cluster.status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "ClusterDeleted": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeCluster", + "acceptors": [ + { + "state": "retry", + "matcher": "path", + "argument": "cluster.status != 'DELETED'", + "expected": True + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + } + } +} + + +rds_data = { + "version": 2, + "waiters": { + "DBInstanceStopped": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBInstances[].DBInstanceStatus", + "expected": "stopped" + }, + ] + } + } +} + + +def ec2_model(name): + ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data) + return ec2_models.get_waiter(name) + + +def waf_model(name): + waf_models = core_waiter.WaiterModel(waiter_config=waf_data) + return waf_models.get_waiter(name) + + +def eks_model(name): + eks_models = core_waiter.WaiterModel(waiter_config=eks_data) + return eks_models.get_waiter(name) + + +def rds_model(name): + rds_models = core_waiter.WaiterModel(waiter_config=rds_data) + return 
rds_models.get_waiter(name) + + +waiters_by_name = { + ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter( + 'internet_gateway_exists', + ec2_model('InternetGatewayExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_internet_gateways + )), + ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter( + 'route_table_exists', + ec2_model('RouteTableExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_route_tables + )), + ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter( + 'security_group_exists', + ec2_model('SecurityGroupExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_security_groups + )), + ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter( + 'subnet_exists', + ec2_model('SubnetExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter( + 'subnet_has_map_public', + ec2_model('SubnetHasMapPublic'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter( + 'subnet_no_map_public', + ec2_model('SubnetNoMapPublic'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter( + 'subnet_has_assign_ipv6', + ec2_model('SubnetHasAssignIpv6'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter( + 'subnet_no_assign_ipv6', + ec2_model('SubnetNoAssignIpv6'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter( + 'subnet_deleted', + ec2_model('SubnetDeleted'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter( + 'vpn_gateway_exists', + ec2_model('VpnGatewayExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpn_gateways + )), + ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter( + 'vpn_gateway_detached', + ec2_model('VpnGatewayDetached'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpn_gateways + )), + ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( + 'change_token_in_sync', + waf_model('ChangeTokenInSync'), + core_waiter.NormalizedOperationMethod( + waf.get_change_token_status + )), + ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( + 'change_token_in_sync', + waf_model('ChangeTokenInSync'), + core_waiter.NormalizedOperationMethod( + waf.get_change_token_status + )), + ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter( + 'cluster_active', + eks_model('ClusterActive'), + core_waiter.NormalizedOperationMethod( + eks.describe_cluster + )), + ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter( + 'cluster_deleted', + eks_model('ClusterDeleted'), + core_waiter.NormalizedOperationMethod( + eks.describe_cluster + )), + ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter( + 'db_instance_stopped', + rds_model('DBInstanceStopped'), + core_waiter.NormalizedOperationMethod( + rds.describe_db_instances + )), +} + + +def get_waiter(client, waiter_name): + try: + return waiters_by_name[(client.__class__.__name__, waiter_name)](client) + except KeyError: + raise NotImplementedError("Waiter {0} could not be found for client {1}. 
Available waiters: {2}".format( + waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys()))) diff --git a/test/support/integration/plugins/module_utils/azure_rm_common.py b/test/support/integration/plugins/module_utils/azure_rm_common.py new file mode 100644 index 00000000..a7b55e97 --- /dev/null +++ b/test/support/integration/plugins/module_utils/azure_rm_common.py @@ -0,0 +1,1473 @@ +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import os +import re +import types +import copy +import inspect +import traceback +import json + +from os.path import expanduser + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +try: + from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION +except Exception: + ANSIBLE_VERSION = 'unknown' +from ansible.module_utils.six.moves import configparser +import ansible.module_utils.six.moves.urllib.parse as urlparse + +AZURE_COMMON_ARGS = dict( + auth_source=dict( + type='str', + choices=['auto', 'cli', 'env', 'credential_file', 'msi'] + ), + profile=dict(type='str'), + subscription_id=dict(type='str'), + client_id=dict(type='str', no_log=True), + secret=dict(type='str', no_log=True), + tenant=dict(type='str', no_log=True), + ad_user=dict(type='str', no_log=True), + password=dict(type='str', no_log=True), + cloud_environment=dict(type='str', default='AzureCloud'), + cert_validation_mode=dict(type='str', choices=['validate', 'ignore']), + api_profile=dict(type='str', default='latest'), + adfs_authority_url=dict(type='str', default=None) +) + +AZURE_CREDENTIAL_ENV_MAPPING = dict( + profile='AZURE_PROFILE', + subscription_id='AZURE_SUBSCRIPTION_ID', + client_id='AZURE_CLIENT_ID', + secret='AZURE_SECRET', + tenant='AZURE_TENANT', + ad_user='AZURE_AD_USER', + password='AZURE_PASSWORD', + cloud_environment='AZURE_CLOUD_ENVIRONMENT', + cert_validation_mode='AZURE_CERT_VALIDATION_MODE', + adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' +) + + +class SDKProfile(object): # pylint: disable=too-few-public-methods + + def __init__(self, default_api_version, profile=None): + """Constructor. + + :param str default_api_version: Default API version if not overridden by a profile. Nullable. + :param profile: A dict operation group name to API version. + :type profile: dict[str, str] + """ + self.profile = profile if profile is not None else {} + self.profile[None] = default_api_version + + @property + def default_api_version(self): + return self.profile[None] + + +# FUTURE: this should come from the SDK or an external location. 
+# For now, we have to copy from azure-cli +AZURE_API_PROFILES = { + 'latest': { + 'ContainerInstanceManagementClient': '2018-02-01-preview', + 'ComputeManagementClient': dict( + default_api_version='2018-10-01', + resource_skus='2018-10-01', + disks='2018-06-01', + snapshots='2018-10-01', + virtual_machine_run_commands='2018-10-01' + ), + 'NetworkManagementClient': '2018-08-01', + 'ResourceManagementClient': '2017-05-10', + 'StorageManagementClient': '2017-10-01', + 'WebSiteManagementClient': '2018-02-01', + 'PostgreSQLManagementClient': '2017-12-01', + 'MySQLManagementClient': '2017-12-01', + 'MariaDBManagementClient': '2019-03-01', + 'ManagementLockClient': '2016-09-01' + }, + '2019-03-01-hybrid': { + 'StorageManagementClient': '2017-10-01', + 'NetworkManagementClient': '2017-10-01', + 'ComputeManagementClient': SDKProfile('2017-12-01', { + 'resource_skus': '2017-09-01', + 'disks': '2017-03-30', + 'snapshots': '2017-03-30' + }), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2016-09-01', + 'PolicyClient': '2016-12-01', + 'ResourceManagementClient': '2018-05-01', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01', + 'policy_assignments': '2016-12-01', + 'policy_definitions': '2016-12-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2017-11-09', + 'azure.multiapi.cosmosdb': '2017-04-17' + }, + '2018-03-01-hybrid': { + 'StorageManagementClient': '2016-01-01', + 'NetworkManagementClient': '2017-10-01', + 'ComputeManagementClient': SDKProfile('2017-03-30'), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2016-09-01', + 'PolicyClient': '2016-12-01', + 'ResourceManagementClient': '2018-02-01', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2017-04-17', + 'azure.multiapi.cosmosdb': '2017-04-17' + }, + '2017-03-09-profile': { + 'StorageManagementClient': '2016-01-01', + 'NetworkManagementClient': '2015-06-15', + 'ComputeManagementClient': SDKProfile('2016-03-30'), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2015-01-01', + 'PolicyClient': '2015-10-01-preview', + 'ResourceManagementClient': '2016-02-01', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2015-04-05' + } +} + +AZURE_TAG_ARGS = dict( + tags=dict(type='dict'), + append_tags=dict(type='bool', default=True), +) + +AZURE_COMMON_REQUIRED_IF = [ + ('log_mode', 'file', ['log_path']) +] + +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION) +CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT' +VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT' + +CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1" + r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))") + +AZURE_SUCCESS_STATE = "Succeeded" +AZURE_FAILED_STATE = "Failed" + +HAS_AZURE = True +HAS_AZURE_EXC = None +HAS_AZURE_CLI_CORE = True +HAS_AZURE_CLI_CORE_EXC = None + +HAS_MSRESTAZURE = True 
+HAS_MSRESTAZURE_EXC = None + +try: + import importlib +except ImportError: + # This passes the sanity import test, but does not provide a user friendly error message. + # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils. + importlib = None + +try: + from packaging.version import Version + HAS_PACKAGING_VERSION = True + HAS_PACKAGING_VERSION_EXC = None +except ImportError: + Version = None + HAS_PACKAGING_VERSION = False + HAS_PACKAGING_VERSION_EXC = traceback.format_exc() + +# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately +try: + from msrest.serialization import Serializer +except ImportError: + HAS_MSRESTAZURE_EXC = traceback.format_exc() + HAS_MSRESTAZURE = False + +try: + from enum import Enum + from msrestazure.azure_active_directory import AADTokenCredentials + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_active_directory import MSIAuthentication + from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id + from msrestazure import azure_cloud + from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials + from azure.mgmt.monitor.version import VERSION as monitor_client_version + from azure.mgmt.network.version import VERSION as network_client_version + from azure.mgmt.storage.version import VERSION as storage_client_version + from azure.mgmt.compute.version import VERSION as compute_client_version + from azure.mgmt.resource.version import VERSION as resource_client_version + from azure.mgmt.dns.version import VERSION as dns_client_version + from azure.mgmt.web.version import VERSION as web_client_version + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.resource.subscriptions import SubscriptionClient + from azure.mgmt.storage import StorageManagementClient + from azure.mgmt.compute import ComputeManagementClient + from azure.mgmt.dns import DnsManagementClient + from azure.mgmt.monitor import MonitorManagementClient + from azure.mgmt.web import WebSiteManagementClient + from azure.mgmt.containerservice import ContainerServiceClient + from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements + from azure.mgmt.trafficmanager import TrafficManagerManagementClient + from azure.storage.cloudstorageaccount import CloudStorageAccount + from azure.storage.blob import PageBlobService, BlockBlobService + from adal.authentication_context import AuthenticationContext + from azure.mgmt.sql import SqlManagementClient + from azure.mgmt.servicebus import ServiceBusManagementClient + import azure.mgmt.servicebus.models as ServicebusModel + from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient + from azure.mgmt.rdbms.mysql import MySQLManagementClient + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from azure.mgmt.containerregistry import ContainerRegistryManagementClient + from azure.mgmt.containerinstance import ContainerInstanceManagementClient + from azure.mgmt.loganalytics import LogAnalyticsManagementClient + import azure.mgmt.loganalytics.models as LogAnalyticsModels + from azure.mgmt.automation import AutomationClient + import azure.mgmt.automation.models as AutomationModel + from azure.mgmt.iothub import IotHubClient + from azure.mgmt.iothub import models as IoTHubModels + from msrest.service_client import ServiceClient + from msrestazure import AzureConfiguration + from 
msrest.authentication import Authentication + from azure.mgmt.resource.locks import ManagementLockClient +except ImportError as exc: + Authentication = object + HAS_AZURE_EXC = traceback.format_exc() + HAS_AZURE = False + +from base64 import b64encode, b64decode +from hashlib import sha256 +from hmac import HMAC +from time import time + +try: + from urllib import (urlencode, quote_plus) +except ImportError: + from urllib.parse import (urlencode, quote_plus) + +try: + from azure.cli.core.util import CLIError + from azure.common.credentials import get_azure_cli_credentials, get_cli_profile + from azure.common.cloud import get_cli_active_cloud +except ImportError: + HAS_AZURE_CLI_CORE = False + HAS_AZURE_CLI_CORE_EXC = None + CLIError = Exception + + +def azure_id_to_dict(id): + pieces = re.sub(r'^\/', '', id).split('/') + result = {} + index = 0 + while index < len(pieces) - 1: + result[pieces[index]] = pieces[index + 1] + index += 1 + return result + + +def format_resource_id(val, subscription_id, namespace, types, resource_group): + return resource_id(name=val, + resource_group=resource_group, + namespace=namespace, + type=types, + subscription=subscription_id) if not is_valid_resource_id(val) else val + + +def normalize_location_name(name): + return name.replace(' ', '').lower() + + +# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime) +# or generate the requirements files from this so we only have one source of truth to maintain... +AZURE_PKG_VERSIONS = { + 'StorageManagementClient': { + 'package_name': 'storage', + 'expected_version': '3.1.0' + }, + 'ComputeManagementClient': { + 'package_name': 'compute', + 'expected_version': '4.4.0' + }, + 'ContainerInstanceManagementClient': { + 'package_name': 'containerinstance', + 'expected_version': '0.4.0' + }, + 'NetworkManagementClient': { + 'package_name': 'network', + 'expected_version': '2.3.0' + }, + 'ResourceManagementClient': { + 'package_name': 'resource', + 'expected_version': '2.1.0' + }, + 'DnsManagementClient': { + 'package_name': 'dns', + 'expected_version': '2.1.0' + }, + 'WebSiteManagementClient': { + 'package_name': 'web', + 'expected_version': '0.41.0' + }, + 'TrafficManagerManagementClient': { + 'package_name': 'trafficmanager', + 'expected_version': '0.50.0' + }, +} if HAS_AZURE else {} + + +AZURE_MIN_RELEASE = '2.0.0' + + +class AzureRMModuleBase(object): + def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False, + mutually_exclusive=None, required_together=None, + required_one_of=None, add_file_common_args=False, supports_check_mode=False, + required_if=None, supports_tags=True, facts_module=False, skip_exec=False): + + merged_arg_spec = dict() + merged_arg_spec.update(AZURE_COMMON_ARGS) + if supports_tags: + merged_arg_spec.update(AZURE_TAG_ARGS) + + if derived_arg_spec: + merged_arg_spec.update(derived_arg_spec) + + merged_required_if = list(AZURE_COMMON_REQUIRED_IF) + if required_if: + merged_required_if += required_if + + self.module = AnsibleModule(argument_spec=merged_arg_spec, + bypass_checks=bypass_checks, + no_log=no_log, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + add_file_common_args=add_file_common_args, + supports_check_mode=supports_check_mode, + required_if=merged_required_if) + + if not HAS_PACKAGING_VERSION: + self.fail(msg=missing_required_lib('packaging'), + exception=HAS_PACKAGING_VERSION_EXC) + + if not HAS_MSRESTAZURE: + 
self.fail(msg=missing_required_lib('msrestazure'), + exception=HAS_MSRESTAZURE_EXC) + + if not HAS_AZURE: + self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)), + exception=HAS_AZURE_EXC) + + self._network_client = None + self._storage_client = None + self._resource_client = None + self._compute_client = None + self._dns_client = None + self._web_client = None + self._marketplace_client = None + self._sql_client = None + self._mysql_client = None + self._mariadb_client = None + self._postgresql_client = None + self._containerregistry_client = None + self._containerinstance_client = None + self._containerservice_client = None + self._managedcluster_client = None + self._traffic_manager_management_client = None + self._monitor_client = None + self._resource = None + self._log_analytics_client = None + self._servicebus_client = None + self._automation_client = None + self._IoThub_client = None + self._lock_client = None + + self.check_mode = self.module.check_mode + self.api_profile = self.module.params.get('api_profile') + self.facts_module = facts_module + # self.debug = self.module.params.get('debug') + + # delegate auth to AzureRMAuth class (shared with all plugin types) + self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params) + + # common parameter validation + if self.module.params.get('tags'): + self.validate_tags(self.module.params['tags']) + + if not skip_exec: + res = self.exec_module(**self.module.params) + self.module.exit_json(**res) + + def check_client_version(self, client_type): + # Ensure Azure modules are at least 2.0.0rc5. + package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None) + if package_version is not None: + client_name = package_version.get('package_name') + try: + client_module = importlib.import_module(client_type.__module__) + client_version = client_module.VERSION + except (RuntimeError, AttributeError): + # can't get at the module version for some reason, just fail silently... + return + expected_version = package_version.get('expected_version') + if Version(client_version) < Version(expected_version): + self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try " + "`pip install ansible[azure]`".format(client_name, client_version, expected_version)) + if Version(client_version) != Version(expected_version): + self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try " + "`pip install ansible[azure]`".format(client_name, client_version, expected_version)) + + def exec_module(self, **kwargs): + self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__)) + + def fail(self, msg, **kwargs): + ''' + Shortcut for calling module.fail() + + :param msg: Error message text. + :param kwargs: Any key=value pairs + :return: None + ''' + self.module.fail_json(msg=msg, **kwargs) + + def deprecate(self, msg, version=None, collection_name=None): + self.module.deprecate(msg, version, collection_name=collection_name) + + def log(self, msg, pretty_print=False): + if pretty_print: + self.module.debug(json.dumps(msg, indent=4, sort_keys=True)) + else: + self.module.debug(msg) + + def validate_tags(self, tags): + ''' + Check if tags dictionary contains string:string pairs. 
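+        Fails the module when a non-string tag value is found; facts modules
+        skip the check entirely.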
+ + :param tags: dictionary of string:string pairs + :return: None + ''' + if not self.facts_module: + if not isinstance(tags, dict): + self.fail("Tags must be a dictionary of string:string values.") + for key, value in tags.items(): + if not isinstance(value, str): + self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value))) + + def update_tags(self, tags): + ''' + Call from the module to update metadata tags. Returns tuple + with bool indicating if there was a change and dict of new + tags to assign to the object. + + :param tags: metadata tags from the object + :return: bool, dict + ''' + tags = tags or dict() + new_tags = copy.copy(tags) if isinstance(tags, dict) else dict() + param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict() + append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True + changed = False + # check add or update + for key, value in param_tags.items(): + if not new_tags.get(key) or new_tags[key] != value: + changed = True + new_tags[key] = value + # check remove + if not append_tags: + for key, value in tags.items(): + if not param_tags.get(key): + new_tags.pop(key) + changed = True + return changed, new_tags + + def has_tags(self, obj_tags, tag_list): + ''' + Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags + exists in object tags. + + :param obj_tags: dictionary of tags from an Azure object. + :param tag_list: list of tag keys or tag key:value pairs + :return: bool + ''' + + if not obj_tags and tag_list: + return False + + if not tag_list: + return True + + matches = 0 + result = False + for tag in tag_list: + tag_key = tag + tag_value = None + if ':' in tag: + tag_key, tag_value = tag.split(':') + if tag_value and obj_tags.get(tag_key) == tag_value: + matches += 1 + elif not tag_value and obj_tags.get(tag_key): + matches += 1 + if matches == len(tag_list): + result = True + return result + + def get_resource_group(self, resource_group): + ''' + Fetch a resource group. + + :param resource_group: name of a resource group + :return: resource group object + ''' + try: + return self.rm_client.resource_groups.get(resource_group) + except CloudError as cloud_error: + self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message)) + except Exception as exc: + self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc))) + + def parse_resource_to_dict(self, resource): + ''' + Return a dict of the give resource, which contains name and resource group. + + :param resource: It can be a resource name, id or a dict contains name and resource group. + ''' + resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource + resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group) + resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id) + return resource_dict + + def serialize_obj(self, obj, class_name, enum_modules=None): + ''' + Return a JSON representation of an Azure object. + + :param obj: Azure object + :param class_name: Name of the object's class + :param enum_modules: List of module names to build enum dependencies from. 
+ :return: serialized result + ''' + enum_modules = [] if enum_modules is None else enum_modules + + dependencies = dict() + if enum_modules: + for module_name in enum_modules: + mod = importlib.import_module(module_name) + for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass): + dependencies[mod_class_name] = mod_class_obj + self.log("dependencies: ") + self.log(str(dependencies)) + serializer = Serializer(classes=dependencies) + return serializer.body(obj, class_name, keep_readonly=True) + + def get_poller_result(self, poller, wait=5): + ''' + Consistent method of waiting on and retrieving results from Azure's long poller + + :param poller Azure poller object + :return object resulting from the original request + ''' + try: + delay = wait + while not poller.done(): + self.log("Waiting for {0} sec".format(delay)) + poller.wait(timeout=delay) + return poller.result() + except Exception as exc: + self.log(str(exc)) + raise + + def check_provisioning_state(self, azure_object, requested_state='present'): + ''' + Check an Azure object's provisioning state. If something did not complete the provisioning + process, then we cannot operate on it. + + :param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state + and name attributes. + :return None + ''' + + if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \ + hasattr(azure_object, 'name'): + # resource group object fits this model + if isinstance(azure_object.properties.provisioning_state, Enum): + if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \ + requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE)) + return + if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \ + requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE)) + return + + if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'): + if isinstance(azure_object.provisioning_state, Enum): + if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE)) + return + if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. 
Expecting state to be {2}.".format(
+                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+
+    def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
+        keys = dict()
+        try:
+            # Get keys from the storage account
+            self.log('Getting keys')
+            account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
+        except Exception as exc:
+            self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
+
+        try:
+            self.log('Create blob service')
+            if storage_blob_type == 'page':
+                return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+                                       account_name=storage_account_name,
+                                       account_key=account_keys.keys[0].value)
+            elif storage_blob_type == 'block':
+                return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+                                        account_name=storage_account_name,
+                                        account_key=account_keys.keys[0].value)
+            else:
+                raise Exception("Invalid storage blob type defined.")
+        except Exception as exc:
+            self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
+                                                                                                str(exc)))
+
+    def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
+        '''
+        Create a default public IP address to associate with a network interface.
+        If a matching PIP address exists, return it. Otherwise, create one.
+
+        :param resource_group: name of an existing resource group
+        :param location: a valid azure location
+        :param public_ip_name: base name to assign the public IP address
+        :param allocation_method: one of 'Static' or 'Dynamic'
+        :param sku: public IP address SKU, passed through to the PublicIPAddress model
+        :return: PIP object
+        '''
+        pip = None
+
+        self.log("Starting create_default_pip {0}".format(public_ip_name))
+        self.log("Check to see if public IP {0} exists".format(public_ip_name))
+        try:
+            pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
+        except CloudError:
+            pass
+
+        if pip:
+            self.log("Public ip {0} found.".format(public_ip_name))
+            self.check_provisioning_state(pip)
+            return pip
+
+        params = self.network_models.PublicIPAddress(
+            location=location,
+            public_ip_allocation_method=allocation_method,
+            sku=sku
+        )
+        self.log('Creating default public IP {0}'.format(public_ip_name))
+        try:
+            poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
+        except Exception as exc:
+            self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
+
+        return self.get_poller_result(poller)
+
+    def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
+        '''
+        Create a default security group to associate with a network interface. If a matching security group
+        exists, return it. Otherwise, create one.
+
+        :param resource_group: Resource group name
+        :param location: azure location name
+        :param security_group_name: base name to use for the security group
+        :param os_type: one of 'Windows' or 'Linux'. Determines the default inbound rules added to the
+                        security group (SSH for 'Linux'; RDP and WinRM for 'Windows') when open_ports is not set.
+        :param open_ports: optional list of TCP ports to open instead of the OS-based defaults.
+ :return: security_group object + ''' + group = None + + self.log("Create security group {0}".format(security_group_name)) + self.log("Check to see if security group {0} exists".format(security_group_name)) + try: + group = self.network_client.network_security_groups.get(resource_group, security_group_name) + except CloudError: + pass + + if group: + self.log("Security group {0} found.".format(security_group_name)) + self.check_provisioning_state(group) + return group + + parameters = self.network_models.NetworkSecurityGroup() + parameters.location = location + + if not open_ports: + # Open default ports based on OS type + if os_type == 'Linux': + # add an inbound SSH rule + parameters.security_rules = [ + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow SSH Access', + source_port_range='*', + destination_port_range='22', + priority=100, + name='SSH') + ] + parameters.location = location + else: + # for windows add inbound RDP and WinRM rules + parameters.security_rules = [ + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow RDP port 3389', + source_port_range='*', + destination_port_range='3389', + priority=100, + name='RDP01'), + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow WinRM HTTPS port 5986', + source_port_range='*', + destination_port_range='5986', + priority=101, + name='WinRM01'), + ] + else: + # Open custom ports + parameters.security_rules = [] + priority = 100 + for port in open_ports: + priority += 1 + rule_name = "Rule_{0}".format(priority) + parameters.security_rules.append( + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + source_port_range='*', + destination_port_range=str(port), + priority=priority, + name=rule_name) + ) + + self.log('Creating default security group {0}'.format(security_group_name)) + try: + poller = self.network_client.network_security_groups.create_or_update(resource_group, + security_group_name, + parameters) + except Exception as exc: + self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc))) + + return self.get_poller_result(poller) + + @staticmethod + def _validation_ignore_callback(session, global_config, local_config, **kwargs): + session.verify = False + + def get_api_profile(self, client_type_name, api_profile_name): + profile_all_clients = AZURE_API_PROFILES.get(api_profile_name) + + if not profile_all_clients: + raise KeyError("unknown Azure API profile: {0}".format(api_profile_name)) + + profile_raw = profile_all_clients.get(client_type_name, None) + + if not profile_raw: + self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name)) + + if isinstance(profile_raw, dict): + if not profile_raw.get('default_api_version'): + raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name)) + return profile_raw + + # wrap basic strings in a dict that just defines the default + return dict(default_api_version=profile_raw) + + def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None): + self.log('Getting management 
service client {0}'.format(client_type.__name__)) + self.check_client_version(client_type) + + client_argspec = inspect.getargspec(client_type.__init__) + + if not base_url: + # most things are resource_manager, don't make everyone specify + base_url = self.azure_auth._cloud_environment.endpoints.resource_manager + + client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url) + + api_profile_dict = {} + + if self.api_profile: + api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile) + + # unversioned clients won't accept profile; only send it if necessary + # clients without a version specified in the profile will use the default + if api_profile_dict and 'profile' in client_argspec.args: + client_kwargs['profile'] = api_profile_dict + + # If the client doesn't accept api_version, it's unversioned. + # If it does, favor explicitly-specified api_version, fall back to api_profile + if 'api_version' in client_argspec.args: + profile_default_version = api_profile_dict.get('default_api_version', None) + if api_version or profile_default_version: + client_kwargs['api_version'] = api_version or profile_default_version + if 'profile' in client_kwargs: + # remove profile; only pass API version if specified + client_kwargs.pop('profile') + + client = client_type(**client_kwargs) + + # FUTURE: remove this once everything exposes models directly (eg, containerinstance) + try: + getattr(client, "models") + except AttributeError: + def _ansible_get_models(self, *arg, **kwarg): + return self._ansible_models + + setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models) + client.models = types.MethodType(_ansible_get_models, client) + + client.config = self.add_user_agent(client.config) + + if self.azure_auth._cert_validation_mode == 'ignore': + client.config.session_configuration_callback = self._validation_ignore_callback + + return client + + def add_user_agent(self, config): + # Add user agent for Ansible + config.add_user_agent(ANSIBLE_USER_AGENT) + # Add user agent when running from Cloud Shell + if CLOUDSHELL_USER_AGENT_KEY in os.environ: + config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY]) + # Add user agent when running from VSCode extension + if VSCODEEXT_USER_AGENT_KEY in os.environ: + config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY]) + return config + + def generate_sas_token(self, **kwags): + base_url = kwags.get('base_url', None) + expiry = kwags.get('expiry', time() + 3600) + key = kwags.get('key', None) + policy = kwags.get('policy', None) + url = quote_plus(base_url) + ttl = int(expiry) + sign_key = '{0}\n{1}'.format(url, ttl) + signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': url, + 'sig': signature, + 'se': str(ttl), + } + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + def get_data_svc_client(self, **kwags): + url = kwags.get('base_url', None) + config = AzureConfiguration(base_url='https://{0}'.format(url)) + config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwags)) + config = self.add_user_agent(config) + return ServiceClient(creds=config.credentials, config=config) + + # passthru methods to AzureAuth instance for backcompat + @property + def credentials(self): + return self.azure_auth.credentials + + @property + def _cloud_environment(self): + return self.azure_auth._cloud_environment + + @property + def 
subscription_id(self): + return self.azure_auth.subscription_id + + @property + def storage_client(self): + self.log('Getting storage client...') + if not self._storage_client: + self._storage_client = self.get_mgmt_svc_client(StorageManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-07-01') + return self._storage_client + + @property + def storage_models(self): + return StorageManagementClient.models("2018-07-01") + + @property + def network_client(self): + self.log('Getting network client') + if not self._network_client: + self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2019-06-01') + return self._network_client + + @property + def network_models(self): + self.log("Getting network models...") + return NetworkManagementClient.models("2018-08-01") + + @property + def rm_client(self): + self.log('Getting resource manager client') + if not self._resource_client: + self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2017-05-10') + return self._resource_client + + @property + def rm_models(self): + self.log("Getting resource manager models") + return ResourceManagementClient.models("2017-05-10") + + @property + def compute_client(self): + self.log('Getting compute client') + if not self._compute_client: + self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2019-07-01') + return self._compute_client + + @property + def compute_models(self): + self.log("Getting compute models") + return ComputeManagementClient.models("2019-07-01") + + @property + def dns_client(self): + self.log('Getting dns client') + if not self._dns_client: + self._dns_client = self.get_mgmt_svc_client(DnsManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-05-01') + return self._dns_client + + @property + def dns_models(self): + self.log("Getting dns models...") + return DnsManagementClient.models('2018-05-01') + + @property + def web_client(self): + self.log('Getting web client') + if not self._web_client: + self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-02-01') + return self._web_client + + @property + def containerservice_client(self): + self.log('Getting container service client') + if not self._containerservice_client: + self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2017-07-01') + return self._containerservice_client + + @property + def managedcluster_models(self): + self.log("Getting container service models") + return ContainerServiceClient.models('2018-03-31') + + @property + def managedcluster_client(self): + self.log('Getting container service client') + if not self._managedcluster_client: + self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-03-31') + return self._managedcluster_client + + @property + def sql_client(self): + self.log('Getting SQL client') + if not self._sql_client: + self._sql_client = self.get_mgmt_svc_client(SqlManagementClient, + 
+                                                         base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._sql_client
+
+    @property
+    def postgresql_client(self):
+        self.log('Getting PostgreSQL client')
+        if not self._postgresql_client:
+            self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
+                                                               base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._postgresql_client
+
+    @property
+    def mysql_client(self):
+        self.log('Getting MySQL client')
+        if not self._mysql_client:
+            self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
+                                                          base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._mysql_client
+
+    @property
+    def mariadb_client(self):
+        self.log('Getting MariaDB client')
+        if not self._mariadb_client:
+            self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+                                                            base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._mariadb_client
+
+    @property
+    def containerregistry_client(self):
+        self.log('Getting container registry mgmt client')
+        if not self._containerregistry_client:
+            self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
+                                                                      base_url=self._cloud_environment.endpoints.resource_manager,
+                                                                      api_version='2017-10-01')
+
+        return self._containerregistry_client
+
+    @property
+    def containerinstance_client(self):
+        self.log('Getting container instance mgmt client')
+        if not self._containerinstance_client:
+            self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
+                                                                      base_url=self._cloud_environment.endpoints.resource_manager,
+                                                                      api_version='2018-06-01')
+
+        return self._containerinstance_client
+
+    @property
+    def marketplace_client(self):
+        self.log('Getting marketplace agreement client')
+        if not self._marketplace_client:
+            self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
+                                                                base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._marketplace_client
+
+    @property
+    def traffic_manager_management_client(self):
+        self.log('Getting traffic manager client')
+        if not self._traffic_manager_management_client:
+            self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
+                                                                               base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._traffic_manager_management_client
+
+    @property
+    def monitor_client(self):
+        self.log('Getting monitor client')
+        if not self._monitor_client:
+            self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
+                                                            base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._monitor_client
+
+    @property
+    def log_analytics_client(self):
+        self.log('Getting log analytics client')
+        if not self._log_analytics_client:
+            self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,
+                                                                  base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._log_analytics_client
+
+    @property
+    def log_analytics_models(self):
+        self.log('Getting log analytics models')
+        return LogAnalyticsModels
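All of these client properties share one memoization pattern: build the SDK client once through get_mgmt_svc_client() with a pinned api_version, cache it on the instance, and return the cached object on every later access. A stripped-down sketch of that pattern (the names below are illustrative stand-ins, not Azure SDK classes):

    class LazyClientExample(object):
        def __init__(self, factory):
            self._factory = factory          # stands in for get_mgmt_svc_client
            self._widget_client = None

        @property
        def widget_client(self):
            if not self._widget_client:      # first access constructs the client
                self._widget_client = self._factory('WidgetManagementClient',
                                                    api_version='2019-01-01')
            return self._widget_client

    lazy = LazyClientExample(lambda name, api_version: object())
    assert lazy.widget_client is lazy.widget_client   # built exactly once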
+    @property
+    def servicebus_client(self):
+        self.log('Getting servicebus client')
+        if not self._servicebus_client:
+            self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
+                                                               base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._servicebus_client
+
+    @property
+    def servicebus_models(self):
+        return ServicebusModel
+
+    @property
+    def automation_client(self):
+        self.log('Getting automation client')
+        if not self._automation_client:
+            self._automation_client = self.get_mgmt_svc_client(AutomationClient,
+                                                               base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._automation_client
+
+    @property
+    def automation_models(self):
+        return AutomationModel
+
+    @property
+    def IoThub_client(self):
+        self.log('Getting iothub client')
+        if not self._IoThub_client:
+            self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,
+                                                           base_url=self._cloud_environment.endpoints.resource_manager)
+        return self._IoThub_client
+
+    @property
+    def IoThub_models(self):
+        return IoTHubModels
+
+    @property
+    def lock_client(self):
+        self.log('Getting lock client')
+        if not self._lock_client:
+            self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,
+                                                         base_url=self._cloud_environment.endpoints.resource_manager,
+                                                         api_version='2016-09-01')
+        return self._lock_client
+
+    @property
+    def lock_models(self):
+        self.log("Getting lock models")
+        return ManagementLockClient.models('2016-09-01')
+
+
+class AzureSASAuthentication(Authentication):
+    """Simple SAS Authentication.
+    An implementation of Authentication in
+    https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py
+
+    :param str token: SAS token
+    """
+    def __init__(self, token):
+        self.token = token
+
+    def signed_session(self):
+        session = super(AzureSASAuthentication, self).signed_session()
+        session.headers['Authorization'] = self.token
+        return session
+
+
+class AzureRMAuthException(Exception):
+    pass
+
+
+class AzureRMAuth(object):
+    def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
+                 tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
+                 api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
+
+        if fail_impl:
+            self._fail_impl = fail_impl
+        else:
+            self._fail_impl = self._default_fail_impl
+
+        self._cloud_environment = None
+        self._adfs_authority_url = None
+
+        # authenticate
+        self.credentials = self._get_credentials(
+            dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
+                 tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
+                 cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))
+
+        if not self.credentials:
+            if HAS_AZURE_CLI_CORE:
+                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+                          "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
+            else:
+                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+                          "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
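generate_sas_token() (earlier in this class) and AzureSASAuthentication (just above) together cover data-plane auth: the token is an HMAC-SHA256 signature over '{url-encoded resource}\n{expiry}' keyed with the base64-decoded access key, and signed_session() then places it verbatim in the Authorization header. A self-contained sketch of the signing step (the endpoint and key below are placeholders, not real credentials):

    from base64 import b64decode, b64encode
    from hashlib import sha256
    from hmac import HMAC
    from time import time

    try:
        from urllib.parse import quote_plus, urlencode   # Python 3
    except ImportError:
        from urllib import quote_plus, urlencode         # Python 2

    def make_sas_token(base_url, b64_key, policy=None, ttl=3600):
        url = quote_plus(base_url)
        expiry = int(time() + ttl)
        to_sign = '{0}\n{1}'.format(url, expiry)
        sig = b64encode(HMAC(b64decode(b64_key), to_sign.encode('utf-8'), sha256).digest())
        fields = {'sr': url, 'sig': sig.decode('utf-8'), 'se': str(expiry)}
        if policy:
            fields['skn'] = policy
        return 'SharedAccessSignature ' + urlencode(fields)

    # placeholder endpoint and key only:
    print(make_sas_token('myhub.azure-devices.net', b64encode(b'dummy-key').decode('utf-8')))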
+        # cert validation mode precedence: module-arg, credential profile, env, "validate"
+        self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
+            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
+
+        if self._cert_validation_mode not in ['validate', 'ignore']:
+            self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
+
+        # if cloud_environment specified, look up/build Cloud object
+        raw_cloud_env = self.credentials.get('cloud_environment')
+        if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
+            self._cloud_environment = raw_cloud_env
+        elif not raw_cloud_env:
+            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
+        else:
+            # try to look up "well-known" values via the name attribute on azure_cloud members
+            all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+            if len(matched_clouds) == 1:
+                self._cloud_environment = matched_clouds[0]
+            elif len(matched_clouds) > 1:
+                self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
+            else:
+                if not urlparse.urlparse(raw_cloud_env).scheme:
+                    self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
+                try:
+                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+                except Exception as e:
+                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, str(e)), exception=traceback.format_exc())
+
+        if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
+            self.fail("Credentials did not include a subscription_id value.")
+        self.log("setting subscription_id")
+        self.subscription_id = self.credentials['subscription_id']
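The lookup above accepts either a well-known cloud name or an endpoint-discovery URL. A quick way to see which names the installed SDK will match (a sketch, assuming msrestazure is available; the exact list depends on its version):

    import inspect
    from msrestazure import azure_cloud

    clouds = [c for _, c in inspect.getmembers(azure_cloud) if isinstance(c, azure_cloud.Cloud)]
    print(sorted(c.name for c in clouds))
    # e.g. ['AzureChinaCloud', 'AzureCloud', 'AzureGermanCloud', 'AzureUSGovernment']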
+        # get authentication authority
+        # for adfs, the user may pass in an authority explicitly;
+        # for all other credential types, use the default authority from the cloud environment
+        if self.credentials.get('adfs_authority_url') is None:
+            self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+        else:
+            self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+
+        # get resource from cloud environment
+        self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+        if self.credentials.get('credentials') is not None:
+            # AzureCLI credentials
+            self.azure_credentials = self.credentials['credentials']
+        elif self.credentials.get('client_id') is not None and \
+                self.credentials.get('secret') is not None and \
+                self.credentials.get('tenant') is not None:
+            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+                                                                 secret=self.credentials['secret'],
+                                                                 tenant=self.credentials['tenant'],
+                                                                 cloud_environment=self._cloud_environment,
+                                                                 verify=self._cert_validation_mode == 'validate')
+
+        elif self.credentials.get('ad_user') is not None and \
+                self.credentials.get('password') is not None and \
+                self.credentials.get('client_id') is not None and \
+                self.credentials.get('tenant') is not None:
+
+            self.azure_credentials = self.acquire_token_with_username_password(
+                self._adfs_authority_url,
+                self._resource,
+                self.credentials['ad_user'],
+                self.credentials['password'],
+                self.credentials['client_id'],
+                self.credentials['tenant'])
+
+        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+            tenant = self.credentials.get('tenant')
+            if not tenant:
+                tenant = 'common'  # SDK default
+
+            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+                                                         self.credentials['password'],
+                                                         tenant=tenant,
+                                                         cloud_environment=self._cloud_environment,
+                                                         verify=self._cert_validation_mode == 'validate')
+        else:
+            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+                      "Credentials must include client_id, secret and tenant, or ad_user and password, or "
+                      "ad_user, password, client_id, tenant and adfs_authority_url (optional) for ADFS authentication, or "
+                      "be logged in using Azure CLI.")
+
+    def fail(self, msg, exception=None, **kwargs):
+        self._fail_impl(msg)
+
+    def _default_fail_impl(self, msg, exception=None, **kwargs):
+        raise AzureRMAuthException(msg)
+
+    def _get_profile(self, profile="default"):
+        path = expanduser("~/.azure/credentials")
+        try:
+            config = configparser.ConfigParser()
+            config.read(path)
+        except Exception as exc:
+            self.fail("Failed to access {0}. Check that the file exists and you have read "
+                      "access. {1}".format(path, str(exc)))
+        credentials = dict()
+        for key in AZURE_CREDENTIAL_ENV_MAPPING:
+            try:
+                credentials[key] = config.get(profile, key, raw=True)
+            except Exception:
+                pass
+
+        if credentials.get('subscription_id'):
+            return credentials
+
+        return None
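For reference, _get_profile() reads a plain configparser INI file at ~/.azure/credentials, one section per profile, keeping only the keys named in AZURE_CREDENTIAL_ENV_MAPPING. A representative file (all values are placeholders):

    [default]
    subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
    client_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
    secret=placeholder-service-principal-secret
    tenant=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx

    [testing]
    subscription_id=yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy
    ad_user=someone@example.onmicrosoft.com
    password=placeholder-password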
" + "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc))) + return { + 'credentials': credentials, + 'subscription_id': subscription_id + } + + def _get_azure_cli_credentials(self): + credentials, subscription_id = get_azure_cli_credentials() + cloud_environment = get_cli_active_cloud() + + cli_credentials = { + 'credentials': credentials, + 'subscription_id': subscription_id, + 'cloud_environment': cloud_environment + } + return cli_credentials + + def _get_env_credentials(self): + env_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + env_credentials[attribute] = os.environ.get(env_variable, None) + + if env_credentials['profile']: + credentials = self._get_profile(env_credentials['profile']) + return credentials + + if env_credentials.get('subscription_id') is not None: + return env_credentials + + return None + + # TODO: use explicit kwargs instead of intermediate dict + def _get_credentials(self, params): + # Get authentication credentials. + self.log('Getting credentials') + + arg_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + arg_credentials[attribute] = params.get(attribute, None) + + auth_source = params.get('auth_source', None) + if not auth_source: + auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto') + + if auth_source == 'msi': + self.log('Retrieving credenitals from MSI') + return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None)) + + if auth_source == 'cli': + if not HAS_AZURE_CLI_CORE: + self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'), + exception=HAS_AZURE_CLI_CORE_EXC) + try: + self.log('Retrieving credentials from Azure CLI profile') + cli_credentials = self._get_azure_cli_credentials() + return cli_credentials + except CLIError as err: + self.fail("Azure CLI profile cannot be loaded - {0}".format(err)) + + if auth_source == 'env': + self.log('Retrieving credentials from environment') + env_credentials = self._get_env_credentials() + return env_credentials + + if auth_source == 'credential_file': + self.log("Retrieving credentials from credential file") + profile = params.get('profile') or 'default' + default_credentials = self._get_profile(profile) + return default_credentials + + # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials + # try module params + if arg_credentials['profile'] is not None: + self.log('Retrieving credentials with profile parameter.') + credentials = self._get_profile(arg_credentials['profile']) + return credentials + + if arg_credentials['subscription_id']: + self.log('Received credentials from parameters.') + return arg_credentials + + # try environment + env_credentials = self._get_env_credentials() + if env_credentials: + self.log('Received credentials from env.') + return env_credentials + + # try default profile from ~./azure/credentials + default_credentials = self._get_profile() + if default_credentials: + self.log('Retrieved default profile credentials from ~/.azure/credentials.') + return default_credentials + + try: + if HAS_AZURE_CLI_CORE: + self.log('Retrieving credentials from AzureCLI profile') + cli_credentials = self._get_azure_cli_credentials() + return cli_credentials + except CLIError as ce: + self.log('Error getting AzureCLI profile credentials - {0}'.format(ce)) + + return None + + def acquire_token_with_username_password(self, authority, 
resource, username, password, client_id, tenant): + authority_uri = authority + + if tenant is not None: + authority_uri = authority + '/' + tenant + + context = AuthenticationContext(authority_uri) + token_response = context.acquire_token_with_username_password(resource, username, password, client_id) + + return AADTokenCredentials(token_response) + + def log(self, msg, pretty_print=False): + pass + # Use only during module development + # if self.debug: + # log_file = open('azure_rm.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, indent=4, sort_keys=True)) + # else: + # log_file.write(msg + u'\n') diff --git a/test/support/integration/plugins/module_utils/azure_rm_common_rest.py b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py new file mode 100644 index 00000000..4fd7eaa3 --- /dev/null +++ b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py @@ -0,0 +1,97 @@ +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_configuration import AzureConfiguration + from msrest.service_client import ServiceClient + from msrest.pipeline import ClientRawResponse + from msrest.polling import LROPoller + from msrestazure.polling.arm_polling import ARMPolling + import uuid + import json +except ImportError: + # This is handled in azure_rm_common + AzureConfiguration = object + +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION) + + +class GenericRestClientConfiguration(AzureConfiguration): + + def __init__(self, credentials, subscription_id, base_url=None): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if not base_url: + base_url = 'https://management.azure.com' + + super(GenericRestClientConfiguration, self).__init__(base_url) + + self.add_user_agent(ANSIBLE_USER_AGENT) + + self.credentials = credentials + self.subscription_id = subscription_id + + +class GenericRestClient(object): + + def __init__(self, credentials, subscription_id, base_url=None): + self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url) + self._client = ServiceClient(self.config.credentials, self.config) + self.models = None + + def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval): + # Construct and send request + operation_config = {} + + request = None + + if header_parameters is None: + header_parameters = {} + + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + + if method == 'GET': + request = self._client.get(url, query_parameters) + elif method == 'PUT': + request = self._client.put(url, query_parameters) + elif method == 'POST': + request = self._client.post(url, query_parameters) + elif method == 'HEAD': + request = self._client.head(url, query_parameters) + elif method == 'PATCH': + request = self._client.patch(url, query_parameters) + elif method == 'DELETE': + request = self._client.delete(url, query_parameters) + elif method == 'MERGE': + request = self._client.merge(url, query_parameters) + + response = self._client.send(request, header_parameters, body, **operation_config) + + if response.status_code not in expected_status_codes: + exp = 
CloudError(response)
+                exp.request_id = response.headers.get('x-ms-request-id')
+                raise exp
+            elif response.status_code == 202 and polling_timeout > 0:
+                def get_long_running_output(response):
+                    return response
+                poller = LROPoller(self._client,
+                                   ClientRawResponse(None, response),
+                                   get_long_running_output,
+                                   ARMPolling(polling_interval, **operation_config))
+                response = self.get_poller_result(poller, polling_timeout)
+
+        return response
+
+    def get_poller_result(self, poller, timeout):
+        try:
+            poller.wait(timeout=timeout)
+            return poller.result()
+        except Exception:
+            raise
diff --git a/test/support/integration/plugins/module_utils/cloud.py b/test/support/integration/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..0d29071f
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/cloud.py
@@ -0,0 +1,217 @@
+#
+# (c) 2016 Allen Sanabria,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+    * CloudRetry
+        - The base class to be used by other cloud providers, in order to
+          provide a backoff/retry decorator based on status codes.
+
+        - Example using the AWSRetry class which inherits from CloudRetry.
+
+          @AWSRetry.exponential_backoff(retries=10, delay=3)
+          def get_ec2_security_group_ids_from_names():
+              ...
+
+          @AWSRetry.jittered_backoff()
+          def get_ec2_security_group_ids_from_names():
+              ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+    """ Customizable exponential backoff strategy.
+    Args:
+        retries (int): Maximum number of times to retry a request.
+        delay (float): Initial (base) delay.
+        backoff (float): base of the exponent to use for exponential
+            backoff.
+        max_delay (int): Optional. If provided each delay generated is capped
+            at this amount. Defaults to 60 seconds.
+    Returns:
+        Callable that returns a generator. This generator yields durations in
+        seconds to be used as delays for an exponential backoff strategy.
+    Usage:
+        >>> backoff = _exponential_backoff()
+        >>> backoff
+        <function backoff_gen at 0x...>
+        >>> list(backoff())
+        [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+    """
+    def backoff_gen():
+        for retry in range(0, retries):
+            sleep = delay * backoff ** retry
+            yield sleep if max_delay is None else min(sleep, max_delay)
+    return backoff_gen
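Two quick illustrations of the machinery in this file. The first check is deterministic; the second wires a minimal CloudRetry subclass (CloudRetry is defined just below; FakeError and its 'Throttled' code are invented for the demo) to the exponential_backoff decorator:

    # _exponential_backoff doubles the delay and caps it at max_delay:
    print(list(_exponential_backoff(retries=5, delay=2, backoff=2, max_delay=10)()))
    # -> [2, 4, 8, 10, 10]

    # Minimal concrete subclass, retrying only on a made-up 'Throttled' code:
    class FakeError(Exception):
        def __init__(self, code):
            super(FakeError, self).__init__(code)
            self.code = code

    class FakeRetry(CloudRetry):
        base_class = FakeError

        @staticmethod
        def status_code_from_exception(error):
            return error.code

        @staticmethod
        def found(response_code, catch_extra_error_codes=None):
            return response_code == 'Throttled'

    state = {'calls': 0}

    @FakeRetry.exponential_backoff(retries=3, delay=0)
    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise FakeError('Throttled')
        return 'ok'

    print(flaky())  # succeeds on the third call, after two zero-second sleeps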
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+    """ Implements the "Full Jitter" backoff strategy described here
+    https://www.awsarchitectureblog.com/2015/03/backoff.html
+    Args:
+        retries (int): Maximum number of times to retry a request.
+        delay (float): Approximate number of seconds to sleep for the first
+            retry.
+        max_delay (int): The maximum number of seconds to sleep for any retry.
+        _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+    Returns:
+        Callable that returns a generator. This generator yields durations in
+        seconds to be used as delays for a full jitter backoff strategy.
+    Usage:
+        >>> backoff = _full_jitter_backoff(retries=5)
+        >>> backoff
+        <function backoff_gen at 0x...>
+        >>> list(backoff())
+        [3, 6, 5, 23, 38]
+        >>> list(backoff())
+        [2, 1, 6, 6, 31]
+    """
+    def backoff_gen():
+        for retry in range(0, retries):
+            yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+    return backoff_gen
+
+
+class CloudRetry(object):
+    """ CloudRetry can be used by any cloud provider, in order to implement a
+    backoff algorithm/retry effect based on Status Code from Exceptions.
+    """
+    # This is the base class of the exception.
+    # AWS Example botocore.exceptions.ClientError
+    base_class = None
+
+    @staticmethod
+    def status_code_from_exception(error):
+        """ Return the status code from the exception object
+        Args:
+            error (object): The exception itself.
+        """
+        pass
+
+    @staticmethod
+    def found(response_code, catch_extra_error_codes=None):
+        """ Return True if the Response Code to retry on was found.
+        Args:
+            response_code (str): This is the Response Code that is being matched against.
+        """
+        pass
+
+    @classmethod
+    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+        """ Retry calling the Cloud decorated function using the provided
+        backoff strategy.
+        Args:
+            backoff_strategy (callable): Callable that returns a generator. The
+                generator should yield sleep times for each retry of the decorated
+                function.
+        """
+        def deco(f):
+            @wraps(f)
+            def retry_func(*args, **kwargs):
+                for delay in backoff_strategy():
+                    try:
+                        return f(*args, **kwargs)
+                    except Exception as e:
+                        if isinstance(e, cls.base_class):
+                            response_code = cls.status_code_from_exception(e)
+                            if cls.found(response_code, catch_extra_error_codes):
+                                msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+                                syslog.syslog(syslog.LOG_INFO, msg)
+                                time.sleep(delay)
+                            else:
+                                # Status code is not one we retry on; re-raise the original exception
+                                raise e
+                        else:
+                            # Not an exception type we handle; re-raise the original exception
+                            raise e
+                return f(*args, **kwargs)
+
+            return retry_func  # true decorator
+
+        return deco
+
+    @classmethod
+    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+        """
+        Retry calling the Cloud decorated function using an exponential backoff.
+
+        Kwargs:
+            retries (int): Number of times to retry a failed request before giving up
+                default=10
+            delay (int or float): Initial delay between retries in seconds
+                default=3
+            backoff (int or float): backoff multiplier e.g. value of 2 will
+                double the delay each retry
+                default=2
+            max_delay (int or None): maximum amount of time to wait between retries.
+                default=60
+        """
+        return cls._backoff(_exponential_backoff(
+            retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+    @classmethod
+    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+        """
+        Retry calling the Cloud decorated function using a jittered backoff
+        strategy.
More on this strategy here: + + https://www.awsarchitectureblog.com/2015/03/backoff.html + + Kwargs: + retries (int): Number of times to retry a failed request before giving up + default=10 + delay (int): Initial delay between retries in seconds + default=3 + max_delay (int): maximum amount of time to wait between retries. + default=60 + """ + return cls._backoff(_full_jitter_backoff( + retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes) + + @classmethod + def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): + """ + Retry calling the Cloud decorated function using an exponential backoff. + + Compatibility for the original implementation of CloudRetry.backoff that + did not provide configurable backoff strategies. Developers should use + CloudRetry.exponential_backoff instead. + + Kwargs: + tries (int): Number of times to try (not retry) before giving up + default=10 + delay (int or float): Initial delay between retries in seconds + default=3 + backoff (int or float): backoff multiplier e.g. value of 2 will + double the delay each retry + default=1.1 + """ + return cls.exponential_backoff( + retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes) diff --git a/test/support/integration/plugins/module_utils/compat/__init__.py b/test/support/integration/plugins/module_utils/compat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/compat/ipaddress.py b/test/support/integration/plugins/module_utils/compat/ipaddress.py new file mode 100644 index 00000000..c46ad72a --- /dev/null +++ b/test/support/integration/plugins/module_utils/compat/ipaddress.py @@ -0,0 +1,2476 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file, and this file only, is based on +# Lib/ipaddress.py of cpython +# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# +# 1. This LICENSE AGREEMENT is between the Python Software Foundation +# ("PSF"), and the Individual or Organization ("Licensee") accessing and +# otherwise using this software ("Python") in source or binary form and +# its associated documentation. +# +# 2. Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +# analyze, test, perform and/or display publicly, prepare derivative works, +# distribute, and otherwise use Python alone or in any derivative version, +# provided, however, that PSF's License Agreement and PSF's notice of copyright, +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +# are retained in Python alone or in any derivative version prepared by Licensee. +# +# 3. In the event Licensee prepares a derivative work that is based on +# or incorporates Python or any part thereof, and wants to make +# the derivative work available to others as provided herein, then +# Licensee hereby agrees to include in any such work a brief summary of +# the changes made to Python. +# +# 4. PSF is making Python available to Licensee on an "AS IS" +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +# IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +# INFRINGE ANY THIRD PARTY RIGHTS. +# +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. +# +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. +# +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote +# products or services of Licensee, or any third party. +# +# 8. By copying, installing or otherwise using Python, Licensee +# agrees to be bound by the terms and conditions of this License +# Agreement. + +# Copyright 2007 Google Inc. +# Licensed to PSF under a Contributor Agreement. + +"""A fast, lightweight IPv4/IPv6 manipulation library in Python. + +This library is used to create/poke/manipulate IPv4 and IPv6 addresses +and networks. + +""" + +from __future__ import unicode_literals + + +import itertools +import struct + + +# The following makes it easier for us to script updates of the bundled code and is not part of +# upstream +_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"} + +__version__ = '1.0.22' + +# Compatibility functions +_compat_int_types = (int,) +try: + _compat_int_types = (int, long) +except NameError: + pass +try: + _compat_str = unicode +except NameError: + _compat_str = str + assert bytes != str +if b'\0'[0] == 0: # Python 3 semantics + def _compat_bytes_to_byte_vals(byt): + return byt +else: + def _compat_bytes_to_byte_vals(byt): + return [struct.unpack(b'!B', b)[0] for b in byt] +try: + _compat_int_from_byte_vals = int.from_bytes +except AttributeError: + def _compat_int_from_byte_vals(bytvals, endianess): + assert endianess == 'big' + res = 0 + for bv in bytvals: + assert isinstance(bv, _compat_int_types) + res = (res << 8) + bv + return res + + +def _compat_to_bytes(intval, length, endianess): + assert isinstance(intval, _compat_int_types) + assert endianess == 'big' + if length == 4: + if intval < 0 or intval >= 2 ** 32: + raise struct.error("integer out of range for 'I' format code") + return struct.pack(b'!I', intval) + elif length == 16: + if intval < 0 or intval >= 2 ** 128: + raise struct.error("integer out of range for 'QQ' format code") + return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) + else: + raise NotImplementedError() + + +if hasattr(int, 'bit_length'): + # Not int.bit_length , since that won't work in 2.7 where long exists + def _compat_bit_length(i): + return i.bit_length() +else: + def _compat_bit_length(i): + for res in itertools.count(): + if i >> res == 0: + return res + + +def _compat_range(start, end, step=1): + assert step > 0 + i = start + while i < end: + yield i + i += step + + +class _TotalOrderingMixin(object): + __slots__ = () + + # Helper that derives the other comparison operations from + # __lt__ and __eq__ + # We avoid functools.total_ordering because it doesn't handle + # NotImplemented correctly yet (http://bugs.python.org/issue10042) + def __eq__(self, other): + raise 
NotImplementedError + + def __ne__(self, other): + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not equal + + def __lt__(self, other): + raise NotImplementedError + + def __le__(self, other): + less = self.__lt__(other) + if less is NotImplemented or not less: + return self.__eq__(other) + return less + + def __gt__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not (less or equal) + + def __ge__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + return not less + + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. + + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % + address) + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 network. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % + address) + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. 
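A quick demo of the three factory functions defined in this file (like the PyPI ipaddress backport, this bundled copy expects unicode strings on Python 2, hence the u'' literals):

    print(ip_address(u'192.0.2.1'))                    # 192.0.2.1 (IPv4Address)
    print(ip_address(u'2001:db8::1'))                  # 2001:db8::1 (IPv6Address)
    print(ip_network(u'192.0.2.0/24'))                 # 192.0.2.0/24
    print(ip_network(u'192.0.2.1/24', strict=False))   # host bits masked off: 192.0.2.0/24
    print(ip_interface(u'192.0.2.1/24'))               # 192.0.2.1/24 (address plus its network)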
+ + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % + address) + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return _compat_to_bytes(address, 4, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. + + """ + try: + return _compat_to_bytes(address, 16, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = _compat_str(address).split('/') + if len(addr) > 2: + raise AddressValueError("Only one '/' permitted in %r" % address) + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) # pylint: disable=stop-iteration-return + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, _compat_bit_length(~number & (number - 1))) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. 
+ + """ + if (not (isinstance(first, _BaseAddress) and + isinstance(last, _BaseAddress))): + raise TypeError('first and last must be IP addresses, not networks') + if first.version != last.version: + raise TypeError("%s and %s are not of the same version" % ( + first, last)) + if first > last: + raise ValueError('last IP address must be greater than first') + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError('unknown IP version') + + ip_bits = first._max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min(_count_righthand_zero_bits(first_int, ip_bits), + _compat_bit_length(last_int - first_int + 1) - 1) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. + + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, + # last.network_address <= net.network_address is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterator of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. 
+ + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + ips.append(ip) + elif ip._prefixlen == ip._max_prefixlen: + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, nets[-1])) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. + + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase(_TotalOrderingMixin): + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return _compat_str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + @property + def version(self): + msg = '%200s has no version specified' % (type(self),) + raise NotImplementedError(msg) + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._max_prefixlen, + self._version)) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = ( + '%r (len %d != %d) is not permitted as an IPv%d address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?') + raise AddressValueError(msg % (address, address_len, + expected_len, self._version)) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. 
+ + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits(ip_int, + cls._max_prefixlen) + prefixlen = cls._max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls._max_prefixlen // 8 + details = _compat_to_bytes(ip_int, byteslen, 'big') + msg = 'Netmask pattern %r mixes zeroes & ones' + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = '%r is not a valid netmask' % netmask_str + raise NetmaskValueError(msg) + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls._max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + def __reduce__(self): + return self.__class__, (_compat_str(self),) + + +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return (self._ip == other._ip and + self._version == other._version) + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseAddress): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. 
This is not + # meant to ever support addition/subtraction of addresses. + def __add__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return _compat_str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self._version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + +class _BaseNetwork(_IPAddressBase): + + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + + """ + def __init__(self, address): + self._cache = {} + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return '%s/%d' % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the network + or broadcast addresses. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError('address out of range') + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError('address out of range') + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseNetwork): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return (self._version == other._version and + self.network_address == other.network_address and + int(self.netmask) == int(other.netmask)) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash(int(self.network_address) ^ int(self.netmask)) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self._version != other._version: + return False + # dealing with another network. 
+ if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return (int(self.network_address) <= int(other._ip) <= + int(self.broadcast_address)) + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other or ( + other.network_address in self or ( + other.broadcast_address in self))) + + @property + def broadcast_address(self): + x = self._cache.get('broadcast_address') + if x is None: + x = self._address_class(int(self.network_address) | + int(self.hostmask)) + self._cache['broadcast_address'] = x + return x + + @property + def hostmask(self): + x = self._cache.get('hostmask') + if x is None: + x = self._address_class(int(self.netmask) ^ self._ALL_ONES) + self._cache['hostmask'] = x + return x + + @property + def with_prefixlen(self): + return '%s/%d' % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. + msg = '%200s has no associated address class' % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. + + For example: + + addr1 = ip_network('192.0.2.0/28') + addr2 = ip_network('192.0.2.1/32') + list(addr1.address_exclude(addr2)) = + [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), + IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] + + or IPv6: + + addr1 = ip_network('2001:db8::1/32') + addr2 = ip_network('2001:db8::1/128') + list(addr1.address_exclude(addr2)) = + [ip_network('2001:db8::1/128'), + ip_network('2001:db8::2/127'), + ip_network('2001:db8::4/126'), + ip_network('2001:db8::8/125'), + ... + ip_network('2001:db8:8000::/33')] + + Args: + other: An IPv4Network or IPv6Network object of the same type. + + Returns: + An iterator of the IPv(4|6)Network objects which is self + minus other. + + Raises: + TypeError: If self and other are of differing address + versions, or if other is not a network object. + ValueError: If other is not completely contained by self. + + """ + if not self._version == other._version: + raise TypeError("%s and %s are not of the same version" % ( + self, other)) + + if not isinstance(other, _BaseNetwork): + raise TypeError("%s is not a network object" % other) + + if not other.subnet_of(self): + raise ValueError('%s not contained in %s' % (other, self)) + if other == self: + return + + # Make sure we're comparing the network of other. + other = other.__class__('%s/%s' % (other.network_address, + other.prefixlen)) + + s1, s2 = self.subnets() + while s1 != other and s2 != other: + if other.subnet_of(s1): + yield s2 + s1, s2 = s1.subnets() + elif other.subnet_of(s2): + yield s1 + s1, s2 = s2.subnets() + else: + # If we got here, there's a bug somewhere. 
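The containment and overlap methods above in action:

    net = ip_network(u'192.0.2.0/24')
    print(ip_address(u'192.0.2.1') in net)               # True
    print(net.overlaps(ip_network(u'192.0.2.128/25')))   # True
    print(net.broadcast_address)                         # 192.0.2.255
    print(net.hostmask)                                  # 0.0.0.255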
+ raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + if s1 == other: + yield s2 + elif s2 == other: + yield s1 + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + + def compare_networks(self, other): + """Compare two IP objects. + + This is only concerned about the comparison of the integer + representation of the network addresses. This means that the + host bits aren't considered at all in this method. If you want + to compare host bits, you can easily enough do a + 'HostA._ip < HostB._ip' + + Args: + other: An IP object. + + Returns: + If the IP versions of self and other are the same, returns: + + -1 if self < other: + eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') + IPv6Network('2001:db8::1000/124') < + IPv6Network('2001:db8::2000/124') + 0 if self == other + eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') + IPv6Network('2001:db8::1000/124') == + IPv6Network('2001:db8::1000/124') + 1 if self > other + eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') + IPv6Network('2001:db8::2000/124') > + IPv6Network('2001:db8::1000/124') + + Raises: + TypeError if the IP versions are different. + + """ + # does this need to raise a ValueError? + if self._version != other._version: + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + # self._version == other._version below here: + if self.network_address < other.network_address: + return -1 + if self.network_address > other.network_address: + return 1 + # self.network_address == other.network_address below here: + if self.netmask < other.netmask: + return -1 + if self.netmask > other.netmask: + return 1 + return 0 + + def _get_networks_key(self): + """Network-only key function. + + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self._version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. 
+ OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self._max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError('new prefix must be longer') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError('prefix length diff must be > 0') + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self._max_prefixlen: + raise ValueError( + 'prefix length diff %d is invalid for netblock %s' % ( + new_prefixlen, self)) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in _compat_range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. + OR + If prefixlen_diff and new_prefix are both set or new_prefix is a + larger number than the current prefix (larger number means a + smaller network) + + """ + if self._prefixlen == 0: + return self + + if new_prefix is not None: + if new_prefix > self._prefixlen: + raise ValueError('new prefix must be shorter') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = self._prefixlen - new_prefix + + new_prefixlen = self.prefixlen - prefixlen_diff + if new_prefixlen < 0: + raise ValueError( + 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % + (self.prefixlen, prefixlen_diff)) + return self.__class__(( + int(self.network_address) & (int(self.netmask) << prefixlen_diff), + new_prefixlen)) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return (self.network_address.is_multicast and + self.broadcast_address.is_multicast) + + @staticmethod + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a._version != b._version: + raise TypeError("%s and %s are not of the same version" % (a, b)) + return (b.network_address <= a.network_address and + b.broadcast_address >= a.broadcast_address) + except AttributeError: + raise TypeError("Unable to test subnet containment " + "between %s and %s" % (a, b)) + + def subnet_of(self, other): + """Return True if this network is a subnet of other.""" + return self._is_subnet_of(self, other) + + def supernet_of(self, other): + """Return True if this network is a supernet of other.""" + return self._is_subnet_of(other, self) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. 
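+
+ Illustrative doctest (editor's addition; ip_network is the
+ module-level factory defined earlier in this file):
+
+ >>> ip_network(u'240.0.0.0/4').is_reserved
+ True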
+ + """ + return (self.network_address.is_reserved and + self.broadcast_address.is_reserved) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return (self.network_address.is_link_local and + self.broadcast_address.is_link_local) + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return (self.network_address.is_private and + self.broadcast_address.is_private) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return (self.network_address.is_unspecified and + self.broadcast_address.is_unspecified) + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return (self.network_address.is_loopback and + self.broadcast_address.is_loopback) + + +class _BaseV4(object): + + """Base IPv4 object. + + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2 ** IPV4LENGTH) - 1 + _DECIMAL_DIGITS = frozenset('0123456789') + + # the valid octets for host and netmasks. only useful for IPv4. + _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) + + _max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return _compat_str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. 
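+
+ Example (editor's illustration): '192.0.2.1' parses to the
+ integer 3221225985, i.e. 0xC0000201.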
+ + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + octets = ip_str.split('.') + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return _compat_int_from_byte_vals( + map(cls._parse_octet, octets), 'big') + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._DECIMAL_DIGITS.issuperset(octet_str): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + # Any octets that look like they *might* be written in octal, + # and which don't look exactly the same in both octal and + # decimal are rejected as ambiguous + if octet_int > 7 and octet_str[0] == '0': + msg = "Ambiguous (octal/decimal) value in %r not permitted" + raise ValueError(msg % octet_str) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + The IP address as a string in dotted decimal notation. + + """ + return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] + if isinstance(b, bytes) + else b) + for b in _compat_to_bytes(ip_int, 4, 'big')) + + def _is_hostmask(self, ip_str): + """Test if the IP string is a hostmask (rather than a netmask). + + Args: + ip_str: A string, the potential hostmask. + + Returns: + A boolean, True if the IP string is a hostmask. + + """ + bits = ip_str.split('.') + try: + parts = [x for x in map(int, bits) if x in self._valid_mask_octets] + except ValueError: + return False + if len(parts) != len(bits): + return False + if parts[0] < parts[-1]: + return True + return False + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = _compat_str(self).split('.')[::-1] + return '.'.join(reverse_octets) + '.in-addr.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. 
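+ # Editor's sketch (illustrative): the three accepted input forms are
+ # equivalent, e.g. IPv4Address(3221225985),
+ # IPv4Address(b'\xc0\x00\x02\x01') and IPv4Address('192.0.2.1')
+ # all denote the same address.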
+ if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + return ( + self not in self._constants._public_network and + not self.is_private) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. + + """ + return self == self._constants._unspecified_address + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback per RFC 3330. + + """ + return self in self._constants._loopback_network + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is link-local per RFC 3927. 
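+
+ Illustrative doctest (editor's addition):
+
+ >>> IPv4Address(u'169.254.1.1').is_link_local
+ True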
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network
+ addresses.
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.2.0/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 
0.255.255.255 == /8), with the + single exception of an all-zero mask which is treated as a + netmask == /0. If no mask is given, a default of /32 is used. + + Additionally, an integer can be passed, so + IPv4Network('192.0.2.1') == IPv4Network(3221225985) + or, more generally + IPv4Interface(int(IPv4Interface('192.0.2.1'))) == + IPv4Interface('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + NetmaskValueError: If the netmask isn't valid for + an IPv4 address. + ValueError: If strict is True and a network address is not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Constructing from a packed address or integer + if isinstance(address, (_compat_int_types, bytes)): + self.network_address = IPv4Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + # fixme: address/network test here. + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + # We weren't given an address[1] + arg = self._max_prefixlen + self.network_address = IPv4Address(address[0]) + self.netmask, self._prefixlen = self._make_netmask(arg) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv4Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if (IPv4Address(int(self.network_address) & int(self.netmask)) != + self.network_address): + raise ValueError('%s has host bits set' % self) + self.network_address = IPv4Address(int(self.network_address) & + int(self.netmask)) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry. + + """ + return (not (self.network_address in IPv4Network('100.64.0.0/10') and + self.broadcast_address in IPv4Network('100.64.0.0/10')) and + not self.is_private) + + +class _IPv4Constants(object): + + _linklocal_network = IPv4Network('169.254.0.0/16') + + _loopback_network = IPv4Network('127.0.0.0/8') + + _multicast_network = IPv4Network('224.0.0.0/4') + + _public_network = IPv4Network('100.64.0.0/10') + + _private_networks = [ + IPv4Network('0.0.0.0/8'), + IPv4Network('10.0.0.0/8'), + IPv4Network('127.0.0.0/8'), + IPv4Network('169.254.0.0/16'), + IPv4Network('172.16.0.0/12'), + IPv4Network('192.0.0.0/29'), + IPv4Network('192.0.0.170/31'), + IPv4Network('192.0.2.0/24'), + IPv4Network('192.168.0.0/16'), + IPv4Network('198.18.0.0/15'), + IPv4Network('198.51.100.0/24'), + IPv4Network('203.0.113.0/24'), + IPv4Network('240.0.0.0/4'), + IPv4Network('255.255.255.255/32'), + ] + + _reserved_network = IPv4Network('240.0.0.0/4') + + _unspecified_address = IPv4Address('0.0.0.0') + + +IPv4Address._constants = _IPv4Constants + + +class _BaseV6(object): + + """Base IPv6 object. + + The following methods are used by IPv6 objects in both single IP + addresses and networks. 
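+
+ Illustrative doctest (editor's addition):
+
+ >>> IPv6Address(u'2001:db8::1').exploded
+ '2001:0db8:0000:0000:0000:0000:0000:0001'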
+ + """ + + __slots__ = () + _version = 6 + _ALL_ONES = (2 ** IPV6LENGTH) - 1 + _HEXTET_COUNT = 8 + _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') + _max_prefixlen = IPV6LENGTH + + # There are only a bunch of valid v6 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + prefixlen = cls._prefix_from_prefix_string(arg) + netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn an IPv6 ip_str into an integer. + + Args: + ip_str: A string, the IPv6 ip_str. + + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + parts = ip_str.split(':') + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if '.' in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) + parts.append('%x' % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + _max_parts = cls._HEXTET_COUNT + 1 + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % ( + _max_parts - 1, ip_str) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. + skip_index = None + for i in _compat_range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. 
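+ # Editor's sketch (illustrative): for the uncompressed string
+ # '2001:db8:0:0:0:0:0:1' there is no '::', so all eight parts are
+ # copied into parts_hi and parts_skipped stays 0.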
+ if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. + ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. + + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == '0': + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = (best_doublecolon_start + + best_doublecolon_len) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [''] + hextets[best_doublecolon_start:best_doublecolon_end] = [''] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [''] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. 
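+
+ Example (editor's illustration): an ip_int of
+ 0x20010db8000000000000000000000001 is rendered as '2001:db8::1'
+ after hextet compression.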
+ + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError('IPv6 address is too large') + + hex_str = '%032x' % ip_int + hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ':'.join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + A string, the expanded IPv6 address. + + """ + if isinstance(self, IPv6Network): + ip_str = _compat_str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = _compat_str(self.ip) + else: + ip_str = _compat_str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = '%032x' % ip_int + parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return '%s/%d' % (':'.join(parts), self._prefixlen) + return ':'.join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. + if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. 
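+
+ Illustrative doctest (editor's addition):
+
+ >>> IPv6Address(u'fec0::1').is_site_local
+ True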
+ + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return self in self._constants._sitelocal_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv6-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, true if the address is not reserved per + iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. + + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF)) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. + + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + + def __init__(self, address): + if isinstance(address, (bytes, _compat_int_types)): + IPv6Address.__init__(self, address) + self.network = IPv6Network(self._ip) + self._prefixlen = self._max_prefixlen + return + if isinstance(address, tuple): + IPv6Address.__init__(self, address[0]) + if len(address) > 1: + self._prefixlen = int(address[1]) + else: + self._prefixlen = self._max_prefixlen + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + return + + addr = _split_optional_netmask(address) + IPv6Address.__init__(self, addr[0]) + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + self.hostmask = self.network.hostmask + + def __str__(self): + return '%s/%d' % (self._string_from_ip_int(self._ip), + self.network.prefixlen) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if not address_equal or address_equal is NotImplemented: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. 
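+ # Editor's sketch (illustrative): IPv6Interface('2001:db8::1/64') ==
+ # IPv6Address('2001:db8::1') therefore evaluates to False, even
+ # though the address parts match.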
+ return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return NotImplemented + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return self._ip ^ self._prefixlen ^ int(self.network.network_address) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return self._ip == 1 and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. + + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. + ValueError: If strict was True and a network address was not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Efficient constructor from integer or packed address + if isinstance(address, (bytes, _compat_int_types)): + self.network_address = IPv6Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + self.network_address = IPv6Address(address[0]) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv6Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. 
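+ # Editor's sketch (illustrative): '2001:db8::/32' splits into
+ # ['2001:db8::', '32']; the address part and the prefix are then
+ # parsed separately below.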
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv6Address(int(self.network_address) &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate an iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (self.network_address.is_site_local and
+ self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network('fe80::/10')
+
+ _multicast_network = IPv6Network('ff00::/8')
+
+ _private_networks = [
+ IPv6Network('::1/128'),
+ IPv6Network('::/128'),
+ IPv6Network('::ffff:0:0/96'),
+ IPv6Network('100::/64'),
+ IPv6Network('2001::/23'),
+ IPv6Network('2001:2::/48'),
+ IPv6Network('2001:db8::/32'),
+ IPv6Network('2001:10::/28'),
+ IPv6Network('fc00::/7'),
+ IPv6Network('fe80::/10'),
+ ]
+
+ _reserved_networks = [
+ IPv6Network('::/8'), IPv6Network('100::/8'),
+ IPv6Network('200::/7'), IPv6Network('400::/6'),
+ IPv6Network('800::/5'), IPv6Network('1000::/4'),
+ IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+ IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+ IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+ IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+ IPv6Network('FE00::/9'),
+ ]
+
+ _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/test/support/integration/plugins/module_utils/crypto.py b/test/support/integration/plugins/module_utils/crypto.py
new file mode 100644
index 00000000..e67eeff1
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/crypto.py
@@ -0,0 +1,2125 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, Yanis Guenane 
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+# ----------------------------------------------------------------------
+# A clearly marked portion of this file is licensed under the BSD license
+# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# For more details, search for the function _obj2txt().
+# ----------------------------------------------------------------------
+# A clearly marked portion of this file is extracted from a project that
+# is licensed under the Apache License 2.0
+# Copyright (c) the OpenSSL contributors
+# For more details, search for the data structure _OID_MAP.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import sys
+from distutils.version import LooseVersion
+
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+except ImportError:
+ # An error will be raised in the calling class to let the end
+ # user know that OpenSSL couldn't be found.
+ pass
+
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend as cryptography_backend
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives import serialization
+ import ipaddress
+
+ # Older versions of cryptography (< 2.1) do not have __hash__ functions for
+ # general name objects (DNSName, IPAddress, ...), while providing overloaded
+ # equality and string representation operations. This makes it impossible to
+ # use them in hash-based data structures such as set or dict. Since we are
+ # actually doing that in openssl_certificate, and potentially in other code,
+ # we need to monkey-patch __hash__ for these classes to make sure our code
+ # works fine.
+ if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
+ # A very simple hash function which relies on the representation
+ # of an object to be implemented.
This is the case since at least + # cryptography 1.0, see + # https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f + def simple_hash(self): + return hash(repr(self)) + + # The hash functions for the following types were added for cryptography 2.1: + # https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38 + x509.DNSName.__hash__ = simple_hash + x509.DirectoryName.__hash__ = simple_hash + x509.GeneralName.__hash__ = simple_hash + x509.IPAddress.__hash__ = simple_hash + x509.OtherName.__hash__ = simple_hash + x509.RegisteredID.__hash__ = simple_hash + + if LooseVersion(cryptography.__version__) < LooseVersion('1.2'): + # The hash functions for the following types were added for cryptography 1.2: + # https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0 + # https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486 + x509.RFC822Name.__hash__ = simple_hash + x509.UniformResourceIdentifier.__hash__ = simple_hash + + # Test whether we have support for X25519, X448, Ed25519 and/or Ed448 + try: + import cryptography.hazmat.primitives.asymmetric.x25519 + CRYPTOGRAPHY_HAS_X25519 = True + try: + cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes + CRYPTOGRAPHY_HAS_X25519_FULL = True + except AttributeError: + CRYPTOGRAPHY_HAS_X25519_FULL = False + except ImportError: + CRYPTOGRAPHY_HAS_X25519 = False + CRYPTOGRAPHY_HAS_X25519_FULL = False + try: + import cryptography.hazmat.primitives.asymmetric.x448 + CRYPTOGRAPHY_HAS_X448 = True + except ImportError: + CRYPTOGRAPHY_HAS_X448 = False + try: + import cryptography.hazmat.primitives.asymmetric.ed25519 + CRYPTOGRAPHY_HAS_ED25519 = True + except ImportError: + CRYPTOGRAPHY_HAS_ED25519 = False + try: + import cryptography.hazmat.primitives.asymmetric.ed448 + CRYPTOGRAPHY_HAS_ED448 = True + except ImportError: + CRYPTOGRAPHY_HAS_ED448 = False + + HAS_CRYPTOGRAPHY = True +except ImportError: + # Error handled in the calling module. + CRYPTOGRAPHY_HAS_X25519 = False + CRYPTOGRAPHY_HAS_X25519_FULL = False + CRYPTOGRAPHY_HAS_X448 = False + CRYPTOGRAPHY_HAS_ED25519 = False + CRYPTOGRAPHY_HAS_ED448 = False + HAS_CRYPTOGRAPHY = False + + +import abc +import base64 +import binascii +import datetime +import errno +import hashlib +import os +import re +import tempfile + +from ansible.module_utils import six +from ansible.module_utils._text import to_native, to_bytes, to_text + + +class OpenSSLObjectError(Exception): + pass + + +class OpenSSLBadPassphraseError(OpenSSLObjectError): + pass + + +def get_fingerprint_of_bytes(source): + """Generate the fingerprint of the given bytes.""" + + fingerprint = {} + + try: + algorithms = hashlib.algorithms + except AttributeError: + try: + algorithms = hashlib.algorithms_guaranteed + except AttributeError: + return None + + for algo in algorithms: + f = getattr(hashlib, algo) + try: + h = f(source) + except ValueError: + # This can happen for hash algorithms not supported in FIPS mode + # (https://github.com/ansible/ansible/issues/67213) + continue + try: + # Certain hash functions have a hexdigest() which expects a length parameter + pubkey_digest = h.hexdigest() + except TypeError: + pubkey_digest = h.hexdigest(32) + fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2)) + + return fingerprint + + +def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'): + """Generate the fingerprint of the public key. 
""" + + privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend) + + if backend == 'pyopenssl': + try: + publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey) + except AttributeError: + # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail. + try: + bio = crypto._new_mem_buf() + rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey) + if rc != 1: + crypto._raise_current_error() + publickey = crypto._bio_to_string(bio) + except AttributeError: + # By doing this we prevent the code from raising an error + # yet we return no value in the fingerprint hash. + return None + elif backend == 'cryptography': + publickey = privatekey.public_key().public_bytes( + serialization.Encoding.DER, + serialization.PublicFormat.SubjectPublicKeyInfo + ) + + return get_fingerprint_of_bytes(publickey) + + +def load_file_if_exists(path, module=None, ignore_errors=False): + try: + with open(path, 'rb') as f: + return f.read() + except EnvironmentError as exc: + if exc.errno == errno.ENOENT: + return None + if ignore_errors: + return None + if module is None: + raise + module.fail_json('Error while loading {0} - {1}'.format(path, str(exc))) + except Exception as exc: + if ignore_errors: + return None + if module is None: + raise + module.fail_json('Error while loading {0} - {1}'.format(path, str(exc))) + + +def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'): + """Load the specified OpenSSL private key. + + The content can also be specified via content; in that case, + this function will not load the key from disk. + """ + + try: + if content is None: + with open(path, 'rb') as b_priv_key_fh: + priv_key_detail = b_priv_key_fh.read() + else: + priv_key_detail = content + + if backend == 'pyopenssl': + + # First try: try to load with real passphrase (resp. empty string) + # Will work if this is the correct passphrase, or the key is not + # password-protected. + try: + result = crypto.load_privatekey(crypto.FILETYPE_PEM, + priv_key_detail, + to_bytes(passphrase or '')) + except crypto.Error as e: + if len(e.args) > 0 and len(e.args[0]) > 0: + if e.args[0][0][2] in ('bad decrypt', 'bad password read'): + # This happens in case we have the wrong passphrase. + if passphrase is not None: + raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!') + else: + raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!') + raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e)) + if check_passphrase: + # Next we want to make sure that the key is actually protected by + # a passphrase (in case we did try the empty string before, make + # sure that the key is not protected by the empty string) + try: + crypto.load_privatekey(crypto.FILETYPE_PEM, + priv_key_detail, + to_bytes('y' if passphrase == 'x' else 'x')) + if passphrase is not None: + # Since we can load the key without an exception, the + # key isn't password-protected + raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!') + except crypto.Error as e: + if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0: + if e.args[0][0][2] in ('bad decrypt', 'bad password read'): + # The key is obviously protected by the empty string. + # Don't do this at home (if it's possible at all)... 
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ elif backend == 'cryptography':
+ try:
+ result = load_pem_private_key(priv_key_detail,
+ None if passphrase is None else to_bytes(passphrase),
+ cryptography_backend())
+ except TypeError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
+ except ValueError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
+
+ return result
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate."""
+
+ try:
+ if content is None:
+ with open(path, 'rb') as cert_fh:
+ cert_content = cert_fh.read()
+ else:
+ cert_content = content
+ if backend == 'pyopenssl':
+ return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate_request(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate signing request."""
+ try:
+ if content is None:
+ with open(path, 'rb') as csr_fh:
+ csr_content = csr_fh.read()
+ else:
+ csr_content = content
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+ if backend == 'pyopenssl':
+ return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_csr(csr_content, cryptography_backend())
+
+
+def parse_name_field(input_dict):
+ """Take a dict with key: value or key: list_of_values mappings and return a list of tuples"""
+
+ result = []
+ for key in input_dict:
+ if isinstance(input_dict[key], list):
+ for entry in input_dict[key]:
+ result.append((key, entry))
+ else:
+ result.append((key, input_dict[key]))
+ return result
+
+
+def convert_relative_to_datetime(relative_time_string):
+ """Get a datetime.datetime or None from a string in the time format described in sshd_config(5)"""
+
+ parsed_result = re.match(
+ r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
+ relative_time_string)
+
+ if parsed_result is None or len(relative_time_string) == 1:
+ # not matched or only a single "+" or "-"
+ return None
+
+ offset = datetime.timedelta(0)
+ if parsed_result.group("weeks") is not None:
+ offset += datetime.timedelta(weeks=int(parsed_result.group("weeks")))
+ if parsed_result.group("days") is not None:
+ offset += datetime.timedelta(days=int(parsed_result.group("days")))
+ if parsed_result.group("hours") is not None:
+ offset += datetime.timedelta(hours=int(parsed_result.group("hours")))
+ if parsed_result.group("minutes") is not None:
+ offset += datetime.timedelta(
+ minutes=int(parsed_result.group("minutes")))
+ if parsed_result.group("seconds") is not None:
+ offset += datetime.timedelta(
+ seconds=int(parsed_result.group("seconds")))
+
+ if parsed_result.group("prefix") == "+":
+ return datetime.datetime.utcnow() + offset
+ else:
+ return datetime.datetime.utcnow() - offset
+
+
+def get_relative_time_option(input_string, input_name, backend='cryptography'):
+ """Return an absolute timespec if a relative timespec or an ASN1 formatted
+ string is provided.
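+
+ Example (editor's illustration): "+32w1d2h" describes a point 32
+ weeks, 1 day and 2 hours in the future, while "20191013000000Z" is
+ an absolute ASN.1 TIME.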
+
+ The return value will be a datetime object for the cryptography backend,
+ and an ASN1 formatted string for the pyopenssl backend."""
+ result = to_native(input_string)
+ if result is None:
+ raise OpenSSLObjectError(
+ 'The timespec "%s" for %s is not valid' %
+ (input_string, input_name))
+ # Relative time
+ if result.startswith("+") or result.startswith("-"):
+ result_datetime = convert_relative_to_datetime(result)
+ if backend == 'pyopenssl':
+ return result_datetime.strftime("%Y%m%d%H%M%SZ")
+ elif backend == 'cryptography':
+ return result_datetime
+ # Absolute time
+ if backend == 'pyopenssl':
+ return input_string
+ elif backend == 'cryptography':
+ for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
+ try:
+ return datetime.datetime.strptime(result, date_fmt)
+ except ValueError:
+ pass
+
+ raise OpenSSLObjectError(
+ 'The time spec "%s" for %s is invalid' %
+ (input_string, input_name)
+ )
+
+
+def select_message_digest(digest_string):
+ digest = None
+ if digest_string == 'sha256':
+ digest = hashes.SHA256()
+ elif digest_string == 'sha384':
+ digest = hashes.SHA384()
+ elif digest_string == 'sha512':
+ digest = hashes.SHA512()
+ elif digest_string == 'sha1':
+ digest = hashes.SHA1()
+ elif digest_string == 'md5':
+ digest = hashes.MD5()
+ return digest
+
+
+def write_file(module, content, default_mode=None, path=None):
+ '''
+ Writes content into destination file as securely as possible.
+ Uses file arguments from module.
+ '''
+ # Find out parameters for file
+ file_args = module.load_file_common_arguments(module.params, path=path)
+ if file_args['mode'] is None:
+ file_args['mode'] = default_mode
+ # Create tempfile name
+ tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
+ try:
+ os.close(tmp_fd)
+ except Exception as dummy:
+ pass
+ module.add_cleanup_file(tmp_name) # if we fail, let Ansible try to remove the file
+ try:
+ try:
+ # Create tempfile
+ file = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ os.write(file, content)
+ os.close(file)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
+ # Update destination to wanted permissions
+ if os.path.exists(file_args['path']):
+ module.set_fs_attributes_if_different(file_args, False)
+ # Move tempfile to final destination
+ module.atomic_move(tmp_name, file_args['path'])
+ # Try to update permissions again
+ module.set_fs_attributes_if_different(file_args, False)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result: {0}'.format(e))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenSSLObject(object):
+
+ def __init__(self, path, state, force, check_mode):
+ self.path = path
+ self.state = state
+ self.force = force
+ self.name = os.path.basename(path)
+ self.changed = False
+ self.check_mode = check_mode
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ def _check_state():
+ return os.path.exists(self.path)
+
+ def _check_perms(module):
+ file_args = module.load_file_common_arguments(module.params)
+ return not module.set_fs_attributes_if_different(file_args, False)
+
+ if not perms_required:
+ return _check_state()
+
+ return _check_state() and _check_perms(module)
+
+ @abc.abstractmethod
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ pass
+
+ @abc.abstractmethod
+ def 
generate(self): + """Generate the resource.""" + + pass + + def remove(self, module): + """Remove the resource from the filesystem.""" + + try: + os.remove(self.path) + self.changed = True + except OSError as exc: + if exc.errno != errno.ENOENT: + raise OpenSSLObjectError(exc) + else: + pass + + +# ##################################################################################### +# ##################################################################################### +# This has been extracted from the OpenSSL project's objects.txt: +# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt +# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376 +# +# In case the following data structure has any copyrightable content, note that it is licensed as follows: +# Copyright (c) the OpenSSL contributors +# Licensed under the Apache License 2.0 +# https://github.com/openssl/openssl/blob/master/LICENSE +_OID_MAP = { + '0': ('itu-t', 'ITU-T', 'ccitt'), + '0.3.4401.5': ('ntt-ds', ), + '0.3.4401.5.3.1.9': ('camellia', ), + '0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'), + '0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'), + '0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'), + '0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'), + '0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'), + '0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'), + '0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'), + '0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'), + '0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'), + '0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'), + '0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'), + '0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'), + '0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'), + '0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'), + '0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'), + '0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'), + '0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'), + '0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'), + '0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'), + '0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'), + '0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'), + '0.9': ('data', ), + '0.9.2342': ('pss', ), + '0.9.2342.19200300': ('ucl', ), + '0.9.2342.19200300.100': ('pilot', ), + '0.9.2342.19200300.100.1': ('pilotAttributeType', ), + '0.9.2342.19200300.100.1.1': ('userId', 'UID'), + '0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ), + '0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'), + '0.9.2342.19200300.100.1.4': ('info', ), + '0.9.2342.19200300.100.1.5': ('favouriteDrink', ), + '0.9.2342.19200300.100.1.6': ('roomNumber', ), + '0.9.2342.19200300.100.1.7': ('photo', ), + '0.9.2342.19200300.100.1.8': ('userClass', ), + '0.9.2342.19200300.100.1.9': ('host', ), + '0.9.2342.19200300.100.1.10': ('manager', ), + '0.9.2342.19200300.100.1.11': ('documentIdentifier', ), + '0.9.2342.19200300.100.1.12': ('documentTitle', ), + '0.9.2342.19200300.100.1.13': ('documentVersion', ), + '0.9.2342.19200300.100.1.14': ('documentAuthor', ), + '0.9.2342.19200300.100.1.15': ('documentLocation', ), + '0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ), + '0.9.2342.19200300.100.1.21': 
('secretary', ), + '0.9.2342.19200300.100.1.22': ('otherMailbox', ), + '0.9.2342.19200300.100.1.23': ('lastModifiedTime', ), + '0.9.2342.19200300.100.1.24': ('lastModifiedBy', ), + '0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'), + '0.9.2342.19200300.100.1.26': ('aRecord', ), + '0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ), + '0.9.2342.19200300.100.1.28': ('mXRecord', ), + '0.9.2342.19200300.100.1.29': ('nSRecord', ), + '0.9.2342.19200300.100.1.30': ('sOARecord', ), + '0.9.2342.19200300.100.1.31': ('cNAMERecord', ), + '0.9.2342.19200300.100.1.37': ('associatedDomain', ), + '0.9.2342.19200300.100.1.38': ('associatedName', ), + '0.9.2342.19200300.100.1.39': ('homePostalAddress', ), + '0.9.2342.19200300.100.1.40': ('personalTitle', ), + '0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ), + '0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ), + '0.9.2342.19200300.100.1.43': ('friendlyCountryName', ), + '0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'), + '0.9.2342.19200300.100.1.45': ('organizationalStatus', ), + '0.9.2342.19200300.100.1.46': ('janetMailbox', ), + '0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ), + '0.9.2342.19200300.100.1.48': ('buildingName', ), + '0.9.2342.19200300.100.1.49': ('dSAQuality', ), + '0.9.2342.19200300.100.1.50': ('singleLevelQuality', ), + '0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ), + '0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ), + '0.9.2342.19200300.100.1.53': ('personalSignature', ), + '0.9.2342.19200300.100.1.54': ('dITRedirect', ), + '0.9.2342.19200300.100.1.55': ('audio', ), + '0.9.2342.19200300.100.1.56': ('documentPublisher', ), + '0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ), + '0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ), + '0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ), + '0.9.2342.19200300.100.4': ('pilotObjectClass', ), + '0.9.2342.19200300.100.4.3': ('pilotObject', ), + '0.9.2342.19200300.100.4.4': ('pilotPerson', ), + '0.9.2342.19200300.100.4.5': ('account', ), + '0.9.2342.19200300.100.4.6': ('document', ), + '0.9.2342.19200300.100.4.7': ('room', ), + '0.9.2342.19200300.100.4.9': ('documentSeries', ), + '0.9.2342.19200300.100.4.13': ('Domain', 'domain'), + '0.9.2342.19200300.100.4.14': ('rFC822localPart', ), + '0.9.2342.19200300.100.4.15': ('dNSDomain', ), + '0.9.2342.19200300.100.4.17': ('domainRelatedObject', ), + '0.9.2342.19200300.100.4.18': ('friendlyCountry', ), + '0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ), + '0.9.2342.19200300.100.4.20': ('pilotOrganization', ), + '0.9.2342.19200300.100.4.21': ('pilotDSA', ), + '0.9.2342.19200300.100.4.22': ('qualityLabelledData', ), + '0.9.2342.19200300.100.10': ('pilotGroups', ), + '1': ('iso', 'ISO'), + '1.0.9797.3.4': ('gmac', 'GMAC'), + '1.0.10118.3.0.55': ('whirlpool', ), + '1.2': ('ISO Member Body', 'member-body'), + '1.2.156': ('ISO CN Member Body', 'ISO-CN'), + '1.2.156.10197': ('oscca', ), + '1.2.156.10197.1': ('sm-scheme', ), + '1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'), + '1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'), + '1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'), + '1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'), + '1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'), + '1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'), + '1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'), + '1.2.156.10197.1.301': ('sm2', 'SM2'), + '1.2.156.10197.1.401': ('sm3', 'SM3'), + '1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'), + '1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'), + 
'1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'), + '1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'), + '1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'), + '1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ), + '1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ), + '1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ), + '1.2.410.200004': ('kisa', 'KISA'), + '1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'), + '1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'), + '1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'), + '1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'), + '1.2.410.200046.1.1': ('aria', ), + '1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'), + '1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'), + '1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'), + '1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'), + '1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'), + '1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'), + '1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'), + '1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'), + '1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'), + '1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'), + '1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'), + '1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'), + '1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'), + '1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'), + '1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'), + '1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'), + '1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'), + '1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'), + '1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'), + '1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'), + '1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'), + '1.2.643.2.2': ('cryptopro', ), + '1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'), + '1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'), + '1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'), + '1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'), + '1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ), + '1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ), + '1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'), + '1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'), + '1.2.643.2.2.20.1': ('id-GostR3410-94-a', ), + '1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ), + '1.2.643.2.2.20.3': ('id-GostR3410-94-b', ), + '1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ), + '1.2.643.2.2.21': ('GOST 28147-89', 'gost89'), + '1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'), + '1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'), + '1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ), + '1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ), + '1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ), + '1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ), + '1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ), + '1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ), + '1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ), + '1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ), + '1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ), + '1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ), + 
'1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ), + '1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ), + '1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ), + '1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ), + '1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ), + '1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ), + '1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ), + '1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ), + '1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ), + '1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ), + '1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ), + '1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ), + '1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ), + '1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ), + '1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'), + '1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'), + '1.2.643.2.9': ('cryptocom', ), + '1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'), + '1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'), + '1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'), + '1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'), + '1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'), + '1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'), + '1.2.643.3.131.1.1': ('INN', 'INN'), + '1.2.643.7.1': ('id-tc26', ), + '1.2.643.7.1.1': ('id-tc26-algorithms', ), + '1.2.643.7.1.1.1': ('id-tc26-sign', ), + '1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'), + '1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'), + '1.2.643.7.1.1.2': ('id-tc26-digest', ), + '1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'), + '1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'), + '1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ), + '1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'), + '1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'), + '1.2.643.7.1.1.4': ('id-tc26-mac', ), + '1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'), + '1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'), + '1.2.643.7.1.1.5': ('id-tc26-cipher', ), + '1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ), + '1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ), + '1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ), + '1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ), + '1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ), + '1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ), + '1.2.643.7.1.1.6': ('id-tc26-agreement', ), + '1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ), + '1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ), + '1.2.643.7.1.1.7': ('id-tc26-wrap', ), + '1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ), + '1.2.643.7.1.1.7.1.1': 
('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'), + '1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ), + '1.2.643.7.1.2': ('id-tc26-constants', ), + '1.2.643.7.1.2.1': ('id-tc26-sign-constants', ), + '1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ), + '1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'), + '1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'), + '1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'), + '1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'), + '1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ), + '1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'), + '1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'), + '1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'), + '1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'), + '1.2.643.7.1.2.2': ('id-tc26-digest-constants', ), + '1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ), + '1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ), + '1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'), + '1.2.643.100.1': ('OGRN', 'OGRN'), + '1.2.643.100.3': ('SNILS', 'SNILS'), + '1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'), + '1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'), + '1.2.804': ('ISO-UA', ), + '1.2.804.2.1.1.1': ('ua-pki', ), + '1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'), + '1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'), + '1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'), + '1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'), + '1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'), + '1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'), + '1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'), + '1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'), + '1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'), + '1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'), + '1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'), + '1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'), + '1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'), + '1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'), + '1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'), + '1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'), + '1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'), + '1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'), + '1.2.840': ('ISO US Member Body', 'ISO-US'), + '1.2.840.10040': ('X9.57', 'X9-57'), + '1.2.840.10040.2': ('holdInstruction', ), + '1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'), + '1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'), + '1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'), + '1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'), + '1.2.840.10040.4.1': ('dsaEncryption', 'DSA'), + '1.2.840.10040.4.3': ('dsaWithSHA1', 
'DSA-SHA1'), + '1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'), + '1.2.840.10045.1': ('id-fieldType', ), + '1.2.840.10045.1.1': ('prime-field', ), + '1.2.840.10045.1.2': ('characteristic-two-field', ), + '1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ), + '1.2.840.10045.1.2.3.1': ('onBasis', ), + '1.2.840.10045.1.2.3.2': ('tpBasis', ), + '1.2.840.10045.1.2.3.3': ('ppBasis', ), + '1.2.840.10045.2': ('id-publicKeyType', ), + '1.2.840.10045.2.1': ('id-ecPublicKey', ), + '1.2.840.10045.3': ('ellipticCurve', ), + '1.2.840.10045.3.0': ('c-TwoCurve', ), + '1.2.840.10045.3.0.1': ('c2pnb163v1', ), + '1.2.840.10045.3.0.2': ('c2pnb163v2', ), + '1.2.840.10045.3.0.3': ('c2pnb163v3', ), + '1.2.840.10045.3.0.4': ('c2pnb176v1', ), + '1.2.840.10045.3.0.5': ('c2tnb191v1', ), + '1.2.840.10045.3.0.6': ('c2tnb191v2', ), + '1.2.840.10045.3.0.7': ('c2tnb191v3', ), + '1.2.840.10045.3.0.8': ('c2onb191v4', ), + '1.2.840.10045.3.0.9': ('c2onb191v5', ), + '1.2.840.10045.3.0.10': ('c2pnb208w1', ), + '1.2.840.10045.3.0.11': ('c2tnb239v1', ), + '1.2.840.10045.3.0.12': ('c2tnb239v2', ), + '1.2.840.10045.3.0.13': ('c2tnb239v3', ), + '1.2.840.10045.3.0.14': ('c2onb239v4', ), + '1.2.840.10045.3.0.15': ('c2onb239v5', ), + '1.2.840.10045.3.0.16': ('c2pnb272w1', ), + '1.2.840.10045.3.0.17': ('c2pnb304w1', ), + '1.2.840.10045.3.0.18': ('c2tnb359v1', ), + '1.2.840.10045.3.0.19': ('c2pnb368w1', ), + '1.2.840.10045.3.0.20': ('c2tnb431r1', ), + '1.2.840.10045.3.1': ('primeCurve', ), + '1.2.840.10045.3.1.1': ('prime192v1', ), + '1.2.840.10045.3.1.2': ('prime192v2', ), + '1.2.840.10045.3.1.3': ('prime192v3', ), + '1.2.840.10045.3.1.4': ('prime239v1', ), + '1.2.840.10045.3.1.5': ('prime239v2', ), + '1.2.840.10045.3.1.6': ('prime239v3', ), + '1.2.840.10045.3.1.7': ('prime256v1', ), + '1.2.840.10045.4': ('id-ecSigType', ), + '1.2.840.10045.4.1': ('ecdsa-with-SHA1', ), + '1.2.840.10045.4.2': ('ecdsa-with-Recommended', ), + '1.2.840.10045.4.3': ('ecdsa-with-Specified', ), + '1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ), + '1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ), + '1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ), + '1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ), + '1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'), + '1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'), + '1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ), + '1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'), + '1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'), + '1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'), + '1.2.840.113549.1': ('RSA Data Security, Inc. 
PKCS', 'pkcs'), + '1.2.840.113549.1.1': ('pkcs1', ), + '1.2.840.113549.1.1.1': ('rsaEncryption', ), + '1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'), + '1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'), + '1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'), + '1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'), + '1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ), + '1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'), + '1.2.840.113549.1.1.8': ('mgf1', 'MGF1'), + '1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'), + '1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'), + '1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'), + '1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'), + '1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'), + '1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'), + '1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'), + '1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'), + '1.2.840.113549.1.3': ('pkcs3', ), + '1.2.840.113549.1.3.1': ('dhKeyAgreement', ), + '1.2.840.113549.1.5': ('pkcs5', ), + '1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'), + '1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'), + '1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'), + '1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'), + '1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'), + '1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'), + '1.2.840.113549.1.5.12': ('PBKDF2', ), + '1.2.840.113549.1.5.13': ('PBES2', ), + '1.2.840.113549.1.5.14': ('PBMAC1', ), + '1.2.840.113549.1.7': ('pkcs7', ), + '1.2.840.113549.1.7.1': ('pkcs7-data', ), + '1.2.840.113549.1.7.2': ('pkcs7-signedData', ), + '1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ), + '1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ), + '1.2.840.113549.1.7.5': ('pkcs7-digestData', ), + '1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ), + '1.2.840.113549.1.9': ('pkcs9', ), + '1.2.840.113549.1.9.1': ('emailAddress', ), + '1.2.840.113549.1.9.2': ('unstructuredName', ), + '1.2.840.113549.1.9.3': ('contentType', ), + '1.2.840.113549.1.9.4': ('messageDigest', ), + '1.2.840.113549.1.9.5': ('signingTime', ), + '1.2.840.113549.1.9.6': ('countersignature', ), + '1.2.840.113549.1.9.7': ('challengePassword', ), + '1.2.840.113549.1.9.8': ('unstructuredAddress', ), + '1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ), + '1.2.840.113549.1.9.14': ('Extension Request', 'extReq'), + '1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'), + '1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'), + '1.2.840.113549.1.9.16.0': ('id-smime-mod', ), + '1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ), + '1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ), + '1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ), + '1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ), + '1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ), + '1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ), + '1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ), + '1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ), + '1.2.840.113549.1.9.16.1': ('id-smime-ct', ), + '1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ), + '1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ), + '1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ), + '1.2.840.113549.1.9.16.1.4': 
('id-smime-ct-TSTInfo', ), + '1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ), + '1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ), + '1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ), + '1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ), + '1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ), + '1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ), + '1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ), + '1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ), + '1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ), + '1.2.840.113549.1.9.16.2': ('id-smime-aa', ), + '1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ), + '1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ), + '1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ), + '1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ), + '1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ), + '1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ), + '1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ), + '1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ), + '1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ), + '1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ), + '1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ), + '1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ), + '1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ), + '1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ), + '1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ), + '1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ), + '1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ), + '1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ), + '1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ), + '1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ), + '1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ), + '1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ), + '1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ), + '1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ), + '1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ), + '1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ), + '1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ), + '1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ), + '1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ), + '1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ), + '1.2.840.113549.1.9.16.3': ('id-smime-alg', ), + '1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ), + '1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ), + '1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ), + '1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ), + '1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ), + '1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ), + '1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ), + '1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'), + '1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ), + '1.2.840.113549.1.9.16.4': ('id-smime-cd', ), + '1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ), + '1.2.840.113549.1.9.16.5': ('id-smime-spq', ), + '1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ), + '1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ), + 
'1.2.840.113549.1.9.16.6': ('id-smime-cti', ), + '1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ), + '1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ), + '1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ), + '1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ), + '1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ), + '1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ), + '1.2.840.113549.1.9.20': ('friendlyName', ), + '1.2.840.113549.1.9.21': ('localKeyID', ), + '1.2.840.113549.1.9.22': ('certTypes', ), + '1.2.840.113549.1.9.22.1': ('x509Certificate', ), + '1.2.840.113549.1.9.22.2': ('sdsiCertificate', ), + '1.2.840.113549.1.9.23': ('crlTypes', ), + '1.2.840.113549.1.9.23.1': ('x509Crl', ), + '1.2.840.113549.1.12': ('pkcs12', ), + '1.2.840.113549.1.12.1': ('pkcs12-pbeids', ), + '1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'), + '1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'), + '1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'), + '1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'), + '1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'), + '1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'), + '1.2.840.113549.1.12.10': ('pkcs12-Version1', ), + '1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ), + '1.2.840.113549.1.12.10.1.1': ('keyBag', ), + '1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ), + '1.2.840.113549.1.12.10.1.3': ('certBag', ), + '1.2.840.113549.1.12.10.1.4': ('crlBag', ), + '1.2.840.113549.1.12.10.1.5': ('secretBag', ), + '1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ), + '1.2.840.113549.2.2': ('md2', 'MD2'), + '1.2.840.113549.2.4': ('md4', 'MD4'), + '1.2.840.113549.2.5': ('md5', 'MD5'), + '1.2.840.113549.2.6': ('hmacWithMD5', ), + '1.2.840.113549.2.7': ('hmacWithSHA1', ), + '1.2.840.113549.2.8': ('hmacWithSHA224', ), + '1.2.840.113549.2.9': ('hmacWithSHA256', ), + '1.2.840.113549.2.10': ('hmacWithSHA384', ), + '1.2.840.113549.2.11': ('hmacWithSHA512', ), + '1.2.840.113549.2.12': ('hmacWithSHA512-224', ), + '1.2.840.113549.2.13': ('hmacWithSHA512-256', ), + '1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'), + '1.2.840.113549.3.4': ('rc4', 'RC4'), + '1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'), + '1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'), + '1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'), + '1.3': ('identified-organization', 'org', 'ORG'), + '1.3.6': ('dod', 'DOD'), + '1.3.6.1': ('iana', 'IANA', 'internet'), + '1.3.6.1.1': ('Directory', 'directory'), + '1.3.6.1.2': ('Management', 'mgmt'), + '1.3.6.1.3': ('Experimental', 'experimental'), + '1.3.6.1.4': ('Private', 'private'), + '1.3.6.1.4.1': ('Enterprises', 'enterprises'), + '1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'), + '1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'), + '1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'), + '1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'), + '1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'), + '1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'), + '1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'), + '1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'), + '1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'), + '1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 
'msSmartcardLogin'), + '1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'), + '1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'), + '1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'), + '1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'), + '1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'), + '1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'), + '1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'), + '1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'), + '1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'), + '1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'), + '1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'), + '1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'), + '1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'), + '1.3.6.1.5': ('Security', 'security'), + '1.3.6.1.5.2.3': ('id-pkinit', ), + '1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'), + '1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'), + '1.3.6.1.5.5.7': ('PKIX', ), + '1.3.6.1.5.5.7.0': ('id-pkix-mod', ), + '1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ), + '1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ), + '1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ), + '1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ), + '1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ), + '1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ), + '1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ), + '1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ), + '1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ), + '1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ), + '1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ), + '1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ), + '1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ), + '1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ), + '1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ), + '1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ), + '1.3.6.1.5.5.7.1': ('id-pe', ), + '1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'), + '1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'), + '1.3.6.1.5.5.7.1.3': ('qcStatements', ), + '1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ), + '1.3.6.1.5.5.7.1.5': ('ac-targeting', ), + '1.3.6.1.5.5.7.1.6': ('aaControls', ), + '1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ), + '1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ), + '1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ), + '1.3.6.1.5.5.7.1.10': ('ac-proxying', ), + '1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'), + '1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'), + '1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'), + '1.3.6.1.5.5.7.2': ('id-qt', ), + '1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'), + '1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'), + '1.3.6.1.5.5.7.2.3': ('textNotice', ), + '1.3.6.1.5.5.7.3': ('id-kp', ), + '1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'), + '1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'), + '1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'), + '1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'), + '1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'), + '1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'), + '1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'), + '1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'), + '1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'), + '1.3.6.1.5.5.7.3.10': ('dvcs', 
'DVCS'), + '1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'), + '1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'), + '1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'), + '1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'), + '1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'), + '1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'), + '1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'), + '1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'), + '1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'), + '1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'), + '1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'), + '1.3.6.1.5.5.7.4': ('id-it', ), + '1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ), + '1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ), + '1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ), + '1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ), + '1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ), + '1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ), + '1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ), + '1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ), + '1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ), + '1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ), + '1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ), + '1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ), + '1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ), + '1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ), + '1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ), + '1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ), + '1.3.6.1.5.5.7.5': ('id-pkip', ), + '1.3.6.1.5.5.7.5.1': ('id-regCtrl', ), + '1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ), + '1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ), + '1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ), + '1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ), + '1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ), + '1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ), + '1.3.6.1.5.5.7.5.2': ('id-regInfo', ), + '1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ), + '1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ), + '1.3.6.1.5.5.7.6': ('id-alg', ), + '1.3.6.1.5.5.7.6.1': ('id-alg-des40', ), + '1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ), + '1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ), + '1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ), + '1.3.6.1.5.5.7.7': ('id-cmc', ), + '1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ), + '1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ), + '1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ), + '1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ), + '1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ), + '1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ), + '1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ), + '1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ), + '1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ), + '1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ), + '1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ), + '1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ), + '1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ), + '1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ), + '1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ), + '1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ), + '1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ), + '1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ), + '1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ), + '1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ), + '1.3.6.1.5.5.7.8': ('id-on', ), + '1.3.6.1.5.5.7.8.1': ('id-on-personalData', ), + '1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 
'id-on-permanentIdentifier'), + '1.3.6.1.5.5.7.9': ('id-pda', ), + '1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ), + '1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ), + '1.3.6.1.5.5.7.9.3': ('id-pda-gender', ), + '1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ), + '1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ), + '1.3.6.1.5.5.7.10': ('id-aca', ), + '1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ), + '1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ), + '1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ), + '1.3.6.1.5.5.7.10.4': ('id-aca-group', ), + '1.3.6.1.5.5.7.10.5': ('id-aca-role', ), + '1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ), + '1.3.6.1.5.5.7.11': ('id-qcs', ), + '1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ), + '1.3.6.1.5.5.7.12': ('id-cct', ), + '1.3.6.1.5.5.7.12.1': ('id-cct-crs', ), + '1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ), + '1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ), + '1.3.6.1.5.5.7.21': ('id-ppl', ), + '1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'), + '1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'), + '1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'), + '1.3.6.1.5.5.7.48': ('id-ad', ), + '1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'), + '1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'), + '1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'), + '1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'), + '1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'), + '1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'), + '1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'), + '1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'), + '1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'), + '1.3.6.1.5.5.7.48.1.9': ('valid', ), + '1.3.6.1.5.5.7.48.1.10': ('path', ), + '1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'), + '1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'), + '1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'), + '1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'), + '1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'), + '1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'), + '1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'), + '1.3.6.1.6': ('SNMPv2', 'snmpv2'), + '1.3.6.1.7': ('Mail', ), + '1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'), + '1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'), + '1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'), + '1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'), + '1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'), + '1.3.14.3.2': ('algorithm', 'algorithm'), + '1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'), + '1.3.14.3.2.6': ('des-ecb', 'DES-ECB'), + '1.3.14.3.2.7': ('des-cbc', 'DES-CBC'), + '1.3.14.3.2.8': ('des-ofb', 'DES-OFB'), + '1.3.14.3.2.9': ('des-cfb', 'DES-CFB'), + '1.3.14.3.2.11': ('rsaSignature', ), + '1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'), + '1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'), + '1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'), + '1.3.14.3.2.17': ('des-ede', 'DES-EDE'), + '1.3.14.3.2.18': ('sha', 'SHA'), + '1.3.14.3.2.26': ('sha1', 'SHA1'), + '1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'), + '1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'), + '1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'), + '1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'), + '1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ), + '1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ), + '1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ), + '1.3.36.3.3.2.8.1.1.4': 
('brainpoolP192t1', ), + '1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ), + '1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ), + '1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ), + '1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ), + '1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ), + '1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ), + '1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ), + '1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ), + '1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ), + '1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ), + '1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'), + '1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'), + '1.3.101.110': ('X25519', ), + '1.3.101.111': ('X448', ), + '1.3.101.112': ('ED25519', ), + '1.3.101.113': ('ED448', ), + '1.3.111': ('ieee', ), + '1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'), + '1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'), + '1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'), + '1.3.132': ('certicom-arc', ), + '1.3.132.0': ('secg_ellipticCurve', ), + '1.3.132.0.1': ('sect163k1', ), + '1.3.132.0.2': ('sect163r1', ), + '1.3.132.0.3': ('sect239k1', ), + '1.3.132.0.4': ('sect113r1', ), + '1.3.132.0.5': ('sect113r2', ), + '1.3.132.0.6': ('secp112r1', ), + '1.3.132.0.7': ('secp112r2', ), + '1.3.132.0.8': ('secp160r1', ), + '1.3.132.0.9': ('secp160k1', ), + '1.3.132.0.10': ('secp256k1', ), + '1.3.132.0.15': ('sect163r2', ), + '1.3.132.0.16': ('sect283k1', ), + '1.3.132.0.17': ('sect283r1', ), + '1.3.132.0.22': ('sect131r1', ), + '1.3.132.0.23': ('sect131r2', ), + '1.3.132.0.24': ('sect193r1', ), + '1.3.132.0.25': ('sect193r2', ), + '1.3.132.0.26': ('sect233k1', ), + '1.3.132.0.27': ('sect233r1', ), + '1.3.132.0.28': ('secp128r1', ), + '1.3.132.0.29': ('secp128r2', ), + '1.3.132.0.30': ('secp160r2', ), + '1.3.132.0.31': ('secp192k1', ), + '1.3.132.0.32': ('secp224k1', ), + '1.3.132.0.33': ('secp224r1', ), + '1.3.132.0.34': ('secp384r1', ), + '1.3.132.0.35': ('secp521r1', ), + '1.3.132.0.36': ('sect409k1', ), + '1.3.132.0.37': ('sect409r1', ), + '1.3.132.0.38': ('sect571k1', ), + '1.3.132.0.39': ('sect571r1', ), + '1.3.132.1': ('secg-scheme', ), + '1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ), + '1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ), + '1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ), + '1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ), + '1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ), + '1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ), + '1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ), + '1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ), + '1.3.133.16.840.63.0': ('x9-63-scheme', ), + '1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ), + '1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ), + '2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'), + '2.5': ('directory services (X.500)', 'X500'), + '2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'), + '2.5.1.5.55': ('clearance', ), + '2.5.4': ('X509', ), + '2.5.4.3': ('commonName', 'CN'), + '2.5.4.4': ('surname', 'SN'), + '2.5.4.5': ('serialNumber', ), + '2.5.4.6': ('countryName', 'C'), + '2.5.4.7': ('localityName', 'L'), + '2.5.4.8': ('stateOrProvinceName', 'ST'), + '2.5.4.9': ('streetAddress', 'street'), + '2.5.4.10': ('organizationName', 'O'), + '2.5.4.11': ('organizationalUnitName', 'OU'), + '2.5.4.12': ('title', 'title'), + '2.5.4.13': ('description', ), + 
'2.5.4.14': ('searchGuide', ), + '2.5.4.15': ('businessCategory', ), + '2.5.4.16': ('postalAddress', ), + '2.5.4.17': ('postalCode', ), + '2.5.4.18': ('postOfficeBox', ), + '2.5.4.19': ('physicalDeliveryOfficeName', ), + '2.5.4.20': ('telephoneNumber', ), + '2.5.4.21': ('telexNumber', ), + '2.5.4.22': ('teletexTerminalIdentifier', ), + '2.5.4.23': ('facsimileTelephoneNumber', ), + '2.5.4.24': ('x121Address', ), + '2.5.4.25': ('internationaliSDNNumber', ), + '2.5.4.26': ('registeredAddress', ), + '2.5.4.27': ('destinationIndicator', ), + '2.5.4.28': ('preferredDeliveryMethod', ), + '2.5.4.29': ('presentationAddress', ), + '2.5.4.30': ('supportedApplicationContext', ), + '2.5.4.31': ('member', ), + '2.5.4.32': ('owner', ), + '2.5.4.33': ('roleOccupant', ), + '2.5.4.34': ('seeAlso', ), + '2.5.4.35': ('userPassword', ), + '2.5.4.36': ('userCertificate', ), + '2.5.4.37': ('cACertificate', ), + '2.5.4.38': ('authorityRevocationList', ), + '2.5.4.39': ('certificateRevocationList', ), + '2.5.4.40': ('crossCertificatePair', ), + '2.5.4.41': ('name', 'name'), + '2.5.4.42': ('givenName', 'GN'), + '2.5.4.43': ('initials', 'initials'), + '2.5.4.44': ('generationQualifier', ), + '2.5.4.45': ('x500UniqueIdentifier', ), + '2.5.4.46': ('dnQualifier', 'dnQualifier'), + '2.5.4.47': ('enhancedSearchGuide', ), + '2.5.4.48': ('protocolInformation', ), + '2.5.4.49': ('distinguishedName', ), + '2.5.4.50': ('uniqueMember', ), + '2.5.4.51': ('houseIdentifier', ), + '2.5.4.52': ('supportedAlgorithms', ), + '2.5.4.53': ('deltaRevocationList', ), + '2.5.4.54': ('dmdName', ), + '2.5.4.65': ('pseudonym', ), + '2.5.4.72': ('role', 'role'), + '2.5.4.97': ('organizationIdentifier', ), + '2.5.4.98': ('countryCode3c', 'c3'), + '2.5.4.99': ('countryCode3n', 'n3'), + '2.5.4.100': ('dnsName', ), + '2.5.8': ('directory services - algorithms', 'X500algorithms'), + '2.5.8.1.1': ('rsa', 'RSA'), + '2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'), + '2.5.8.3.101': ('mdc2', 'MDC2'), + '2.5.29': ('id-ce', ), + '2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'), + '2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'), + '2.5.29.15': ('X509v3 Key Usage', 'keyUsage'), + '2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'), + '2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'), + '2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'), + '2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'), + '2.5.29.20': ('X509v3 CRL Number', 'crlNumber'), + '2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'), + '2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'), + '2.5.29.24': ('Invalidity Date', 'invalidityDate'), + '2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'), + '2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'), + '2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'), + '2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'), + '2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'), + '2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'), + '2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'), + '2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'), + '2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'), + '2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'), + '2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'), + '2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'), + '2.5.29.46': ('X509v3 Freshest CRL', 
'freshestCRL'), + '2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'), + '2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'), + '2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'), + '2.16.840.1.101.3': ('csor', ), + '2.16.840.1.101.3.4': ('nistAlgorithms', ), + '2.16.840.1.101.3.4.1': ('aes', ), + '2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'), + '2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'), + '2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'), + '2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'), + '2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ), + '2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'), + '2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'), + '2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ), + '2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'), + '2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'), + '2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'), + '2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'), + '2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ), + '2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'), + '2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'), + '2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ), + '2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'), + '2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'), + '2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'), + '2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'), + '2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ), + '2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'), + '2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'), + '2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ), + '2.16.840.1.101.3.4.2': ('nist_hashalgs', ), + '2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'), + '2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'), + '2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'), + '2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'), + '2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'), + '2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'), + '2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'), + '2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'), + '2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'), + '2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'), + '2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'), + '2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'), + '2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'), + '2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'), + '2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'), + '2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'), + '2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'), + '2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ), + '2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ), + '2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'), + '2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'), + '2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'), + '2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'), + '2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'), + '2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'), + '2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'), + '2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'), + '2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 
'id-ecdsa-with-sha3-384'), + '2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'), + '2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'), + '2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'), + '2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'), + '2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'), + '2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'), + '2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'), + '2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'), + '2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'), + '2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'), + '2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'), + '2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'), + '2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'), + '2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'), + '2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'), + '2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'), + '2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'), + '2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'), + '2.23': ('International Organizations', 'international-organizations'), + '2.23.42': ('Secure Electronic Transactions', 'id-set'), + '2.23.42.0': ('content types', 'set-ctype'), + '2.23.42.0.0': ('setct-PANData', ), + '2.23.42.0.1': ('setct-PANToken', ), + '2.23.42.0.2': ('setct-PANOnly', ), + '2.23.42.0.3': ('setct-OIData', ), + '2.23.42.0.4': ('setct-PI', ), + '2.23.42.0.5': ('setct-PIData', ), + '2.23.42.0.6': ('setct-PIDataUnsigned', ), + '2.23.42.0.7': ('setct-HODInput', ), + '2.23.42.0.8': ('setct-AuthResBaggage', ), + '2.23.42.0.9': ('setct-AuthRevReqBaggage', ), + '2.23.42.0.10': ('setct-AuthRevResBaggage', ), + '2.23.42.0.11': ('setct-CapTokenSeq', ), + '2.23.42.0.12': ('setct-PInitResData', ), + '2.23.42.0.13': ('setct-PI-TBS', ), + '2.23.42.0.14': ('setct-PResData', ), + '2.23.42.0.16': ('setct-AuthReqTBS', ), + '2.23.42.0.17': ('setct-AuthResTBS', ), + '2.23.42.0.18': ('setct-AuthResTBSX', ), + '2.23.42.0.19': ('setct-AuthTokenTBS', ), + '2.23.42.0.20': ('setct-CapTokenData', ), + '2.23.42.0.21': ('setct-CapTokenTBS', ), + '2.23.42.0.22': ('setct-AcqCardCodeMsg', ), + '2.23.42.0.23': ('setct-AuthRevReqTBS', ), + '2.23.42.0.24': ('setct-AuthRevResData', ), + '2.23.42.0.25': ('setct-AuthRevResTBS', ), + '2.23.42.0.26': ('setct-CapReqTBS', ), + '2.23.42.0.27': ('setct-CapReqTBSX', ), + '2.23.42.0.28': ('setct-CapResData', ), + '2.23.42.0.29': ('setct-CapRevReqTBS', ), + '2.23.42.0.30': ('setct-CapRevReqTBSX', ), + '2.23.42.0.31': ('setct-CapRevResData', ), + '2.23.42.0.32': ('setct-CredReqTBS', ), + '2.23.42.0.33': ('setct-CredReqTBSX', ), + '2.23.42.0.34': ('setct-CredResData', ), + '2.23.42.0.35': ('setct-CredRevReqTBS', ), + '2.23.42.0.36': ('setct-CredRevReqTBSX', ), + '2.23.42.0.37': ('setct-CredRevResData', ), + '2.23.42.0.38': ('setct-PCertReqData', ), + '2.23.42.0.39': ('setct-PCertResTBS', ), + '2.23.42.0.40': ('setct-BatchAdminReqData', ), + '2.23.42.0.41': ('setct-BatchAdminResData', ), + '2.23.42.0.42': ('setct-CardCInitResTBS', ), + '2.23.42.0.43': ('setct-MeAqCInitResTBS', ), + '2.23.42.0.44': ('setct-RegFormResTBS', ), + '2.23.42.0.45': ('setct-CertReqData', ), + '2.23.42.0.46': ('setct-CertReqTBS', ), + '2.23.42.0.47': 
('setct-CertResData', ), + '2.23.42.0.48': ('setct-CertInqReqTBS', ), + '2.23.42.0.49': ('setct-ErrorTBS', ), + '2.23.42.0.50': ('setct-PIDualSignedTBE', ), + '2.23.42.0.51': ('setct-PIUnsignedTBE', ), + '2.23.42.0.52': ('setct-AuthReqTBE', ), + '2.23.42.0.53': ('setct-AuthResTBE', ), + '2.23.42.0.54': ('setct-AuthResTBEX', ), + '2.23.42.0.55': ('setct-AuthTokenTBE', ), + '2.23.42.0.56': ('setct-CapTokenTBE', ), + '2.23.42.0.57': ('setct-CapTokenTBEX', ), + '2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ), + '2.23.42.0.59': ('setct-AuthRevReqTBE', ), + '2.23.42.0.60': ('setct-AuthRevResTBE', ), + '2.23.42.0.61': ('setct-AuthRevResTBEB', ), + '2.23.42.0.62': ('setct-CapReqTBE', ), + '2.23.42.0.63': ('setct-CapReqTBEX', ), + '2.23.42.0.64': ('setct-CapResTBE', ), + '2.23.42.0.65': ('setct-CapRevReqTBE', ), + '2.23.42.0.66': ('setct-CapRevReqTBEX', ), + '2.23.42.0.67': ('setct-CapRevResTBE', ), + '2.23.42.0.68': ('setct-CredReqTBE', ), + '2.23.42.0.69': ('setct-CredReqTBEX', ), + '2.23.42.0.70': ('setct-CredResTBE', ), + '2.23.42.0.71': ('setct-CredRevReqTBE', ), + '2.23.42.0.72': ('setct-CredRevReqTBEX', ), + '2.23.42.0.73': ('setct-CredRevResTBE', ), + '2.23.42.0.74': ('setct-BatchAdminReqTBE', ), + '2.23.42.0.75': ('setct-BatchAdminResTBE', ), + '2.23.42.0.76': ('setct-RegFormReqTBE', ), + '2.23.42.0.77': ('setct-CertReqTBE', ), + '2.23.42.0.78': ('setct-CertReqTBEX', ), + '2.23.42.0.79': ('setct-CertResTBE', ), + '2.23.42.0.80': ('setct-CRLNotificationTBS', ), + '2.23.42.0.81': ('setct-CRLNotificationResTBS', ), + '2.23.42.0.82': ('setct-BCIDistributionTBS', ), + '2.23.42.1': ('message extensions', 'set-msgExt'), + '2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'), + '2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'), + '2.23.42.1.4': ('setext-pinSecure', ), + '2.23.42.1.5': ('setext-pinAny', ), + '2.23.42.1.7': ('setext-track2', ), + '2.23.42.1.8': ('additional verification', 'setext-cv'), + '2.23.42.3': ('set-attr', ), + '2.23.42.3.0': ('setAttr-Cert', ), + '2.23.42.3.0.0': ('set-rootKeyThumb', ), + '2.23.42.3.0.1': ('set-addPolicy', ), + '2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'), + '2.23.42.3.2': ('setAttr-TokenType', ), + '2.23.42.3.2.1': ('setAttr-Token-EMV', ), + '2.23.42.3.2.2': ('setAttr-Token-B0Prime', ), + '2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'), + '2.23.42.3.3.3': ('setAttr-IssCap-CVM', ), + '2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'), + '2.23.42.3.3.4': ('setAttr-IssCap-T2', ), + '2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'), + '2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'), + '2.23.42.3.3.5': ('setAttr-IssCap-Sig', ), + '2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'), + '2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'), + '2.23.42.5': ('set-policy', ), + '2.23.42.5.0': ('set-policy-root', ), + '2.23.42.7': ('certificate extensions', 'set-certExt'), + '2.23.42.7.0': ('setCext-hashedRoot', ), + '2.23.42.7.1': ('setCext-certType', ), + '2.23.42.7.2': ('setCext-merchData', ), + '2.23.42.7.3': ('setCext-cCertRequired', ), + '2.23.42.7.4': ('setCext-tunneling', ), + '2.23.42.7.5': ('setCext-setExt', ), + '2.23.42.7.6': ('setCext-setQualf', ), + '2.23.42.7.7': ('setCext-PGWYcapabilities', ), + '2.23.42.7.8': ('setCext-TokenIdentifier', ), + '2.23.42.7.9': ('setCext-Track2Data', ), + '2.23.42.7.10': ('setCext-TokenType', ), + '2.23.42.7.11': ('setCext-IssuerCapabilities', ), + '2.23.42.8': ('set-brand', ), + '2.23.42.8.1': 
('set-brand-IATA-ATA', ),
+    '2.23.42.8.4': ('set-brand-Visa', ),
+    '2.23.42.8.5': ('set-brand-MasterCard', ),
+    '2.23.42.8.30': ('set-brand-Diners', ),
+    '2.23.42.8.34': ('set-brand-AmericanExpress', ),
+    '2.23.42.8.35': ('set-brand-JCB', ),
+    '2.23.42.8.6011': ('set-brand-Novus', ),
+    '2.23.43': ('wap', ),
+    '2.23.43.1': ('wap-wsg', ),
+    '2.23.43.1.4': ('wap-wsg-idm-ecid', ),
+    '2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
+    '2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
+    '2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
+    '2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
+    '2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
+    '2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
+    '2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
+    '2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
+    '2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
+    '2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
+    '2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
+}
+# #####################################################################################
+# #####################################################################################
+
+_OID_LOOKUP = dict()
+_NORMALIZE_NAMES = dict()
+_NORMALIZE_NAMES_SHORT = dict()
+
+# Build the reverse lookup (name -> dotted OID string) and the normalization
+# tables from _OID_MAP. For every OID, names[0] is the long name and names[-1]
+# is the short name; for example, 'commonName', 'CN' and the OID '2.5.4.3' all
+# refer to the same attribute.
+for dotted, names in _OID_MAP.items():
+    for name in names:
+        if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
+            raise AssertionError(
+                'Name collision during setup: "{0}" for OIDs {1} and {2}'
+                .format(name, dotted, _OID_LOOKUP[name])
+            )
+        _NORMALIZE_NAMES[name] = names[0]
+        _NORMALIZE_NAMES_SHORT[name] = names[-1]
+        _OID_LOOKUP[name] = dotted
+# Register extra aliases that do not occur in OpenSSL's objects.txt.
+for alias, original in [('userID', 'userId')]:
+    if alias in _NORMALIZE_NAMES:
+        raise AssertionError(
+            'Name collision during adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
+            .format(alias, original, _OID_LOOKUP[alias])
+        )
+    _NORMALIZE_NAMES[alias] = original
+    _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
+    _OID_LOOKUP[alias] = _OID_LOOKUP[original]
+
+
+def pyopenssl_normalize_name(name, short=False):
+    nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
+    if nid != 0:
+        b_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
+        name = to_text(OpenSSL._util.ffi.string(b_name))
+    if short:
+        return _NORMALIZE_NAMES_SHORT.get(name, name)
+    else:
+        return _NORMALIZE_NAMES.get(name, name)
+
+
+# #####################################################################################
+# #####################################################################################
+# # This excerpt is dual licensed under the terms of the Apache License, Version
+# # 2.0, and the BSD License. See the LICENSE file at
+# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
+# # +# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py +# # +# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk) +# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale) +# # +# # Relevant commits from cryptography project (https://github.com/pyca/cryptography): +# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07 +# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a +# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3 +# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b +# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d +# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828 +# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f +def _obj2txt(openssl_lib, openssl_ffi, obj): + # Set to 80 on the recommendation of + # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values + # + # But OIDs longer than this occur in real life (e.g. Active + # Directory makes some very long OIDs). So we need to detect + # and properly handle the case where the default buffer is not + # big enough. + # + buf_len = 80 + buf = openssl_ffi.new("char[]", buf_len) + + # 'res' is the number of bytes that *would* be written if the + # buffer is large enough. If 'res' > buf_len - 1, we need to + # alloc a big-enough buffer and go again. + res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1) + if res > buf_len - 1: # account for terminating null byte + buf_len = res + 1 + buf = openssl_ffi.new("char[]", buf_len) + res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1) + return openssl_ffi.buffer(buf, res)[:].decode() +# ##################################################################################### +# ##################################################################################### + + +def cryptography_get_extensions_from_cert(cert): + # Since cryptography won't give us the DER value for an extension + # (that is only stored for unrecognized extensions), we have to re-do + # the extension parsing ourselves. + result = dict() + backend = cert._backend + x509_obj = cert._x509 + + for i in range(backend._lib.X509_get_ext_count(x509_obj)): + ext = backend._lib.X509_get_ext(x509_obj, i) + if ext == backend._ffi.NULL: + continue + crit = backend._lib.X509_EXTENSION_get_critical(ext) + data = backend._lib.X509_EXTENSION_get_data(ext) + backend.openssl_assert(data != backend._ffi.NULL) + der = backend._ffi.buffer(data.data, data.length)[:] + entry = dict( + critical=(crit == 1), + value=base64.b64encode(der), + ) + oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext)) + result[oid] = entry + return result + + +def cryptography_get_extensions_from_csr(csr): + # Since cryptography won't give us the DER value for an extension + # (that is only stored for unrecognized extensions), we have to re-do + # the extension parsing ourselves.
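+ # Note: both of these helpers return a mapping from dotted OID strings to + # entries of the form {'critical': <bool>, 'value': <base64-encoded DER>}. + # For example, a CA certificate with a critical basicConstraints extension + # would yield something like (illustrative values only): + # {'2.5.29.19': {'critical': True, 'value': b'MAMBAf8='}}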
+ result = dict() + backend = csr._backend + + extensions = backend._lib.X509_REQ_get_extensions(csr._x509_req) + extensions = backend._ffi.gc( + extensions, + lambda ext: backend._lib.sk_X509_EXTENSION_pop_free( + ext, + backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free") + ) + ) + + for i in range(backend._lib.sk_X509_EXTENSION_num(extensions)): + ext = backend._lib.sk_X509_EXTENSION_value(extensions, i) + if ext == backend._ffi.NULL: + continue + crit = backend._lib.X509_EXTENSION_get_critical(ext) + data = backend._lib.X509_EXTENSION_get_data(ext) + backend.openssl_assert(data != backend._ffi.NULL) + der = backend._ffi.buffer(data.data, data.length)[:] + entry = dict( + critical=(crit == 1), + value=base64.b64encode(der), + ) + oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext)) + result[oid] = entry + return result + + +def pyopenssl_get_extensions_from_cert(cert): + # While pyOpenSSL allows us to get an extension's DER value, it won't + # give us the dotted string for an OID. So we have to do some magic to + # get hold of it. + result = dict() + ext_count = cert.get_extension_count() + for i in range(0, ext_count): + ext = cert.get_extension(i) + entry = dict( + critical=bool(ext.get_critical()), + value=base64.b64encode(ext.get_data()), + ) + oid = _obj2txt( + OpenSSL._util.lib, + OpenSSL._util.ffi, + OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension) + ) + # This could also be done a bit more simply: + # + # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid)) + # + # Unfortunately this gives the wrong result in case the linked OpenSSL + # doesn't know the OID. That's why we have to get the OID dotted string + # similarly to how cryptography does it. + result[oid] = entry + return result + + +def pyopenssl_get_extensions_from_csr(csr): + # While pyOpenSSL allows us to get an extension's DER value, it won't + # give us the dotted string for an OID. So we have to do some magic to + # get hold of it. + result = dict() + for ext in csr.get_extensions(): + entry = dict( + critical=bool(ext.get_critical()), + value=base64.b64encode(ext.get_data()), + ) + oid = _obj2txt( + OpenSSL._util.lib, + OpenSSL._util.ffi, + OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension) + ) + # This could also be done a bit more simply: + # + # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid)) + # + # Unfortunately this gives the wrong result in case the linked OpenSSL + # doesn't know the OID. That's why we have to get the OID dotted string + # similarly to how cryptography does it. + result[oid] = entry + return result + + +def cryptography_name_to_oid(name): + dotted = _OID_LOOKUP.get(name) + if dotted is None: + raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name)) + return x509.oid.ObjectIdentifier(dotted) + + +def cryptography_oid_to_name(oid, short=False): + dotted_string = oid.dotted_string + names = _OID_MAP.get(dotted_string) + name = names[0] if names else oid._name + if short: + return _NORMALIZE_NAMES_SHORT.get(name, name) + else: + return _NORMALIZE_NAMES.get(name, name) + + +def cryptography_get_name(name): + ''' + Given a name string, returns a cryptography GeneralName object (such as x509.DNSName). + Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
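+ + Illustrative example (reprs shown here may vary across cryptography versions): + + >>> cryptography_get_name('DNS:ansible.com') + <DNSName(value='ansible.com')> + >>> cryptography_get_name('ansible.com') + Traceback (most recent call last): + ... + OpenSSLObjectError: Cannot parse Subject Alternative Name "ansible.com" (forgot "DNS:" prefix?)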
+ ''' + try: + if name.startswith('DNS:'): + return x509.DNSName(to_text(name[4:])) + if name.startswith('IP:'): + return x509.IPAddress(ipaddress.ip_address(to_text(name[3:]))) + if name.startswith('email:'): + return x509.RFC822Name(to_text(name[6:])) + if name.startswith('URI:'): + return x509.UniformResourceIdentifier(to_text(name[4:])) + except Exception as e: + raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e)) + if ':' not in name: + raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name)) + raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name)) + + +def _get_hex(bytesstr): + if bytesstr is None: + return bytesstr + data = binascii.hexlify(bytesstr) + data = to_text(b':'.join(data[i:i + 2] for i in range(0, len(data), 2))) + return data + + +def cryptography_decode_name(name): + ''' + Given a cryptography GeneralName object (such as x509.DNSName), returns a string. + Raises an OpenSSLObjectError if the name is not supported. + ''' + if isinstance(name, x509.DNSName): + return 'DNS:{0}'.format(name.value) + if isinstance(name, x509.IPAddress): + return 'IP:{0}'.format(name.value.compressed) + if isinstance(name, x509.RFC822Name): + return 'email:{0}'.format(name.value) + if isinstance(name, x509.UniformResourceIdentifier): + return 'URI:{0}'.format(name.value) + if isinstance(name, x509.DirectoryName): + # FIXME: test + return 'DirName:' + ''.join(['/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in name.value]) + if isinstance(name, x509.RegisteredID): + # FIXME: test + return 'RegisteredID:{0}'.format(name.value) + if isinstance(name, x509.OtherName): + # FIXME: test + return '{0}:{1}'.format(name.type_id.dotted_string, _get_hex(name.value)) + raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name)) + + +def _cryptography_get_keyusage(usage): + ''' + Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage(). + Raises an OpenSSLObjectError if the identifier is unknown. + ''' + if usage in ('Digital Signature', 'digitalSignature'): + return 'digital_signature' + if usage in ('Non Repudiation', 'nonRepudiation'): + return 'content_commitment' + if usage in ('Key Encipherment', 'keyEncipherment'): + return 'key_encipherment' + if usage in ('Data Encipherment', 'dataEncipherment'): + return 'data_encipherment' + if usage in ('Key Agreement', 'keyAgreement'): + return 'key_agreement' + if usage in ('Certificate Sign', 'keyCertSign'): + return 'key_cert_sign' + if usage in ('CRL Sign', 'cRLSign'): + return 'crl_sign' + if usage in ('Encipher Only', 'encipherOnly'): + return 'encipher_only' + if usage in ('Decipher Only', 'decipherOnly'): + return 'decipher_only' + raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage)) + + +def cryptography_parse_key_usage_params(usages): + ''' + Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage(). + Raises an OpenSSLObjectError if an identifier is unknown.
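+ + Illustrative example: + + >>> params = cryptography_parse_key_usage_params(['digitalSignature', 'cRLSign']) + >>> params['digital_signature'] and params['crl_sign'] + True + + All other flags remain False, so the result can be passed directly as + x509.KeyUsage(**params).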
+ ''' + params = dict( + digital_signature=False, + content_commitment=False, + key_encipherment=False, + data_encipherment=False, + key_agreement=False, + key_cert_sign=False, + crl_sign=False, + encipher_only=False, + decipher_only=False, + ) + for usage in usages: + params[_cryptography_get_keyusage(usage)] = True + return params + + +def cryptography_get_basic_constraints(constraints): + ''' + Given a list of constraints, returns a tuple (ca, path_length). + Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed. + ''' + ca = False + path_length = None + if constraints: + for constraint in constraints: + if constraint.startswith('CA:'): + if constraint == 'CA:TRUE': + ca = True + elif constraint == 'CA:FALSE': + ca = False + else: + raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(constraint[3:])) + elif constraint.startswith('pathlen:'): + v = constraint[len('pathlen:'):] + try: + path_length = int(v) + except Exception as e: + raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(v, e)) + else: + raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(constraint)) + return ca, path_length + + +def binary_exp_mod(f, e, m): + '''Computes f^e mod m in O(log e) multiplications modulo m.''' + # Compute len_e = floor(log_2(e)) + len_e = -1 + x = e + while x > 0: + x >>= 1 + len_e += 1 + # Compute f**e mod m + result = 1 + for k in range(len_e, -1, -1): + result = (result * result) % m + if ((e >> k) & 1) != 0: + result = (result * f) % m + return result + + +def simple_gcd(a, b): + '''Compute GCD of its two inputs.''' + while b != 0: + a, b = b, a % b + return a + + +def quick_is_not_prime(n): + '''Does some quick checks to see if we can poke a hole into the primality of n. + + A result of `False` does **not** mean that the number is prime; it just means + that we couldn't detect quickly whether it is not prime. 
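+ + Illustrative examples: + + >>> quick_is_not_prime(15) # 3 and 5 divide the prime product below + True + >>> quick_is_not_prime(211 * 223) # composite, but smallest prime factor > 200 + False + >>> quick_is_not_prime(1009) # prime, so no hole can be poked + False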
+ ''' + if n <= 2: + return True + # The constant in the next line is the product of all primes < 200 + if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1: + return True + # TODO: maybe do some iterations of Miller-Rabin to increase confidence + # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test) + return False + + +python_version = (sys.version_info[0], sys.version_info[1]) +if python_version >= (2, 7) or python_version >= (3, 1): + # Ansible still supports Python 2.6 on remote nodes + def count_bits(no): + no = abs(no) + if no == 0: + return 0 + return no.bit_length() +else: + # Slow, but works + def count_bits(no): + no = abs(no) + count = 0 + while no > 0: + no >>= 1 + count += 1 + return count + + +PEM_START = '-----BEGIN ' +PEM_END = '-----' +PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY') +PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY' + + +def identify_private_key_format(content): + '''Given the contents of a private key file, identifies its format.''' + # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85 + # (PEM_read_bio_PrivateKey) + # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47 + # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF) + try: + lines = content.decode('utf-8').splitlines(False) + if lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END): + name = lines[0][len(PEM_START):-len(PEM_END)] + if name in PKCS8_PRIVATEKEY_NAMES: + return 'pkcs8' + if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX): + return 'pkcs1' + return 'unknown-pem' + except UnicodeDecodeError: + pass + return 'raw' + + +def cryptography_key_needs_digest_for_signing(key): + '''Tests whether the given private key requires a digest algorithm for signing. + + Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm. + ''' + if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey): + return False + if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey): + return False + return True + + +def cryptography_compare_public_keys(key1, key2): + '''Tests whether two public keys are the same. + + Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers(). 
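+ + Minimal usage sketch (assumes a sufficiently recent ``cryptography`` release + where no explicit backend argument is needed): + + >>> from cryptography.hazmat.primitives.asymmetric import rsa + >>> key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + >>> other = rsa.generate_private_key(public_exponent=65537, key_size=2048) + >>> cryptography_compare_public_keys(key.public_key(), key.public_key()) + True + >>> cryptography_compare_public_keys(key.public_key(), other.public_key()) + False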
+ ''' + if CRYPTOGRAPHY_HAS_ED25519: + a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) + b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) + if a or b: + if not a or not b: + return False + a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) + b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) + return a == b + if CRYPTOGRAPHY_HAS_ED448: + a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) + b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) + if a or b: + if not a or not b: + return False + a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) + b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) + return a == b + return key1.public_numbers() == key2.public_numbers() + + +if HAS_CRYPTOGRAPHY: + REVOCATION_REASON_MAP = { + 'unspecified': x509.ReasonFlags.unspecified, + 'key_compromise': x509.ReasonFlags.key_compromise, + 'ca_compromise': x509.ReasonFlags.ca_compromise, + 'affiliation_changed': x509.ReasonFlags.affiliation_changed, + 'superseded': x509.ReasonFlags.superseded, + 'cessation_of_operation': x509.ReasonFlags.cessation_of_operation, + 'certificate_hold': x509.ReasonFlags.certificate_hold, + 'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn, + 'aa_compromise': x509.ReasonFlags.aa_compromise, + 'remove_from_crl': x509.ReasonFlags.remove_from_crl, + } + REVOCATION_REASON_MAP_INVERSE = dict() + for k, v in REVOCATION_REASON_MAP.items(): + REVOCATION_REASON_MAP_INVERSE[v] = k + + +def cryptography_decode_revoked_certificate(cert): + result = { + 'serial_number': cert.serial_number, + 'revocation_date': cert.revocation_date, + 'issuer': None, + 'issuer_critical': False, + 'reason': None, + 'reason_critical': False, + 'invalidity_date': None, + 'invalidity_date_critical': False, + } + try: + ext = cert.extensions.get_extension_for_class(x509.CertificateIssuer) + result['issuer'] = list(ext.value) + result['issuer_critical'] = ext.critical + except x509.ExtensionNotFound: + pass + try: + ext = cert.extensions.get_extension_for_class(x509.CRLReason) + result['reason'] = ext.value.reason + result['reason_critical'] = ext.critical + except x509.ExtensionNotFound: + pass + try: + ext = cert.extensions.get_extension_for_class(x509.InvalidityDate) + result['invalidity_date'] = ext.value.invalidity_date + result['invalidity_date_critical'] = ext.critical + except x509.ExtensionNotFound: + pass + return result diff --git a/test/support/integration/plugins/module_utils/database.py b/test/support/integration/plugins/module_utils/database.py new file mode 100644 index 00000000..014939a2 --- /dev/null +++ b/test/support/integration/plugins/module_utils/database.py @@ -0,0 +1,142 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2014, Toshio Kuratomi +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class SQLParseError(Exception): + pass + + +class UnclosedQuoteError(SQLParseError): + pass + + +# maps a type of identifier to the maximum number of dot levels that are +# allowed to specify that identifier. For example, a database column can be +# specified by up to 4 levels: database.schema.table.column +_PG_IDENTIFIER_TO_DOT_LEVEL = dict( + database=1, + schema=2, + table=3, + column=4, + role=1, + tablespace=1, + sequence=3, + publication=1, +) +_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) + + +def _find_end_quote(identifier, quote_char): + accumulate = 0 + while True: + try: + quote = identifier.index(quote_char) + except ValueError: + raise UnclosedQuoteError + accumulate = accumulate + quote + try: + next_char = identifier[quote + 1] + except IndexError: + return accumulate + if next_char == quote_char: + try: + identifier = identifier[quote + 2:] + accumulate = accumulate + 2 + except IndexError: + raise UnclosedQuoteError + else: + return accumulate + + +def _identifier_parse(identifier, quote_char): + if not identifier: + raise SQLParseError('Identifier name unspecified or unquoted trailing dot') + + already_quoted = False + if identifier.startswith(quote_char): + already_quoted = True + try: + end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1 + except UnclosedQuoteError: + already_quoted = False + else: + if end_quote < len(identifier) - 1: + if identifier[end_quote + 1] == '.': + dot = end_quote + 1 + first_identifier = identifier[:dot] + next_identifier = identifier[dot + 1:] + further_identifiers = _identifier_parse(next_identifier, quote_char) + further_identifiers.insert(0, first_identifier) + else: + raise SQLParseError('User escaped identifiers must escape extra quotes') + else: + further_identifiers = [identifier] + + if not already_quoted: + try: + dot = identifier.index('.') + except ValueError: + identifier = identifier.replace(quote_char, quote_char * 2) + identifier = ''.join((quote_char, identifier, quote_char)) + further_identifiers = [identifier] + else: + if dot == 0 or dot >= len(identifier) - 1: + identifier = identifier.replace(quote_char, quote_char * 2) + identifier = ''.join((quote_char, identifier, quote_char)) + 
further_identifiers = [identifier] + else: + first_identifier = identifier[:dot] + next_identifier = identifier[dot + 1:] + further_identifiers = _identifier_parse(next_identifier, quote_char) + first_identifier = first_identifier.replace(quote_char, quote_char * 2) + first_identifier = ''.join((quote_char, first_identifier, quote_char)) + further_identifiers.insert(0, first_identifier) + + return further_identifiers + + +def pg_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier, quote_char='"') + if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + return '.'.join(identifier_fragments) + + +def mysql_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier, quote_char='`') + if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) + + special_cased_fragments = [] + for fragment in identifier_fragments: + if fragment == '`*`': + special_cased_fragments.append('*') + else: + special_cased_fragments.append(fragment) + + return '.'.join(special_cased_fragments) diff --git a/test/support/integration/plugins/module_utils/docker/__init__.py b/test/support/integration/plugins/module_utils/docker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/docker/common.py b/test/support/integration/plugins/module_utils/docker/common.py new file mode 100644 index 00000000..03307250 --- /dev/null +++ b/test/support/integration/plugins/module_utils/docker/common.py @@ -0,0 +1,1022 @@ +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import os +import platform +import re +import sys +from datetime import timedelta +from distutils.version import LooseVersion + + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible.module_utils.common._collections_compat import Mapping, Sequence +from ansible.module_utils.six import string_types +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE + +HAS_DOCKER_PY = True +HAS_DOCKER_PY_2 = False +HAS_DOCKER_PY_3 = False +HAS_DOCKER_ERROR = None + +try: + from requests.exceptions import SSLError + from docker import __version__ as docker_version + from docker.errors import APIError, NotFound, TLSParameterError + from docker.tls import TLSConfig + from docker import auth + + if LooseVersion(docker_version) >= LooseVersion('3.0.0'): + HAS_DOCKER_PY_3 = True + from docker import APIClient as Client + elif LooseVersion(docker_version) >= LooseVersion('2.0.0'): + HAS_DOCKER_PY_2 = True + from docker import APIClient as Client + else: + from docker import Client + +except ImportError as exc: + HAS_DOCKER_ERROR = str(exc) + HAS_DOCKER_PY = False + + +# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used +# to ensure the user does not have both ``docker`` and ``docker-py`` modules +# installed, as they utilize the same namespace and are incompatible +try: + # docker (Docker SDK for Python >= 2.0.0) + import docker.models # noqa: F401 + HAS_DOCKER_MODELS = True +except ImportError: + HAS_DOCKER_MODELS = False + +try: + # docker-py (Docker SDK for Python < 2.0.0) + import docker.ssladapter # noqa: F401 + HAS_DOCKER_SSLADAPTER = True +except ImportError: + HAS_DOCKER_SSLADAPTER = False + + +try: + from requests.exceptions import RequestException +except ImportError: + # Either docker-py is no longer using requests, or docker-py isn't around either, + # or docker-py's dependency requests is missing. In any case, define an exception + # class RequestException so that our code doesn't break.
+ class RequestException(Exception): + pass + + +DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' +DEFAULT_TLS = False +DEFAULT_TLS_VERIFY = False +DEFAULT_TLS_HOSTNAME = 'localhost' +MIN_DOCKER_VERSION = "1.8.0" +DEFAULT_TIMEOUT_SECONDS = 60 + +DOCKER_COMMON_ARGS = dict( + docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']), + tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])), + api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']), + timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])), + ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']), + client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']), + client_key=dict(type='path', aliases=['tls_client_key', 'key_path']), + ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])), + tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])), + validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']), + debug=dict(type='bool', default=False) +) + +DOCKER_MUTUALLY_EXCLUSIVE = [] + +DOCKER_REQUIRED_TOGETHER = [ + ['client_cert', 'client_key'] +] + +DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/' +EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+' +BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + + +if not HAS_DOCKER_PY: + docker_version = None + + # No Docker SDK for Python. Create a placeholder client to allow + # instantiation of AnsibleModule and proper error handling + class Client(object): # noqa: F811 + def __init__(self, **kwargs): + pass + + class APIError(Exception): # noqa: F811 + pass + + class NotFound(Exception): # noqa: F811 + pass + + +def is_image_name_id(name): + """Check whether the given image name is in fact an image ID (hash).""" + if re.match('^sha256:[0-9a-fA-F]{64}$', name): + return True + return False + + +def is_valid_tag(tag, allow_empty=False): + """Check whether the given string is a valid docker tag name.""" + if not tag: + return allow_empty + # See here ("Extended description") for a definition of what tags can be: + # https://docs.docker.com/engine/reference/commandline/tag/ + return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag)) + + +def sanitize_result(data): + """Sanitize data object for return to Ansible. + + When the data object contains types such as docker.types.containers.HostConfig, + Ansible will fail when these are returned via exit_json or fail_json. + HostConfig is derived from dict, but its constructor requires additional + arguments. This function sanitizes data structures by recursively converting + everything derived from dict to dict and everything derived from list (and tuple) + to a list.
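+ + Illustrative example: + + >>> sanitize_result({'a': ({'b': 1},)}) + {'a': [{'b': 1}]}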
+ """ + if isinstance(data, dict): + return dict((k, sanitize_result(v)) for k, v in data.items()) + elif isinstance(data, (list, tuple)): + return [sanitize_result(v) for v in data] + else: + return data + + +class DockerBaseClass(object): + + def __init__(self): + self.debug = False + + def log(self, msg, pretty_print=False): + pass + # if self.debug: + # log_file = open('docker.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) + # log_file.write(u'\n') + # else: + # log_file.write(msg + u'\n') + + +def update_tls_hostname(result): + if result['tls_hostname'] is None: + # get default machine name from the url + parsed_url = urlparse(result['docker_host']) + if ':' in parsed_url.netloc: + result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + result['tls_hostname'] = parsed_url + + +def _get_tls_config(fail_function, **kwargs): + try: + tls_config = TLSConfig(**kwargs) + return tls_config + except TLSParameterError as exc: + fail_function("TLS config error: %s" % exc) + + +def get_connect_params(auth, fail_function): + if auth['tls'] or auth['tls_verify']: + auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') + + if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: + # TLS with certs and host verification + if auth['cacert_path']: + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + ca_cert=auth['cacert_path'], + verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + else: + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls_verify'] and auth['cacert_path']: + # TLS with cacert only + tls_config = _get_tls_config(ca_cert=auth['cacert_path'], + assert_hostname=auth['tls_hostname'], + verify=True, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls_verify']: + # TLS with verify and no certs + tls_config = _get_tls_config(verify=True, + assert_hostname=auth['tls_hostname'], + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls'] and auth['cert_path'] and auth['key_path']: + # TLS with certs and no host verification + tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), + verify=False, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + if auth['tls']: + # TLS with no certs and not host verification + tls_config = _get_tls_config(verify=False, + ssl_version=auth['ssl_version'], + fail_function=fail_function) + return dict(base_url=auth['docker_host'], + tls=tls_config, + version=auth['api_version'], + timeout=auth['timeout']) + + # No TLS + return dict(base_url=auth['docker_host'], + version=auth['api_version'], + timeout=auth['timeout']) + + +DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip 
install docker`." +DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade." +DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. " + "Hint: if you do not need Python 2.6 support, try " + "`pip uninstall docker-py` instead, followed by `pip install docker`.") + + +class AnsibleDockerClient(Client): + + def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, + required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION, + min_docker_api_version=None, option_minimal_versions=None, + option_minimal_versions_ignore_params=None, fail_results=None): + + # Modules can put information in here which will always be returned + # in case client.fail() is called. + self.fail_results = fail_results or {} + + merged_arg_spec = dict() + merged_arg_spec.update(DOCKER_COMMON_ARGS) + if argument_spec: + merged_arg_spec.update(argument_spec) + self.arg_spec = merged_arg_spec + + mutually_exclusive_params = [] + mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE + if mutually_exclusive: + mutually_exclusive_params += mutually_exclusive + + required_together_params = [] + required_together_params += DOCKER_REQUIRED_TOGETHER + if required_together: + required_together_params += required_together + + self.module = AnsibleModule( + argument_spec=merged_arg_spec, + supports_check_mode=supports_check_mode, + mutually_exclusive=mutually_exclusive_params, + required_together=required_together_params, + required_if=required_if) + + NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0')) + + self.docker_py_version = LooseVersion(docker_version) + + if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER: + self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker " + "SDK for Python) installed together as they use the same namespace and cause a corrupt " + "installation. Please uninstall both packages, and re-install only the docker-py or docker " + "python module (for %s's Python %s). It is recommended to install the docker module if no " + "support for Python 2.6 is required. Please note that simply uninstalling one of the modules " + "can leave the other module in a broken state." % (platform.node(), sys.executable)) + + if not HAS_DOCKER_PY: + if NEEDS_DOCKER_PY2: + msg = missing_required_lib("Docker SDK for Python: docker") + msg = msg + ", for example via `pip install docker`. The error was: %s" + else: + msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)") + msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s" + self.fail(msg % HAS_DOCKER_ERROR) + + if self.docker_py_version < LooseVersion(min_docker_version): + msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s." + if not NEEDS_DOCKER_PY2: + # The minimal required version is < 2.0 (and the current version as well). + # Advertise docker (instead of docker-py) for non-Python-2.6 users. 
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif docker_version < LooseVersion('2.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version)) + + self.debug = self.module.params.get('debug') + self.check_mode = self.module.check_mode + self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail) + + try: + super(AnsibleDockerClient, self).__init__(**self._connect_params) + self.docker_api_version_str = self.version()['ApiVersion'] + except APIError as exc: + self.fail("Docker API error: %s" % exc) + except Exception as exc: + self.fail("Error connecting: %s" % exc) + + self.docker_api_version = LooseVersion(self.docker_api_version_str) + if min_docker_api_version is not None: + if self.docker_api_version < LooseVersion(min_docker_api_version): + self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + + if option_minimal_versions is not None: + self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params) + + def log(self, msg, pretty_print=False): + pass + # if self.debug: + # log_file = open('docker.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) + # log_file.write(u'\n') + # else: + # log_file.write(msg + u'\n') + + def fail(self, msg, **kwargs): + self.fail_results.update(kwargs) + self.module.fail_json(msg=msg, **sanitize_result(self.fail_results)) + + @staticmethod + def _get_value(param_name, param_value, env_variable, default_value): + if param_value is not None: + # take module parameter value + if param_value in BOOLEANS_TRUE: + return True + if param_value in BOOLEANS_FALSE: + return False + return param_value + + if env_variable is not None: + env_value = os.environ.get(env_variable) + if env_value is not None: + # take the env variable value + if param_name == 'cert_path': + return os.path.join(env_value, 'cert.pem') + if param_name == 'cacert_path': + return os.path.join(env_value, 'ca.pem') + if param_name == 'key_path': + return os.path.join(env_value, 'key.pem') + if env_value in BOOLEANS_TRUE: + return True + if env_value in BOOLEANS_FALSE: + return False + return env_value + + # take the default + return default_value + + @property + def auth_params(self): + # Get authentication credentials. + # Precedence: module parameters-> environment variables-> defaults. + + self.log('Getting credentials') + + params = dict() + for key in DOCKER_COMMON_ARGS: + params[key] = self.module.params.get(key) + + if self.module.params.get('use_tls'): + # support use_tls option in docker_image.py. This will be deprecated. 
+ use_tls = self.module.params.get('use_tls') + if use_tls == 'encrypt': + params['tls'] = True + if use_tls == 'verify': + params['validate_certs'] = True + + result = dict( + docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST', + DEFAULT_DOCKER_HOST), + tls_hostname=self._get_value('tls_hostname', params['tls_hostname'], + 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME), + api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION', + 'auto'), + cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None), + cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None), + key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None), + ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None), + tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS), + tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY', + DEFAULT_TLS_VERIFY), + timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT', + DEFAULT_TIMEOUT_SECONDS), + ) + + update_tls_hostname(result) + + return result + + def _handle_ssl_error(self, error): + match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) + if match: + self.fail("You asked for verification that the Docker daemon's certificate hostname matches %s. " + "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME " + "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by " + "setting the `tls` parameter to true." + % (self.auth_params['tls_hostname'], match.group(1), match.group(1))) + self.fail("SSL Exception: %s" % (error)) + + def _get_minimal_versions(self, option_minimal_versions, ignore_params=None): + self.option_minimal_versions = dict() + for option in self.module.argument_spec: + if ignore_params is not None: + if option in ignore_params: + continue + self.option_minimal_versions[option] = dict() + self.option_minimal_versions.update(option_minimal_versions) + + for option, data in self.option_minimal_versions.items(): + # Test whether option is supported, and store result + support_docker_py = True + support_docker_api = True + if 'docker_py_version' in data: + support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version']) + if 'docker_api_version' in data: + support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version']) + data['supported'] = support_docker_py and support_docker_api + # Fail if option is not supported but used + if not data['supported']: + # Test whether option is specified + if 'detect_usage' in data: + used = data['detect_usage'](self) + else: + used = self.module.params.get(option) is not None + if used and 'default' in self.module.argument_spec[option]: + used = self.module.params[option] != self.module.argument_spec[option]['default'] + if used: + # If the option is used, compose error message. + if 'usage_msg' in data: + usg = data['usage_msg'] + else: + usg = 'set %s option' % (option, ) + if not support_docker_api: + msg = 'Docker API version is %s. Minimum version required is %s to %s.' + msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg) + elif not support_docker_py: + msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s.
" + if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER + elif self.docker_py_version < LooseVersion('2.0.0'): + msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER + else: + msg += DOCKERPYUPGRADE_UPGRADE_DOCKER + msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg) + else: + # should not happen + msg = 'Cannot %s with your configuration.' % (usg, ) + self.fail(msg) + + def get_container_by_id(self, container_id): + try: + self.log("Inspecting container Id %s" % container_id) + result = self.inspect_container(container=container_id) + self.log("Completed container inspection") + return result + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting container: %s" % exc) + + def get_container(self, name=None): + ''' + Lookup a container and return the inspection results. + ''' + if name is None: + return None + + search_name = name + if not name.startswith('/'): + search_name = '/' + name + + result = None + try: + for container in self.containers(all=True): + self.log("testing container: %s" % (container['Names'])) + if isinstance(container['Names'], list) and search_name in container['Names']: + result = container + break + if container['Id'].startswith(name): + result = container + break + if container['Id'] == name: + result = container + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving container list: %s" % exc) + + if result is None: + return None + + return self.get_container_by_id(result['Id']) + + def get_network(self, name=None, network_id=None): + ''' + Lookup a network and return the inspection results. + ''' + if name is None and network_id is None: + return None + + result = None + + if network_id is None: + try: + for network in self.networks(): + self.log("testing network: %s" % (network['Name'])) + if name == network['Name']: + result = network + break + if network['Id'].startswith(name): + result = network + break + except SSLError as exc: + self._handle_ssl_error(exc) + except Exception as exc: + self.fail("Error retrieving network list: %s" % exc) + + if result is not None: + network_id = result['Id'] + + if network_id is not None: + try: + self.log("Inspecting network Id %s" % network_id) + result = self.inspect_network(network_id) + self.log("Completed network inspection") + except NotFound as dummy: + return None + except Exception as exc: + self.fail("Error inspecting network: %s" % exc) + + return result + + def find_image(self, name, tag): + ''' + Lookup an image (by name and tag) and return the inspection results. 
+ ''' + if not name: + return None + + self.log("Find image %s:%s" % (name, tag)) + images = self._image_lookup(name, tag) + if not images: + # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub + registry, repo_name = auth.resolve_repository_name(name) + if registry == 'docker.io': + # If docker.io is explicitly there in name, the image + # isn't found in some cases (#41509) + self.log("Check for docker.io image: %s" % repo_name) + images = self._image_lookup(repo_name, tag) + if not images and repo_name.startswith('library/'): + # Sometimes library/xxx images are not found + lookup = repo_name[len('library/'):] + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + if not images: + # Last case: if docker.io wasn't there, it can be that + # the image wasn't found either (#15586) + lookup = "%s/%s" % (registry, repo_name) + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + + if len(images) > 1: + self.fail("Registry returned more than one result for %s:%s" % (name, tag)) + + if len(images) == 1: + try: + inspection = self.inspect_image(images[0]['Id']) + except Exception as exc: + self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) + return inspection + + self.log("Image %s:%s not found." % (name, tag)) + return None + + def find_image_by_id(self, image_id): + ''' + Lookup an image (by ID) and return the inspection results. + ''' + if not image_id: + return None + + self.log("Find image %s (by ID)" % image_id) + try: + inspection = self.inspect_image(image_id) + except Exception as exc: + self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) + return inspection + + def _image_lookup(self, name, tag): + ''' + Including a tag in the name parameter sent to the Docker SDK for Python images method + does not work consistently. Instead, get the result set for name and manually check + if the tag exists. + ''' + try: + response = self.images(name=name) + except Exception as exc: + self.fail("Error searching for image %s - %s" % (name, str(exc))) + images = response + if tag: + lookup = "%s:%s" % (name, tag) + lookup_digest = "%s@%s" % (name, tag) + images = [] + for image in response: + tags = image.get('RepoTags') + digests = image.get('RepoDigests') + if (tags and lookup in tags) or (digests and lookup_digest in digests): + images = [image] + break + return images + + def pull_image(self, name, tag="latest"): + ''' + Pull an image + ''' + self.log("Pulling image %s:%s" % (name, tag)) + old_tag = self.find_image(name, tag) + try: + for line in self.pull(name, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('error'): + if line.get('errorDetail'): + error_detail = line.get('errorDetail') + self.fail("Error pulling %s - code: %s message: %s" % (name, + error_detail.get('code'), + error_detail.get('message'))) + else: + self.fail("Error pulling %s - %s" % (name, line.get('error'))) + except Exception as exc: + self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) + + new_tag = self.find_image(name, tag) + + return new_tag, old_tag == new_tag + + def report_warnings(self, result, warnings_key=None): + ''' + Checks result of client operation for warnings, and if present, outputs them. + + warnings_key should be a list of keys used to crawl the result dictionary. + For example, if warnings_key == ['a', 'b'], the function will consider + result['a']['b'] if these keys exist. 
If the result is a non-empty string, it + will be reported as a warning. If the result is a list, every entry will be + reported as a warning. + + In most cases (if warnings are returned at all), warnings_key should be + ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings']. + ''' + if warnings_key is None: + warnings_key = ['Warnings'] + for key in warnings_key: + if not isinstance(result, Mapping): + return + result = result.get(key) + if isinstance(result, Sequence): + for warning in result: + self.module.warn('Docker warning: {0}'.format(warning)) + elif isinstance(result, string_types) and result: + self.module.warn('Docker warning: {0}'.format(result)) + + def inspect_distribution(self, image, **kwargs): + ''' + Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0 + since prior versions did not support accessing private repositories. + ''' + if self.docker_py_version < LooseVersion('4.0.0'): + registry = auth.resolve_repository_name(image)[0] + header = auth.get_config_header(self, registry) + if header: + return self._result(self._get( + self._url('/distribution/{0}/json', image), + headers={'X-Registry-Auth': header} + ), json=True) + return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs) + + +def compare_dict_allow_more_present(av, bv): + ''' + Compare two dictionaries for whether every entry of the first is in the second. + ''' + for key, value in av.items(): + if key not in bv: + return False + if bv[key] != value: + return False + return True + + +def compare_generic(a, b, method, datatype): + ''' + Compare values a and b as described by method and datatype. + + Returns ``True`` if the values compare equal, and ``False`` if not. + + ``a`` is usually the module's parameter, while ``b`` is a property + of the current object. ``a`` must not be ``None`` (except for + ``datatype == 'value'``). + + Valid values for ``method`` are: + - ``ignore`` (always compare as equal); + - ``strict`` (only compare if really equal) + - ``allow_more_present`` (allow b to have elements which a does not have). + + Valid values for ``datatype`` are: + - ``value``: for simple values (strings, numbers, ...); + - ``list``: for ``list``s or ``tuple``s where order matters; + - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not + matter; + - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does + not matter and which contain ``dict``s; ``allow_more_present`` is used + for the ``dict``s, and these are assumed to be dictionaries of values; + - ``dict``: for dictionaries of values. 
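+ + Illustrative examples: + + >>> compare_generic(['a'], ['a', 'b'], 'allow_more_present', 'set') + True + >>> compare_generic(['a', 'b'], ['b', 'a'], 'strict', 'list') + False + >>> compare_generic({'x': 1}, {'x': 1, 'y': 2}, 'allow_more_present', 'dict') + True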
+ ''' + if method == 'ignore': + return True + # If a or b is None: + if a is None or b is None: + # If both are None: equality + if a == b: + return True + # Otherwise, not equal for values, and equal + # if the other is empty for set/list/dict + if datatype == 'value': + return False + # For allow_more_present, allow a to be None + if method == 'allow_more_present' and a is None: + return True + # Otherwise, the iterable object which is not None must have length 0 + return len(b if a is None else a) == 0 + # Do proper comparison (both objects not None) + if datatype == 'value': + return a == b + elif datatype == 'list': + if method == 'strict': + return a == b + else: + i = 0 + for v in a: + while i < len(b) and b[i] != v: + i += 1 + if i == len(b): + return False + i += 1 + return True + elif datatype == 'dict': + if method == 'strict': + return a == b + else: + return compare_dict_allow_more_present(a, b) + elif datatype == 'set': + set_a = set(a) + set_b = set(b) + if method == 'strict': + return set_a == set_b + else: + return set_b >= set_a + elif datatype == 'set(dict)': + for av in a: + found = False + for bv in b: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + if method == 'strict': + # If we would know that both a and b do not contain duplicates, + # we could simply compare len(a) to len(b) to finish this test. + # We can assume that b has no duplicates (as it is returned by + # docker), but we don't know for a. + for bv in b: + found = False + for av in a: + if compare_dict_allow_more_present(av, bv): + found = True + break + if not found: + return False + return True + + +class DifferenceTracker(object): + def __init__(self): + self._diff = [] + + def add(self, name, parameter=None, active=None): + self._diff.append(dict( + name=name, + parameter=parameter, + active=active, + )) + + def merge(self, other_tracker): + self._diff.extend(other_tracker._diff) + + @property + def empty(self): + return len(self._diff) == 0 + + def get_before_after(self): + ''' + Return texts ``before`` and ``after``. + ''' + before = dict() + after = dict() + for item in self._diff: + before[item['name']] = item['active'] + after[item['name']] = item['parameter'] + return before, after + + def has_difference_for(self, name): + ''' + Returns a boolean if a difference exists for name + ''' + return any(diff for diff in self._diff if diff['name'] == name) + + def get_legacy_docker_container_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [] + for entry in self._diff: + item = dict() + item[entry['name']] = dict( + parameter=entry['parameter'], + container=entry['active'], + ) + result.append(item) + return result + + def get_legacy_docker_diffs(self): + ''' + Return differences in the docker_container legacy format. + ''' + result = [entry['name'] for entry in self._diff] + return result + + +def clean_dict_booleans_for_docker_api(data): + ''' + Go doesn't like Python booleans 'True' or 'False', while Ansible is just + fine with them in YAML. As such, they need to be converted in cases where + we pass dictionaries to the Docker API (e.g. docker_network's + driver_options and docker_prune's filters). + ''' + result = dict() + if data is not None: + for k, v in data.items(): + if v is True: + v = 'true' + elif v is False: + v = 'false' + else: + v = str(v) + result[str(k)] = v + return result + + +def convert_duration_to_nanosecond(time_str): + """ + Return time duration in nanosecond. 
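+ + Illustrative example, using the units recognized by the regular expression + below (h, m, s, ms, us): + + >>> convert_duration_to_nanosecond('1m30s') + 90000000000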
+ """ + if not isinstance(time_str, str): + raise ValueError('Missing unit in duration - %s' % time_str) + + regex = re.compile( + r'^(((?P\d+)h)?' + r'((?P\d+)m(?!s))?' + r'((?P\d+)s)?' + r'((?P\d+)ms)?' + r'((?P\d+)us)?)$' + ) + parts = regex.match(time_str) + + if not parts: + raise ValueError('Invalid time duration - %s' % time_str) + + parts = parts.groupdict() + time_params = {} + for (name, value) in parts.items(): + if value: + time_params[name] = int(value) + + delta = timedelta(**time_params) + time_in_nanoseconds = ( + delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 + ) * 10 ** 3 + + return time_in_nanoseconds + + +def parse_healthcheck(healthcheck): + """ + Return dictionary of healthcheck parameters and boolean if + healthcheck defined in image was requested to be disabled. + """ + if (not healthcheck) or (not healthcheck.get('test')): + return None, None + + result = dict() + + # All supported healthcheck parameters + options = dict( + test='test', + interval='interval', + timeout='timeout', + start_period='start_period', + retries='retries' + ) + + duration_options = ['interval', 'timeout', 'start_period'] + + for (key, value) in options.items(): + if value in healthcheck: + if healthcheck.get(value) is None: + # due to recursive argument_spec, all keys are always present + # (but have default value None if not specified) + continue + if value in duration_options: + time = convert_duration_to_nanosecond(healthcheck.get(value)) + if time: + result[key] = time + elif healthcheck.get(value): + result[key] = healthcheck.get(value) + if key == 'test': + if isinstance(result[key], (tuple, list)): + result[key] = [str(e) for e in result[key]] + else: + result[key] = ['CMD-SHELL', str(result[key])] + elif key == 'retries': + try: + result[key] = int(result[key]) + except ValueError: + raise ValueError( + 'Cannot parse number of retries for healthcheck. ' + 'Expected an integer, got "{0}".'.format(result[key]) + ) + + if result['test'] == ['NONE']: + # If the user explicitly disables the healthcheck, return None + # as the healthcheck object, and set disable_healthcheck to True + return None, True + + return result, False + + +def omit_none_from_dict(d): + """ + Return a copy of the dictionary with all keys with value None omitted. + """ + return dict((k, v) for (k, v) in d.items() if v is not None) diff --git a/test/support/integration/plugins/module_utils/docker/swarm.py b/test/support/integration/plugins/module_utils/docker/swarm.py new file mode 100644 index 00000000..55d94db0 --- /dev/null +++ b/test/support/integration/plugins/module_utils/docker/swarm.py @@ -0,0 +1,280 @@ +# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) +# (c) Thierry Bouvet (@tbouvet) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import json +from time import sleep + +try: + from docker.errors import APIError, NotFound +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible.module_utils._text import to_native +from ansible.module_utils.docker.common import ( + AnsibleDockerClient, + LooseVersion, +) + + +class AnsibleDockerSwarmClient(AnsibleDockerClient): + + def __init__(self, **kwargs): + super(AnsibleDockerSwarmClient, self).__init__(**kwargs) + + def get_swarm_node_id(self): + """ + Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. 
+        It returns the NodeID of the Docker host the module is executed on.
+        :return:
+            NodeID of host or 'None' if not part of Swarm
+        """
+
+        try:
+            info = self.info()
+        except APIError as exc:
+            self.fail("Failed to get node information for %s" % to_native(exc))
+
+        if info:
+            json_str = json.dumps(info, ensure_ascii=False)
+            swarm_info = json.loads(json_str)
+            if swarm_info['Swarm']['NodeID']:
+                return swarm_info['Swarm']['NodeID']
+        return None
+
+    def check_if_swarm_node(self, node_id=None):
+        """
+        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
+        system information, looking for a specific key in the output. If 'node_id' is provided then it tries to
+        read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles
+        the exception if it is not executed on a Swarm manager.
+
+        :param node_id: Node identifier
+        :return:
+            bool: True if node is part of Swarm, False otherwise
+        """
+
+        if node_id is None:
+            try:
+                info = self.info()
+            except APIError:
+                self.fail("Failed to get host information.")
+
+            if info:
+                json_str = json.dumps(info, ensure_ascii=False)
+                swarm_info = json.loads(json_str)
+                if swarm_info['Swarm']['NodeID']:
+                    return True
+                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+                    return True
+            return False
+        else:
+            try:
+                node_info = self.get_node_inspect(node_id=node_id)
+            except APIError:
+                return
+
+            if node_info['ID'] is not None:
+                return True
+            return False
+
+    def check_if_swarm_manager(self):
+        """
+        Checks if the node role is set as Manager in Swarm. The node is the docker host on which the module
+        action is performed. The inspect_swarm() call will fail if the node is not a manager.
+
+        :return: True if node is Swarm Manager, False otherwise
+        """
+
+        try:
+            self.inspect_swarm()
+            return True
+        except APIError:
+            return False
+
+    def fail_task_if_not_swarm_manager(self):
+        """
+        If the host is not a swarm manager then the Ansible task on this host should end in the 'failed' state.
+        """
+        if not self.check_if_swarm_manager():
+            self.fail("Error running docker swarm module: must run on swarm manager node")
+
+    def check_if_swarm_worker(self):
+        """
+        Checks if the node role is set as Worker in Swarm. The node is the docker host on which the module
+        action is performed. Will fail if run on a host that is not part of a Swarm, via check_if_swarm_node().
+
+        :return: True if node is Swarm Worker, False otherwise
+        """
+
+        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+            return True
+        return False
+
+    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+        """
+        Checks if the node status on the Swarm manager is 'down'. If node_id is provided it queries the manager
+        about the node specified in the parameter, otherwise it queries the manager itself.
If run on Swarm Worker node or + host that is not part of Swarm it will fail the playbook + + :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once + :param node_id: node ID or name, if None then method will try to get node_id of host module run on + :return: + True if node is part of swarm but its state is down, False otherwise + """ + + if repeat_check < 1: + repeat_check = 1 + + if node_id is None: + node_id = self.get_swarm_node_id() + + for retry in range(0, repeat_check): + if retry > 0: + sleep(5) + node_info = self.get_node_inspect(node_id=node_id) + if node_info['Status']['State'] == 'down': + return True + return False + + def get_node_inspect(self, node_id=None, skip_missing=False): + """ + Returns Swarm node info as in 'docker node inspect' command about single node + + :param skip_missing: if True then function will return None instead of failing the task + :param node_id: node ID or name, if None then method will try to get node_id of host module run on + :return: + Single node information structure + """ + + if node_id is None: + node_id = self.get_swarm_node_id() + + if node_id is None: + self.fail("Failed to get node information.") + + try: + node_info = self.inspect_node(node_id=node_id) + except APIError as exc: + if exc.status_code == 503: + self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager") + if exc.status_code == 404: + if skip_missing: + return None + self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + except Exception as exc: + self.fail("Error inspecting swarm node: %s" % exc) + + json_str = json.dumps(node_info, ensure_ascii=False) + node_info = json.loads(json_str) + + if 'ManagerStatus' in node_info: + if node_info['ManagerStatus'].get('Leader'): + # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 + # Check moby/moby#35437 for details + count_colons = node_info['ManagerStatus']['Addr'].count(":") + if count_colons == 1: + swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr'] + else: + swarm_leader_ip = node_info['Status']['Addr'] + node_info['Status']['Addr'] = swarm_leader_ip + return node_info + + def get_all_nodes_inspect(self): + """ + Returns Swarm node info as in 'docker node inspect' command about all registered nodes + + :return: + Structure with information about all nodes + """ + try: + node_info = self.nodes() + except APIError as exc: + if exc.status_code == 503: + self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager") + self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + except Exception as exc: + self.fail("Error inspecting swarm node: %s" % exc) + + json_str = json.dumps(node_info, ensure_ascii=False) + node_info = json.loads(json_str) + return node_info + + def get_all_nodes_list(self, output='short'): + """ + Returns list of nodes registered in Swarm + + :param output: Defines format of returned data + :return: + If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm, + if 'output' is 'long' then returns data is list of dict containing the attributes as in + output of command 'docker node ls' + """ + nodes_list = [] + + nodes_inspect = self.get_all_nodes_inspect() + if nodes_inspect is None: + return None + + if output == 'short': + for node in nodes_inspect: + nodes_list.append(node['Description']['Hostname']) + elif output == 'long': + for node in nodes_inspect: + node_property = 
{} + + node_property.update({'ID': node['ID']}) + node_property.update({'Hostname': node['Description']['Hostname']}) + node_property.update({'Status': node['Status']['State']}) + node_property.update({'Availability': node['Spec']['Availability']}) + if 'ManagerStatus' in node: + if node['ManagerStatus']['Leader'] is True: + node_property.update({'Leader': True}) + node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']}) + node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']}) + + nodes_list.append(node_property) + else: + return None + + return nodes_list + + def get_node_name_by_id(self, nodeid): + return self.get_node_inspect(nodeid)['Description']['Hostname'] + + def get_unlock_key(self): + if self.docker_py_version < LooseVersion('2.7.0'): + return None + return super(AnsibleDockerSwarmClient, self).get_unlock_key() + + def get_service_inspect(self, service_id, skip_missing=False): + """ + Returns Swarm service info as in 'docker service inspect' command about single service + + :param service_id: service ID or name + :param skip_missing: if True then function will return None instead of failing the task + :return: + Single service information structure + """ + try: + service_info = self.inspect_service(service_id) + except NotFound as exc: + if skip_missing is False: + self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + else: + return None + except APIError as exc: + if exc.status_code == 503: + self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager") + self.fail("Error inspecting swarm service: %s" % exc) + except Exception as exc: + self.fail("Error inspecting swarm service: %s" % exc) + + json_str = json.dumps(service_info, ensure_ascii=False) + service_info = json.loads(json_str) + return service_info diff --git a/test/support/integration/plugins/module_utils/ec2.py b/test/support/integration/plugins/module_utils/ec2.py new file mode 100644 index 00000000..0d28108d --- /dev/null +++ b/test/support/integration/plugins/module_utils/ec2.py @@ -0,0 +1,758 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib, env_fallback
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.cloud import CloudRetry
+from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.module_utils.common.dict_transformations import (
+    camel_dict_to_snake_dict, snake_dict_to_camel_dict,
+    _camel_to_snake, _snake_to_camel,
+)
+
+BOTO_IMP_ERR = None
+try:
+    import boto
+    import boto.ec2  # boto does weird import stuff
+    HAS_BOTO = True
+except ImportError:
+    BOTO_IMP_ERR = traceback.format_exc()
+    HAS_BOTO = False
+
+BOTO3_IMP_ERR = None
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except Exception:
+    BOTO3_IMP_ERR = traceback.format_exc()
+    HAS_BOTO3 = False
+
+try:
+    # Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
+    # uses this (and it works as expected). Python 2.6 will trigger the ImportError.
+    from functools import cmp_to_key
+    PY3_COMPARISON = True
+except ImportError:
+    PY3_COMPARISON = False
+
+
+class AnsibleAWSError(Exception):
+    pass
+
+
+def _botocore_exception_maybe():
+    """
+    Allow for boto3 not being installed when using these utils by wrapping
+    botocore.exceptions instead of assigning from it directly.
+    """
+    if HAS_BOTO3:
+        return botocore.exceptions.ClientError
+    return type(None)
+
+
+class AWSRetry(CloudRetry):
+    base_class = _botocore_exception_maybe()
+
+    @staticmethod
+    def status_code_from_exception(error):
+        return error.response['Error']['Code']
+
+    @staticmethod
+    def found(response_code, catch_extra_error_codes=None):
+        # This list of failures is based on this API Reference
+        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+        #
+        # TooManyRequestsException comes from inside botocore when it
+        # does retries; unfortunately, it does not retry long
+        # enough to allow some services such as API Gateway to
+        # complete configuration. At the moment of writing there is a
+        # botocore/boto3 bug open to fix this.
+ # + # https://github.com/boto/boto3/issues/876 (and linked PRs etc) + retry_on = [ + 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', + 'InternalFailure', 'InternalError', 'TooManyRequestsException', + 'Throttling' + ] + if catch_extra_error_codes: + retry_on.extend(catch_extra_error_codes) + + return response_code in retry_on + + +def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): + try: + return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) + except ValueError as e: + module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e)) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e: + module.fail_json(msg=to_native(e)) + except botocore.exceptions.NoRegionError as e: + module.fail_json(msg="The %s module requires a region and none was found in configuration, " + "environment variables or module parameters" % module._name) + + +def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): + profile = params.pop('profile_name', None) + + if conn_type not in ['both', 'resource', 'client']: + raise ValueError('There is an issue in the calling code. You ' + 'must specify either both, resource, or client to ' + 'the conn_type parameter in the boto3_conn function ' + 'call') + + config = botocore.config.Config( + user_agent_extra='Ansible/{0}'.format(__version__), + ) + + if params.get('config') is not None: + config = config.merge(params.pop('config')) + if params.get('aws_config') is not None: + config = config.merge(params.pop('aws_config')) + + session = boto3.session.Session( + profile_name=profile, + ) + + if conn_type == 'resource': + return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + elif conn_type == 'client': + return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + else: + client = session.client(resource, region_name=region, endpoint_url=endpoint, **params) + resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params) + return client, resource + + +boto3_inventory_conn = _boto3_conn + + +def boto_exception(err): + """ + Extracts the error message from a boto exception. 
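+
+    Illustrative use (a hypothetical call site, not from the original)::
+
+        try:
+            connection.get_all_instances()
+        except boto.exception.BotoServerError as e:
+            module.fail_json(msg=boto_exception(e))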
+ + :param err: Exception from boto + :return: Error message + """ + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err)) + else: + error = '%s: %s' % (Exception, err) + + return error + + +def aws_common_argument_spec(): + return dict( + debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'), + ec2_url=dict(), + aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), + aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), + validate_certs=dict(default=True, type='bool'), + security_token=dict(aliases=['access_token'], no_log=True), + profile=dict(), + aws_config=dict(type='dict'), + ) + + +def ec2_argument_spec(): + spec = aws_common_argument_spec() + spec.update( + dict( + region=dict(aliases=['aws_region', 'ec2_region']), + ) + ) + return spec + + +def get_aws_region(module, boto3=False): + region = module.params.get('region') + + if region: + return region + + if 'AWS_REGION' in os.environ: + return os.environ['AWS_REGION'] + if 'AWS_DEFAULT_REGION' in os.environ: + return os.environ['AWS_DEFAULT_REGION'] + if 'EC2_REGION' in os.environ: + return os.environ['EC2_REGION'] + + if not boto3: + if not HAS_BOTO: + module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR) + # boto.config.get returns None if config not found + region = boto.config.get('Boto', 'aws_region') + if region: + return region + return boto.config.get('Boto', 'ec2_region') + + if not HAS_BOTO3: + module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR) + + # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. 
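+    # Note (illustrative): get_config_variable('region') consults, among other
+    # sources, AWS_DEFAULT_REGION and the profile's section of the shared AWS
+    # config file, so a region configured for the chosen profile is honoured.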
+ try: + profile_name = module.params.get('profile') + return botocore.session.Session(profile=profile_name).get_config_variable('region') + except botocore.exceptions.ProfileNotFound as e: + return None + + +def get_aws_connection_info(module, boto3=False): + + # Check module args for credentials, then check environment vars + # access_key + + ec2_url = module.params.get('ec2_url') + access_key = module.params.get('aws_access_key') + secret_key = module.params.get('aws_secret_key') + security_token = module.params.get('security_token') + region = get_aws_region(module, boto3) + profile_name = module.params.get('profile') + validate_certs = module.params.get('validate_certs') + config = module.params.get('aws_config') + + if not ec2_url: + if 'AWS_URL' in os.environ: + ec2_url = os.environ['AWS_URL'] + elif 'EC2_URL' in os.environ: + ec2_url = os.environ['EC2_URL'] + + if not access_key: + if os.environ.get('AWS_ACCESS_KEY_ID'): + access_key = os.environ['AWS_ACCESS_KEY_ID'] + elif os.environ.get('AWS_ACCESS_KEY'): + access_key = os.environ['AWS_ACCESS_KEY'] + elif os.environ.get('EC2_ACCESS_KEY'): + access_key = os.environ['EC2_ACCESS_KEY'] + elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'): + access_key = boto.config.get('Credentials', 'aws_access_key_id') + elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'): + access_key = boto.config.get('default', 'aws_access_key_id') + else: + # in case access_key came in as empty string + access_key = None + + if not secret_key: + if os.environ.get('AWS_SECRET_ACCESS_KEY'): + secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] + elif os.environ.get('AWS_SECRET_KEY'): + secret_key = os.environ['AWS_SECRET_KEY'] + elif os.environ.get('EC2_SECRET_KEY'): + secret_key = os.environ['EC2_SECRET_KEY'] + elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'): + secret_key = boto.config.get('Credentials', 'aws_secret_access_key') + elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'): + secret_key = boto.config.get('default', 'aws_secret_access_key') + else: + # in case secret_key came in as empty string + secret_key = None + + if not security_token: + if os.environ.get('AWS_SECURITY_TOKEN'): + security_token = os.environ['AWS_SECURITY_TOKEN'] + elif os.environ.get('AWS_SESSION_TOKEN'): + security_token = os.environ['AWS_SESSION_TOKEN'] + elif os.environ.get('EC2_SECURITY_TOKEN'): + security_token = os.environ['EC2_SECURITY_TOKEN'] + elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'): + security_token = boto.config.get('Credentials', 'aws_security_token') + elif HAS_BOTO and boto.config.get('default', 'aws_security_token'): + security_token = boto.config.get('default', 'aws_security_token') + else: + # in case secret_token came in as empty string + security_token = None + + if HAS_BOTO3 and boto3: + boto_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + aws_session_token=security_token) + boto_params['verify'] = validate_certs + + if profile_name: + boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None) + boto_params['profile_name'] = profile_name + + else: + boto_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + security_token=security_token) + + # only set profile_name if passed as an argument + if profile_name: + boto_params['profile_name'] = profile_name + + boto_params['validate_certs'] = validate_certs + + if config is not None: + if HAS_BOTO3 and boto3: + 
boto_params['aws_config'] = botocore.config.Config(**config) + elif HAS_BOTO and not boto3: + if 'user_agent' in config: + sys.modules["boto.connection"].UserAgent = config['user_agent'] + + for param, value in boto_params.items(): + if isinstance(value, binary_type): + boto_params[param] = text_type(value, 'utf-8', 'strict') + + return region, ec2_url, boto_params + + +def get_ec2_creds(module): + ''' for compatibility mode with old modules that don't/can't yet + use ec2_connect method ''' + region, ec2_url, boto_params = get_aws_connection_info(module) + return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region + + +def boto_fix_security_token_in_profile(conn, profile_name): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + profile_name + if boto.config.has_option(profile, 'aws_security_token'): + conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token')) + return conn + + +def connect_to_aws(aws_module, region, **params): + try: + conn = aws_module.connect_to_region(region, **params) + except(boto.provider.ProfileNotFoundError): + raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.") + if not conn: + if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: + raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade " + "boto or extend with endpoints_path" % (region, aws_module.__name__)) + else: + raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__)) + if params.get('profile_name'): + conn = boto_fix_security_token_in_profile(conn, params['profile_name']) + return conn + + +def ec2_connect(module): + + """ Return an ec2 connection""" + + region, ec2_url, boto_params = get_aws_connection_info(module) + + # If we have a region specified, connect to its endpoint. + if region: + try: + ec2 = connect_to_aws(boto.ec2, region, **boto_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e: + module.fail_json(msg=str(e)) + # Otherwise, no region so we fallback to the old connection method + elif ec2_url: + try: + ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="Either region or ec2_url must be specified") + + return ec2 + + +def ansible_dict_to_boto3_filter_list(filters_dict): + + """ Convert an Ansible dict of filters to list of dicts that boto3 can use + Args: + filters_dict (dict): Dict of AWS filters. + Basic Usage: + >>> filters = {'some-aws-id': 'i-01234567'} + >>> ansible_dict_to_boto3_filter_list(filters) + { + 'some-aws-id': 'i-01234567' + } + Returns: + List: List of AWS filters and their values + [ + { + 'Name': 'some-aws-id', + 'Values': [ + 'i-01234567', + ] + } + ] + """ + + filters_list = [] + for k, v in filters_dict.items(): + filter_dict = {'Name': k} + if isinstance(v, string_types): + filter_dict['Values'] = [v] + else: + filter_dict['Values'] = v + + filters_list.append(filter_dict) + + return filters_list + + +def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None): + + """ Convert a boto3 list of resource tags to a flat dict of key:value pairs + Args: + tags_list (list): List of dicts representing AWS tags. 
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") + tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value") + Basic Usage: + >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}] + >>> boto3_tag_list_to_ansible_dict(tags_list) + [ + { + 'Key': 'MyTagKey', + 'Value': 'MyTagValue' + } + ] + Returns: + Dict: Dict of key:value pairs representing AWS tags + { + 'MyTagKey': 'MyTagValue', + } + """ + + if tag_name_key_name and tag_value_key_name: + tag_candidates = {tag_name_key_name: tag_value_key_name} + else: + tag_candidates = {'key': 'value', 'Key': 'Value'} + + if not tags_list: + return {} + for k, v in tag_candidates.items(): + if k in tags_list[0] and v in tags_list[0]: + return dict((tag[k], tag[v]) for tag in tags_list) + raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list))) + + +def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'): + + """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts + Args: + tags_dict (dict): Dict representing AWS resource tags. + tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key") + tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value") + Basic Usage: + >>> tags_dict = {'MyTagKey': 'MyTagValue'} + >>> ansible_dict_to_boto3_tag_list(tags_dict) + { + 'MyTagKey': 'MyTagValue' + } + Returns: + List: List of dicts containing tag keys and values + [ + { + 'Key': 'MyTagKey', + 'Value': 'MyTagValue' + } + ] + """ + + tags_list = [] + for k, v in tags_dict.items(): + tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)}) + + return tags_list + + +def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True): + + """ Return list of security group IDs from security group names. Note that security group names are not unique + across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. 
This + will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in + a try block + """ + + def get_sg_name(sg, boto3): + + if boto3: + return sg['GroupName'] + else: + return sg.name + + def get_sg_id(sg, boto3): + + if boto3: + return sg['GroupId'] + else: + return sg.id + + sec_group_id_list = [] + + if isinstance(sec_group_list, string_types): + sec_group_list = [sec_group_list] + + # Get all security groups + if boto3: + if vpc_id: + filters = [ + { + 'Name': 'vpc-id', + 'Values': [ + vpc_id, + ] + } + ] + all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] + else: + all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] + else: + if vpc_id: + filters = {'vpc-id': vpc_id} + all_sec_groups = ec2_connection.get_all_security_groups(filters=filters) + else: + all_sec_groups = ec2_connection.get_all_security_groups() + + unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) + sec_group_name_list = list(set(sec_group_list) - set(unmatched)) + + if len(unmatched) > 0: + # If we have unmatched names that look like an ID, assume they are + import re + sec_group_id_list = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] + still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] + if len(still_unmatched) > 0: + raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) + + sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list] + + return sec_group_id_list + + +def _hashable_policy(policy, policy_list): + """ + Takes a policy and returns a list, the contents of which are all hashable and sorted. 
+ Example input policy: + {'Version': '2012-10-17', + 'Statement': [{'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + }]} + Returned value: + [('Statement', ((('Action', (u's3:PutObjectAcl',)), + ('Effect', (u'Allow',)), + ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), + ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))), + ('Version', (u'2012-10-17',)))] + + """ + # Amazon will automatically convert bool and int to strings for us + if isinstance(policy, bool): + return tuple([str(policy).lower()]) + elif isinstance(policy, int): + return tuple([str(policy)]) + + if isinstance(policy, list): + for each in policy: + tupleified = _hashable_policy(each, []) + if isinstance(tupleified, list): + tupleified = tuple(tupleified) + policy_list.append(tupleified) + elif isinstance(policy, string_types) or isinstance(policy, binary_type): + policy = to_text(policy) + # convert root account ARNs to just account IDs + if policy.startswith('arn:aws:iam::') and policy.endswith(':root'): + policy = policy.split(':')[4] + return [policy] + elif isinstance(policy, dict): + sorted_keys = list(policy.keys()) + sorted_keys.sort() + for key in sorted_keys: + tupleified = _hashable_policy(policy[key], []) + if isinstance(tupleified, list): + tupleified = tuple(tupleified) + policy_list.append((key, tupleified)) + + # ensure we aren't returning deeply nested structures of length 1 + if len(policy_list) == 1 and isinstance(policy_list[0], tuple): + policy_list = policy_list[0] + if isinstance(policy_list, list): + if PY3_COMPARISON: + policy_list.sort(key=cmp_to_key(py3cmp)) + else: + policy_list.sort() + return policy_list + + +def py3cmp(a, b): + """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3.""" + try: + if a > b: + return 1 + elif a < b: + return -1 + else: + return 0 + except TypeError as e: + # check to see if they're tuple-string + # always say strings are less than tuples (to maintain compatibility with python2) + str_ind = to_text(e).find('str') + tup_ind = to_text(e).find('tuple') + if -1 not in (str_ind, tup_ind): + if str_ind < tup_ind: + return -1 + elif tup_ind < str_ind: + return 1 + raise + + +def compare_policies(current_policy, new_policy): + """ Compares the existing policy and the updated policy + Returns True if there is a difference between policies. + """ + return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, [])) + + +def sort_json_policy_dict(policy_dict): + + """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but + different orders will return true + Args: + policy_dict (dict): Dict representing IAM JSON policy. 
+    Basic Usage:
+    >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
+    >>> sort_json_policy_dict(my_iam_policy)
+    Returns:
+        Dict: Will return a copy of the policy as a Dict but any List will be sorted
+        {
+            'Principle': {
+                'AWS': [ '7', '14', '31', '101' ]
+            }
+        }
+    """
+
+    def value_is_list(my_list):
+
+        checked_list = []
+        for item in my_list:
+            if isinstance(item, dict):
+                checked_list.append(sort_json_policy_dict(item))
+            elif isinstance(item, list):
+                checked_list.append(value_is_list(item))
+            else:
+                checked_list.append(item)
+
+        # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+        # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+        checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+        return checked_list
+
+    ordered_policy_dict = {}
+    for key, value in policy_dict.items():
+        if isinstance(value, dict):
+            ordered_policy_dict[key] = sort_json_policy_dict(value)
+        elif isinstance(value, list):
+            ordered_policy_dict[key] = value_is_list(value)
+        else:
+            ordered_policy_dict[key] = value
+
+    return ordered_policy_dict
+
+
+def map_complex_type(complex_type, type_map):
+    """
+    Allows casting elements within a dictionary to a specific type
+    Example of usage:
+
+    DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+        'maximum_percent': 'int',
+        'minimum_healthy_percent': 'int'
+    }
+
+    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+    This ensures all keys within the root element are cast to valid integers
+    """
+
+    if complex_type is None:
+        return
+    new_type = type(complex_type)()
+    if isinstance(complex_type, dict):
+        for key in complex_type:
+            if key in type_map:
+                if isinstance(type_map[key], list):
+                    new_type[key] = map_complex_type(
+                        complex_type[key],
+                        type_map[key][0])
+                else:
+                    new_type[key] = map_complex_type(
+                        complex_type[key],
+                        type_map[key])
+            else:
+                return complex_type
+    elif isinstance(complex_type, list):
+        for i in range(len(complex_type)):
+            new_type.append(map_complex_type(
+                complex_type[i],
+                type_map))
+    elif type_map:
+        return globals()['__builtins__'][type_map](complex_type)
+    return new_type
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+    """
+    Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ,
+    these may not be usable out of the box.
+
+    :param current_tags_dict:
+    :param new_tags_dict:
+    :param purge_tags:
+    :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+    :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS.
If no tags need to be unset this list will be empty + """ + + tag_key_value_pairs_to_set = {} + tag_keys_to_unset = [] + + for key in current_tags_dict.keys(): + if key not in new_tags_dict and purge_tags: + tag_keys_to_unset.append(key) + + for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset): + if to_text(new_tags_dict[key]) != current_tags_dict.get(key): + tag_key_value_pairs_to_set[key] = new_tags_dict[key] + + return tag_key_value_pairs_to_set, tag_keys_to_unset diff --git a/test/support/integration/plugins/module_utils/ecs/__init__.py b/test/support/integration/plugins/module_utils/ecs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/ecs/api.py b/test/support/integration/plugins/module_utils/ecs/api.py new file mode 100644 index 00000000..d89b0333 --- /dev/null +++ b/test/support/integration/plugins/module_utils/ecs/api.py @@ -0,0 +1,364 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is licensed under the +# Modified BSD License. Modules you write using this snippet, which is embedded +# dynamically by Ansible, still belong to the author of the module, and may assign +# their own license to the complete work. +# +# Copyright (c), Entrust Datacard Corporation, 2019 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
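+
+# This module_util provides a thin client for the Entrust Certificate
+# Services (ECS) REST API. Operation methods are not hand-written: they are
+# bound dynamically from the OpenAPI ("swagger") specification that the
+# session loads (see the Resource class below).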
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +import os +import re +import time +import traceback + +from ansible.module_utils._text import to_text, to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.urls import Request + +YAML_IMP_ERR = None +try: + import yaml +except ImportError: + YAML_FOUND = False + YAML_IMP_ERR = traceback.format_exc() +else: + YAML_FOUND = True + +valid_file_format = re.compile(r".*(\.)(yml|yaml|json)$") + + +def ecs_client_argument_spec(): + return dict( + entrust_api_user=dict(type='str', required=True), + entrust_api_key=dict(type='str', required=True, no_log=True), + entrust_api_client_cert_path=dict(type='path', required=True), + entrust_api_client_cert_key_path=dict(type='path', required=True, no_log=True), + entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'), + ) + + +class SessionConfigurationException(Exception): + """ Raised if we cannot configure a session with the API """ + + pass + + +class RestOperationException(Exception): + """ Encapsulate a REST API error """ + + def __init__(self, error): + self.status = to_native(error.get("status", None)) + self.errors = [to_native(err.get("message")) for err in error.get("errors", {})] + self.message = to_native(" ".join(self.errors)) + + +def generate_docstring(operation_spec): + """Generate a docstring for an operation defined in operation_spec (swagger)""" + # Description of the operation + docs = operation_spec.get("description", "No Description") + docs += "\n\n" + + # Parameters of the operation + parameters = operation_spec.get("parameters", []) + if len(parameters) != 0: + docs += "\tArguments:\n\n" + for parameter in parameters: + docs += "{0} ({1}:{2}): {3}\n".format( + parameter.get("name"), + parameter.get("type", "No Type"), + "Required" if parameter.get("required", False) else "Not Required", + parameter.get("description"), + ) + + return docs + + +def bind(instance, method, operation_spec): + def binding_scope_fn(*args, **kwargs): + return method(instance, *args, **kwargs) + + # Make sure we don't confuse users; add the proper name and documentation to the function. 
+ # Users can use !help() to get help on the function from interactive python or pdb + operation_name = operation_spec.get("operationId").split("Using")[0] + binding_scope_fn.__name__ = str(operation_name) + binding_scope_fn.__doc__ = generate_docstring(operation_spec) + + return binding_scope_fn + + +class RestOperation(object): + def __init__(self, session, uri, method, parameters=None): + self.session = session + self.method = method + if parameters is None: + self.parameters = {} + else: + self.parameters = parameters + self.url = "{scheme}://{host}{base_path}{uri}".format(scheme="https", host=session._spec.get("host"), base_path=session._spec.get("basePath"), uri=uri) + + def restmethod(self, *args, **kwargs): + """Do the hard work of making the request here""" + + # gather named path parameters and do substitution on the URL + if self.parameters: + path_parameters = {} + body_parameters = {} + query_parameters = {} + for x in self.parameters: + expected_location = x.get("in") + key_name = x.get("name", None) + key_value = kwargs.get(key_name, None) + if expected_location == "path" and key_name and key_value: + path_parameters.update({key_name: key_value}) + elif expected_location == "body" and key_name and key_value: + body_parameters.update({key_name: key_value}) + elif expected_location == "query" and key_name and key_value: + query_parameters.update({key_name: key_value}) + + if len(body_parameters.keys()) >= 1: + body_parameters = body_parameters.get(list(body_parameters.keys())[0]) + else: + body_parameters = None + else: + path_parameters = {} + query_parameters = {} + body_parameters = None + + # This will fail if we have not set path parameters with a KeyError + url = self.url.format(**path_parameters) + if query_parameters: + # modify the URL to add path parameters + url = url + "?" + urlencode(query_parameters) + + try: + if body_parameters: + body_parameters_json = json.dumps(body_parameters) + response = self.session.request.open(method=self.method, url=url, data=body_parameters_json) + else: + response = self.session.request.open(method=self.method, url=url) + request_error = False + except HTTPError as e: + # An HTTPError has the same methods available as a valid response from request.open + response = e + request_error = True + + # Return the result if JSON and success ({} for empty responses) + # Raise an exception if there was a failure. + try: + result_code = response.getcode() + result = json.loads(response.read()) + except ValueError: + result = {} + + if result or result == {}: + if result_code and result_code < 400: + return result + else: + raise RestOperationException(result) + + # Raise a generic RestOperationException if this fails + raise RestOperationException({"status": result_code, "errors": [{"message": "REST Operation Failed"}]}) + + +class Resource(object): + """ Implement basic CRUD operations against a path. 
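+
+    Method names are derived from each operation's ``operationId`` (or, when
+    that is missing, from the HTTP verb plus the non-parameter parts of the
+    path), so the names available depend on the loaded specification; e.g. a
+    GET on ``/application/version`` is exposed as ``GetApplicationVersion``.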
""" + + def __init__(self, session): + self.session = session + self.parameters = {} + + for url in session._spec.get("paths").keys(): + methods = session._spec.get("paths").get(url) + for method in methods.keys(): + operation_spec = methods.get(method) + operation_name = operation_spec.get("operationId", None) + parameters = operation_spec.get("parameters") + + if not operation_name: + if method.lower() == "post": + operation_name = "Create" + elif method.lower() == "get": + operation_name = "Get" + elif method.lower() == "put": + operation_name = "Update" + elif method.lower() == "delete": + operation_name = "Delete" + elif method.lower() == "patch": + operation_name = "Patch" + else: + raise SessionConfigurationException(to_native("Invalid REST method type {0}".format(method))) + + # Get the non-parameter parts of the URL and append to the operation name + # e.g /application/version -> GetApplicationVersion + # e.g. /application/{id} -> GetApplication + # This may lead to duplicates, which we must prevent. + operation_name += re.sub(r"{(.*)}", "", url).replace("/", " ").title().replace(" ", "") + operation_spec["operationId"] = operation_name + + op = RestOperation(session, url, method, parameters) + setattr(self, operation_name, bind(self, op.restmethod, operation_spec)) + + +# Session to encapsulate the connection parameters of the module_utils Request object, the api spec, etc +class ECSSession(object): + def __init__(self, name, **kwargs): + """ + Initialize our session + """ + + self._set_config(name, **kwargs) + + def client(self): + resource = Resource(self) + return resource + + def _set_config(self, name, **kwargs): + headers = { + "Content-Type": "application/json", + "Connection": "keep-alive", + } + self.request = Request(headers=headers, timeout=60) + + configurators = [self._read_config_vars] + for configurator in configurators: + self._config = configurator(name, **kwargs) + if self._config: + break + if self._config is None: + raise SessionConfigurationException(to_native("No Configuration Found.")) + + # set up auth if passed + entrust_api_user = self.get_config("entrust_api_user") + entrust_api_key = self.get_config("entrust_api_key") + if entrust_api_user and entrust_api_key: + self.request.url_username = entrust_api_user + self.request.url_password = entrust_api_key + else: + raise SessionConfigurationException(to_native("User and key must be provided.")) + + # set up client certificate if passed (support all-in one or cert + key) + entrust_api_cert = self.get_config("entrust_api_cert") + entrust_api_cert_key = self.get_config("entrust_api_cert_key") + if entrust_api_cert: + self.request.client_cert = entrust_api_cert + if entrust_api_cert_key: + self.request.client_key = entrust_api_cert_key + else: + raise SessionConfigurationException(to_native("Client certificate for authentication to the API must be provided.")) + + # set up the spec + entrust_api_specification_path = self.get_config("entrust_api_specification_path") + + if not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path): + raise SessionConfigurationException(to_native("OpenAPI specification was not found at location {0}.".format(entrust_api_specification_path))) + if not valid_file_format.match(entrust_api_specification_path): + raise SessionConfigurationException(to_native("OpenAPI specification filename must end in .json, .yml or .yaml")) + + self.verify = True + + if entrust_api_specification_path.startswith("http"): + try: + http_response = 
Request().open(method="GET", url=entrust_api_specification_path) + http_response_contents = http_response.read() + if entrust_api_specification_path.endswith(".json"): + self._spec = json.load(http_response_contents) + elif entrust_api_specification_path.endswith(".yml") or entrust_api_specification_path.endswith(".yaml"): + self._spec = yaml.safe_load(http_response_contents) + except HTTPError as e: + raise SessionConfigurationException(to_native("Error downloading specification from address '{0}', received error code '{1}'".format( + entrust_api_specification_path, e.getcode()))) + else: + with open(entrust_api_specification_path) as f: + if ".json" in entrust_api_specification_path: + self._spec = json.load(f) + elif ".yml" in entrust_api_specification_path or ".yaml" in entrust_api_specification_path: + self._spec = yaml.safe_load(f) + + def get_config(self, item): + return self._config.get(item, None) + + def _read_config_vars(self, name, **kwargs): + """ Read configuration from variables passed to the module. """ + config = {} + + entrust_api_specification_path = kwargs.get("entrust_api_specification_path") + if not entrust_api_specification_path or (not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path)): + raise SessionConfigurationException( + to_native( + "Parameter provided for entrust_api_specification_path of value '{0}' was not a valid file path or HTTPS address.".format( + entrust_api_specification_path + ) + ) + ) + + for required_file in ["entrust_api_cert", "entrust_api_cert_key"]: + file_path = kwargs.get(required_file) + if not file_path or not os.path.isfile(file_path): + raise SessionConfigurationException( + to_native("Parameter provided for {0} of value '{1}' was not a valid file path.".format(required_file, file_path)) + ) + + for required_var in ["entrust_api_user", "entrust_api_key"]: + if not kwargs.get(required_var): + raise SessionConfigurationException(to_native("Parameter provided for {0} was missing.".format(required_var))) + + config["entrust_api_cert"] = kwargs.get("entrust_api_cert") + config["entrust_api_cert_key"] = kwargs.get("entrust_api_cert_key") + config["entrust_api_specification_path"] = kwargs.get("entrust_api_specification_path") + config["entrust_api_user"] = kwargs.get("entrust_api_user") + config["entrust_api_key"] = kwargs.get("entrust_api_key") + + return config + + +def ECSClient(entrust_api_user=None, entrust_api_key=None, entrust_api_cert=None, entrust_api_cert_key=None, entrust_api_specification_path=None): + """Create an ECS client""" + + if not YAML_FOUND: + raise SessionConfigurationException(missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) + + if entrust_api_specification_path is None: + entrust_api_specification_path = "https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml" + + # Not functionally necessary with current uses of this module_util, but better to be explicit for future use cases + entrust_api_user = to_text(entrust_api_user) + entrust_api_key = to_text(entrust_api_key) + entrust_api_cert_key = to_text(entrust_api_cert_key) + entrust_api_specification_path = to_text(entrust_api_specification_path) + + return ECSSession( + "ecs", + entrust_api_user=entrust_api_user, + entrust_api_key=entrust_api_key, + entrust_api_cert=entrust_api_cert, + entrust_api_cert_key=entrust_api_cert_key, + entrust_api_specification_path=entrust_api_specification_path, + ).client() diff --git a/test/support/integration/plugins/module_utils/mysql.py 
b/test/support/integration/plugins/module_utils/mysql.py new file mode 100644 index 00000000..46198f36 --- /dev/null +++ b/test/support/integration/plugins/module_utils/mysql.py @@ -0,0 +1,106 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy , 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +try: + import pymysql as mysql_driver + _mysql_cursor_param = 'cursor' +except ImportError: + try: + import MySQLdb as mysql_driver + import MySQLdb.cursors + _mysql_cursor_param = 'cursorclass' + except ImportError: + mysql_driver = None + +mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.' 
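+
+# Minimal usage sketch (illustrative only, with made-up credentials): callers
+# are expected to guard on the driver probe above before connecting, e.g.
+#
+#   if mysql_driver is None:
+#       module.fail_json(msg=mysql_driver_fail_msg)
+#   cursor, db_connection = mysql_connect(module, login_user='root',
+#                                         login_password='secret',
+#                                         cursor_class='DictCursor')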
+ + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, + connect_timeout=30, autocommit=False): + config = {} + + if ssl_ca is not None or ssl_key is not None or ssl_cert is not None: + config['ssl'] = {} + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['host'] = module.params['login_host'] + config['port'] = module.params['login_port'] + + if os.path.exists(config_file): + config['read_default_file'] = config_file + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + if connect_timeout is not None: + config['connect_timeout'] = connect_timeout + + if _mysql_cursor_param == 'cursor': + # In case of PyMySQL driver: + db_connection = mysql_driver.connect(autocommit=autocommit, **config) + else: + # In case of MySQLdb driver + db_connection = mysql_driver.connect(**config) + if autocommit: + db_connection.autocommit(True) + + if cursor_class == 'DictCursor': + return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection + else: + return db_connection.cursor(), db_connection + + +def mysql_common_argument_spec(): + return dict( + login_user=dict(type='str', default=None), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + config_file=dict(type='path', default='~/.my.cnf'), + connect_timeout=dict(type='int', default=30), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + ) diff --git a/test/support/integration/plugins/module_utils/net_tools/__init__.py b/test/support/integration/plugins/module_utils/net_tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/network/__init__.py b/test/support/integration/plugins/module_utils/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/network/common/__init__.py b/test/support/integration/plugins/module_utils/network/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/support/integration/plugins/module_utils/network/common/utils.py b/test/support/integration/plugins/module_utils/network/common/utils.py new file mode 100644 index 00000000..80317387 --- /dev/null +++ b/test/support/integration/plugins/module_utils/network/common/utils.py @@ -0,0 +1,643 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Networking tools for network modules only + +import re +import ast +import operator +import socket +import json + +from itertools import chain + +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils import basic +from ansible.module_utils.parsing.convert_bool import boolean + +# Backwards compatibility for 3rd party modules +# TODO(pabelanger): With move to ansible.netcommon, we should clean this code +# up and have modules import directly themself. +from ansible.module_utils.common.network import ( # noqa: F401 + to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS +) + +try: + from jinja2 import Environment, StrictUndefined + from jinja2.exceptions import UndefinedError + HAS_JINJA2 = True +except ImportError: + HAS_JINJA2 = False + + +OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le']) +ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')]) + + +def to_list(val): + if isinstance(val, (list, tuple, set)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = to_text(item).split('\n') + yield item + + +def transform_commands(module): + transform = ComplexList(dict( + command=dict(key=True), + output=dict(), + prompt=dict(type='list'), + answer=dict(type='list'), + newline=dict(type='bool', default=True), + sendonly=dict(type='bool', default=False), + check_all=dict(type='bool', default=False), + ), module) + + return transform(module.params['commands']) + + +def sort_list(val): + if isinstance(val, list): + return sorted(val) + return val + + +class Entity(object): + """Transforms a dict to with an argument spec + + This class will take a dict and apply an Ansible argument spec to the + values. The resulting dict will contain all of the keys in the param + with appropriate values set. 
+ + Example:: + + argument_spec = dict( + command=dict(key=True), + display=dict(default='text', choices=['text', 'json']), + validate=dict(type='bool') + ) + transform = Entity(module, argument_spec) + value = dict(command='foo') + result = transform(value) + print result + {'command': 'foo', 'display': 'text', 'validate': None} + + Supported argument spec: + * key - specifies how to map a single value to a dict + * read_from - read and apply the argument_spec from the module + * required - a value is required + * type - type of value (uses AnsibleModule type checker) + * fallback - implements fallback function + * choices - set of valid options + * default - default value + """ + + def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False): + args = [] if args is None else args + + self._attributes = attrs or {} + self._module = module + + for arg in args: + self._attributes[arg] = dict() + if from_argspec: + self._attributes[arg]['read_from'] = arg + if keys and arg in keys: + self._attributes[arg]['key'] = True + + self.attr_names = frozenset(self._attributes.keys()) + + _has_key = False + + for name, attr in iteritems(self._attributes): + if attr.get('read_from'): + if attr['read_from'] not in self._module.argument_spec: + module.fail_json(msg='argument %s does not exist' % attr['read_from']) + spec = self._module.argument_spec.get(attr['read_from']) + for key, value in iteritems(spec): + if key not in attr: + attr[key] = value + + if attr.get('key'): + if _has_key: + module.fail_json(msg='only one key value can be specified') + _has_key = True + attr['required'] = True + + def serialize(self): + return self._attributes + + def to_dict(self, value): + obj = {} + for name, attr in iteritems(self._attributes): + if attr.get('key'): + obj[name] = value + else: + obj[name] = attr.get('default') + return obj + + def __call__(self, value, strict=True): + if not isinstance(value, dict): + value = self.to_dict(value) + + if strict: + unknown = set(value).difference(self.attr_names) + if unknown: + self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown)) + + for name, attr in iteritems(self._attributes): + if value.get(name) is None: + value[name] = attr.get('default') + + if attr.get('fallback') and not value.get(name): + fallback = attr.get('fallback', (None,)) + fallback_strategy = fallback[0] + fallback_args = [] + fallback_kwargs = {} + if fallback_strategy is not None: + for item in fallback[1:]: + if isinstance(item, dict): + fallback_kwargs = item + else: + fallback_args = item + try: + value[name] = fallback_strategy(*fallback_args, **fallback_kwargs) + except basic.AnsibleFallbackNotFound: + continue + + if attr.get('required') and value.get(name) is None: + self._module.fail_json(msg='missing required attribute %s' % name) + + if 'choices' in attr: + if value[name] not in attr['choices']: + self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name])) + + if value[name] is not None: + value_type = attr.get('type', 'str') + type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type] + type_checker(value[name]) + elif value.get(name): + value[name] = self._module.params[name] + + return value + + +class EntityCollection(Entity): + """Extends ```Entity``` to handle a list of dicts """ + + def __call__(self, iterable, strict=True): + if iterable is None: + iterable = [super(EntityCollection, self).__call__(self._module.params, strict)] + + if not isinstance(iterable, (list, tuple)): + 
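            # Anything that is neither a list nor a tuple at this point cannot be
+            # expanded into a list of entities, so fail with a clear message: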
self._module.fail_json(msg='value must be an iterable')
+
+        return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+    def __init__(self, attrs, module, *args, **kwargs):
+        super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+    def __init__(self, attrs, module, *args, **kwargs):
+        super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+    """ Generate a dict object of differences
+
+    This function will compare two dict objects and return the difference
+    between them as a dict object. For scalar values, the key will reflect
+    the updated value. If the key does not exist in `comparable`, then no
+    key will be returned. For lists, the value in comparable will wholly replace
+    the value in base for the key. For dicts, the returned value will only
+    return keys that are different.
+
+    :param base: dict object to base the diff on
+    :param comparable: dict object to compare against base
+
+    :returns: new dict object with differences
+    """
+    if not isinstance(base, dict):
+        raise AssertionError("`base` must be of type <dict>")
+    if not isinstance(comparable, dict):
+        if comparable is None:
+            comparable = dict()
+        else:
+            raise AssertionError("`comparable` must be of type <dict>")
+
+    updates = dict()
+
+    for key, value in iteritems(base):
+        if isinstance(value, dict):
+            item = comparable.get(key)
+            if item is not None:
+                sub_diff = dict_diff(value, comparable[key])
+                if sub_diff:
+                    updates[key] = sub_diff
+        else:
+            comparable_value = comparable.get(key)
+            if comparable_value is not None:
+                if sort_list(base[key]) != sort_list(comparable_value):
+                    updates[key] = comparable_value
+
+    for key in set(comparable.keys()).difference(base.keys()):
+        updates[key] = comparable.get(key)
+
+    return updates
+
+
+def dict_merge(base, other):
+    """ Return a new dict object that combines base and other
+
+    This will create a new dict object that is a combination of the key/value
+    pairs from base and other. When both keys exist, the value will be
+    selected from other. If the value is a list object, the two lists will
+    be combined and duplicate entries removed.
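+
+    A small illustrative example (note that the ordering of a merged list
+    follows Python set semantics and is not guaranteed)::
+
+        base = {'a': 1, 'b': {'c': 2}, 'd': [1, 2]}
+        other = {'a': 9, 'b': {'e': 3}, 'd': [2, 3]}
+        dict_merge(base, other)
+        # -> {'a': 9, 'b': {'c': 2, 'e': 3}, 'd': [1, 2, 3]}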
+
+    :param base: dict object to serve as base
+    :param other: dict object to combine with base
+
+    :returns: new combined dict object
+    """
+    if not isinstance(base, dict):
+        raise AssertionError("`base` must be of type <dict>")
+    if not isinstance(other, dict):
+        raise AssertionError("`other` must be of type <dict>")
+
+    combined = dict()
+
+    for key, value in iteritems(base):
+        if isinstance(value, dict):
+            if key in other:
+                item = other.get(key)
+                if item is not None:
+                    if isinstance(other[key], Mapping):
+                        combined[key] = dict_merge(value, other[key])
+                    else:
+                        combined[key] = other[key]
+                else:
+                    combined[key] = item
+            else:
+                combined[key] = value
+        elif isinstance(value, list):
+            if key in other:
+                item = other.get(key)
+                if item is not None:
+                    try:
+                        combined[key] = list(set(chain(value, item)))
+                    except TypeError:
+                        value.extend([i for i in item if i not in value])
+                        combined[key] = value
+                else:
+                    combined[key] = item
+            else:
+                combined[key] = value
+        else:
+            if key in other:
+                other_value = other.get(key)
+                if other_value is not None:
+                    if sort_list(base[key]) != sort_list(other_value):
+                        combined[key] = other_value
+                    else:
+                        combined[key] = value
+                else:
+                    combined[key] = other_value
+            else:
+                combined[key] = value
+
+    for key in set(other.keys()).difference(base.keys()):
+        combined[key] = other.get(key)
+
+    return combined
+
+
+def param_list_to_dict(param_list, unique_key="name", remove_key=True):
+    """Rotates a list of dictionaries to be a dictionary of dictionaries.
+
+    :param param_list: The aforementioned list of dictionaries
+    :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
+        behind this key will be the key each dictionary can be found at in the new root dictionary
+    :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
+    """
+    param_dict = {}
+    for params in param_list:
+        params = params.copy()
+        if remove_key:
+            name = params.pop(unique_key)
+        else:
+            name = params.get(unique_key)
+        param_dict[name] = params
+
+    return param_dict
+
+
+def conditional(expr, val, cast=None):
+    match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
+    if match:
+        op, arg = match.groups()
+    else:
+        op = 'eq'
+        if ' ' in str(expr):
+            raise AssertionError('invalid expression: cannot contain spaces')
+        arg = expr
+
+    if cast is None and val is not None:
+        arg = type(val)(arg)
+    elif callable(cast):
+        arg = cast(arg)
+        val = cast(val)
+
+    op = next((oper for alias, oper in ALIASES if op == alias), op)
+
+    if not hasattr(operator, op) and op not in OPERATORS:
+        raise ValueError('unknown operator: %s' % op)
+
+    func = getattr(operator, op)
+    return func(val, arg)
+
+
+def ternary(value, true_val, false_val):
+    ''' value ?
true_val : false_val ''' + if value: + return true_val + else: + return false_val + + +def remove_default_spec(spec): + for item in spec: + if 'default' in spec[item]: + del spec[item]['default'] + + +def validate_ip_address(address): + try: + socket.inet_aton(address) + except socket.error: + return False + return address.count('.') == 3 + + +def validate_ip_v6_address(address): + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + return False + return True + + +def validate_prefix(prefix): + if prefix and not 0 <= int(prefix) <= 32: + return False + return True + + +def load_provider(spec, args): + provider = args.get('provider') or {} + for key, value in iteritems(spec): + if key not in provider: + if 'fallback' in value: + provider[key] = _fallback(value['fallback']) + elif 'default' in value: + provider[key] = value['default'] + else: + provider[key] = None + if 'authorize' in provider: + # Coerce authorize to provider if a string has somehow snuck in. + provider['authorize'] = boolean(provider['authorize'] or False) + args['provider'] = provider + return provider + + +def _fallback(fallback): + strategy = fallback[0] + args = [] + kwargs = {} + + for item in fallback[1:]: + if isinstance(item, dict): + kwargs = item + else: + args = item + try: + return strategy(*args, **kwargs) + except basic.AnsibleFallbackNotFound: + pass + + +def generate_dict(spec): + """ + Generate dictionary which is in sync with argspec + + :param spec: A dictionary that is the argspec of the module + :rtype: A dictionary + :returns: A dictionary in sync with argspec with default value + """ + obj = {} + if not spec: + return obj + + for key, val in iteritems(spec): + if 'default' in val: + dct = {key: val['default']} + elif 'type' in val and val['type'] == 'dict': + dct = {key: generate_dict(val['options'])} + else: + dct = {key: None} + obj.update(dct) + return obj + + +def parse_conf_arg(cfg, arg): + """ + Parse config based on argument + + :param cfg: A text string which is a line of configuration. + :param arg: A text string which is to be matched. + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M) + if match: + result = match.group(1).strip() + else: + result = None + return result + + +def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'): + """ + Parse config based on command + + :param cfg: A text string which is a line of configuration. + :param cmd: A text string which is the command to be matched + :param res1: A text string to be returned if the command is present + :param res2: A text string to be returned if the negate command + is present + :param delete_str: A text string to identify the start of the + negate command + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg) + if match: + return res1 + if res2 is not None: + match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg) + if match: + return res2 + return None + + +def get_xml_conf_arg(cfg, path, data='text'): + """ + :param cfg: The top level configuration lxml Element tree object + :param path: The relative xpath w.r.t to top level element (cfg) + to be searched in the xml hierarchy + :param data: The type of data to be returned for the matched xml node. + Valid values are text, tag, attrib, with default as text. 
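+
+        As an illustration, with a hypothetical tree ``<a><b>x</b></a>`` held
+        in ``cfg``, ``get_xml_conf_arg(cfg, 'b')`` returns ``'x'`` (the
+        default ``data='text'``), while ``data='tag'`` returns ``'b'``.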
+ :return: Returns the required type for the matched xml node or else None + """ + match = cfg.xpath(path) + if len(match): + if data == 'tag': + result = getattr(match[0], 'tag') + elif data == 'attrib': + result = getattr(match[0], 'attrib') + else: + result = getattr(match[0], 'text') + else: + result = None + return result + + +def remove_empties(cfg_dict): + """ + Generate final config dictionary + + :param cfg_dict: A dictionary parsed in the facts system + :rtype: A dictionary + :returns: A dictionary by eliminating keys that have null values + """ + final_cfg = {} + if not cfg_dict: + return final_cfg + + for key, val in iteritems(cfg_dict): + dct = None + if isinstance(val, dict): + child_val = remove_empties(val) + if child_val: + dct = {key: child_val} + elif (isinstance(val, list) and val + and all([isinstance(x, dict) for x in val])): + child_val = [remove_empties(x) for x in val] + if child_val: + dct = {key: child_val} + elif val not in [None, [], {}, (), '']: + dct = {key: val} + if dct: + final_cfg.update(dct) + return final_cfg + + +def validate_config(spec, data): + """ + Validate if the input data against the AnsibleModule spec format + :param spec: Ansible argument spec + :param data: Data to be validated + :return: + """ + params = basic._ANSIBLE_ARGS + basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data})) + validated_data = basic.AnsibleModule(spec).params + basic._ANSIBLE_ARGS = params + return validated_data + + +def search_obj_in_list(name, lst, key='name'): + if not lst: + return None + else: + for item in lst: + if item.get(key) == name: + return item + + +class Template: + + def __init__(self): + if not HAS_JINJA2: + raise ImportError("jinja2 is required but does not appear to be installed. " + "It can be installed using `pip install jinja2`") + + self.env = Environment(undefined=StrictUndefined) + self.env.filters.update({'ternary': ternary}) + + def __call__(self, value, variables=None, fail_on_undefined=True): + variables = variables or {} + + if not self.contains_vars(value): + return value + + try: + value = self.env.from_string(value).render(variables) + except UndefinedError: + if not fail_on_undefined: + return None + raise + + if value: + try: + return ast.literal_eval(value) + except Exception: + return str(value) + else: + return None + + def contains_vars(self, data): + if isinstance(data, string_types): + for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string): + if marker in data: + return True + return False diff --git a/test/support/integration/plugins/module_utils/postgres.py b/test/support/integration/plugins/module_utils/postgres.py new file mode 100644 index 00000000..63811c30 --- /dev/null +++ b/test/support/integration/plugins/module_utils/postgres.py @@ -0,0 +1,330 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Ted Timmons , 2017. +# Most of this was originally added by other creators in the postgresql_user module. +# All rights reserved. 
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+psycopg2 = None  # This line is needed for the unit tests
+try:
+    import psycopg2
+    HAS_PSYCOPG2 = True
+except ImportError:
+    HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+    """
+    Return a dictionary with connection options.
+
+    The options are commonly used by most of the PostgreSQL modules.
+    """
+    return dict(
+        login_user=dict(default='postgres'),
+        login_password=dict(default='', no_log=True),
+        login_host=dict(default=''),
+        login_unix_socket=dict(default=''),
+        port=dict(type='int', default=5432, aliases=['login_port']),
+        ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+        ca_cert=dict(aliases=['ssl_rootcert']),
+    )
+
+
+def ensure_required_libs(module):
+    """Check required libraries."""
+    if not HAS_PSYCOPG2:
+        module.fail_json(msg=missing_required_lib('psycopg2'))
+
+    if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+    """Connect to a PostgreSQL database.
+
+    Return psycopg2 connection object.
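+
+    A typical call site looks roughly like this (an illustrative sketch; it
+    assumes the calling module uses the options from
+    postgres_common_argument_spec())::
+
+        conn_params = get_conn_params(module, module.params)
+        db_connection = connect_to_db(module, conn_params, autocommit=True)
+        cursor = db_connection.cursor()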
+
+    Args:
+        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+        conn_params (dict) -- dictionary with connection parameters
+
+    Kwargs:
+        autocommit (bool) -- commit automatically (default False)
+        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
+    """
+    ensure_required_libs(module)
+
+    db_connection = None
+    try:
+        db_connection = psycopg2.connect(**conn_params)
+        if autocommit:
+            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+                db_connection.set_session(autocommit=True)
+            else:
+                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+        # Switch role, if specified:
+        if module.params.get('session_role'):
+            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+            try:
+                cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+            except Exception as e:
+                module.fail_json(msg="Could not switch role: %s" % to_native(e))
+            finally:
+                cursor.close()
+
+    except TypeError as e:
+        if 'sslrootcert' in e.args[0]:
+            module.fail_json(msg='Postgresql server must be at least '
+                                 'version 8.4 to support sslrootcert')
+
+        if fail_on_conn:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+        else:
+            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+            db_connection = None
+
+    except Exception as e:
+        if fail_on_conn:
+            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+        else:
+            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+            db_connection = None
+
+    return db_connection
+
+
+def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
+    """Execute SQL.
+
+    Auxiliary function for PostgreSQL user classes.
+
+    Returns a query result if possible or True/False if ddl=True arg was passed.
+    It is necessary for statements that don't return any result (like DDL queries).
+
+    Args:
+        obj (obj) -- must be an object of a user class.
+            The object must have module (AnsibleModule class object) and
+            cursor (psycopg cursor object) attributes
+        query (str) -- SQL query to execute
+
+    Kwargs:
+        query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+            could be a dict or tuple
+        ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
+            (default False)
+        add_to_executed (bool) -- append the query to obj.executed_queries attribute
+        dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+            to obj.executed_queries list and return True (default False)
+    """
+
+    if dont_exec:
+        # This is usually needed to return queries in check_mode
+        # without execution
+        query = obj.cursor.mogrify(query, query_params)
+        if add_to_executed:
+            obj.executed_queries.append(query)
+
+        return True
+
+    try:
+        if query_params is not None:
+            obj.cursor.execute(query, query_params)
+        else:
+            obj.cursor.execute(query)
+
+        if add_to_executed:
+            if query_params is not None:
+                obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+            else:
+                obj.executed_queries.append(query)
+
+        if not ddl:
+            res = obj.cursor.fetchall()
+            return res
+        return True
+    except Exception as e:
+        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+    return False
+
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+    """Get connection parameters from the passed dictionary.
+
+    Return a dictionary with parameters to connect to PostgreSQL server.
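+
+    For instance (illustrative values), entries such as
+    {'login_user': 'bob', 'login_password': 's3cret', 'port': 5432} come back
+    as the psycopg2 keyword arguments {'user': 'bob', 'password': 's3cret',
+    'port': 5432}; empty strings and None values are dropped so that libpq
+    defaults apply.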
+ + Args: + module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class + params_dict (dict) -- dictionary with variables + + Kwargs: + warn_db_default (bool) -- warn that the default DB is used (default True) + """ + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the return dictionary + params_map = { + "login_host": "host", + "login_user": "user", + "login_password": "password", + "port": "port", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + + # Might be different in the modules: + if params_dict.get('db'): + params_map['db'] = 'database' + elif params_dict.get('database'): + params_map['database'] = 'database' + elif params_dict.get('login_db'): + params_map['login_db'] = 'database' + else: + if warn_db_default: + module.warn('Database name has not been passed, ' + 'used default database to connect to.') + + kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict) + if k in params_map and v != '' and v is not None) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost" + if is_localhost and params_dict["login_unix_socket"] != "": + kw["host"] = params_dict["login_unix_socket"] + + return kw + + +class PgMembership(object): + def __init__(self, module, cursor, groups, target_roles, fail_on_role=True): + self.module = module + self.cursor = cursor + self.target_roles = [r.strip() for r in target_roles] + self.groups = [r.strip() for r in groups] + self.executed_queries = [] + self.granted = {} + self.revoked = {} + self.fail_on_role = fail_on_role + self.non_existent_roles = [] + self.changed = False + self.__check_roles_exist() + + def grant(self): + for group in self.groups: + self.granted[group] = [] + + for role in self.target_roles: + # If role is in a group now, pass: + if self.__check_membership(group, role): + continue + + query = 'GRANT "%s" TO "%s"' % (group, role) + self.changed = exec_sql(self, query, ddl=True) + + if self.changed: + self.granted[group].append(role) + + return self.changed + + def revoke(self): + for group in self.groups: + self.revoked[group] = [] + + for role in self.target_roles: + # If role is not in a group now, pass: + if not self.__check_membership(group, role): + continue + + query = 'REVOKE "%s" FROM "%s"' % (group, role) + self.changed = exec_sql(self, query, ddl=True) + + if self.changed: + self.revoked[group].append(role) + + return self.changed + + def __check_membership(self, src_role, dst_role): + query = ("SELECT ARRAY(SELECT b.rolname FROM " + "pg_catalog.pg_auth_members m " + "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) " + "WHERE m.member = r.oid) " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(dst_role)s") + + res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False) + membership = [] + if res: + membership = res[0][0] + + if not membership: + return False + + if src_role in membership: + return True + + return False + + def __check_roles_exist(self): + existent_groups = self.__roles_exist(self.groups) + existent_roles = self.__roles_exist(self.target_roles) + + for group in self.groups: + if group not in existent_groups: + if self.fail_on_role: + self.module.fail_json(msg="Role %s does not exist" % group) + else: + self.module.warn("Role %s does not exist, pass" % group) + self.non_existent_roles.append(group) + + for role in self.target_roles: + if role not in existent_roles: + if 
self.fail_on_role: + self.module.fail_json(msg="Role %s does not exist" % role) + else: + self.module.warn("Role %s does not exist, pass" % role) + + if role not in self.groups: + self.non_existent_roles.append(role) + + else: + if self.fail_on_role: + self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role)) + else: + self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role)) + + # Update role lists, excluding non existent roles: + self.groups = [g for g in self.groups if g not in self.non_existent_roles] + + self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles] + + def __roles_exist(self, roles): + tmp = ["'" + x + "'" for x in roles] + query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp) + return [x[0] for x in exec_sql(self, query, add_to_executed=False)] diff --git a/test/support/integration/plugins/module_utils/rabbitmq.py b/test/support/integration/plugins/module_utils/rabbitmq.py new file mode 100644 index 00000000..cf764006 --- /dev/null +++ b/test/support/integration/plugins/module_utils/rabbitmq.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2016, Jorge Rodriguez +# Copyright: (c) 2018, John Imison +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from mimetypes import MimeTypes + +import os +import json +import traceback + +PIKA_IMP_ERR = None +try: + import pika + import pika.exceptions + from pika import spec + HAS_PIKA = True +except ImportError: + PIKA_IMP_ERR = traceback.format_exc() + HAS_PIKA = False + + +def rabbitmq_argument_spec(): + return dict( + login_user=dict(type='str', default='guest'), + login_password=dict(type='str', default='guest', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='str', default='15672'), + login_protocol=dict(type='str', default='http', choices=['http', 'https']), + ca_cert=dict(type='path', aliases=['cacert']), + client_cert=dict(type='path', aliases=['cert']), + client_key=dict(type='path', aliases=['key']), + vhost=dict(type='str', default='/'), + ) + + +# notification/rabbitmq_basic_publish.py +class RabbitClient(): + def __init__(self, module): + self.module = module + self.params = module.params + self.check_required_library() + self.check_host_params() + self.url = self.params['url'] + self.proto = self.params['proto'] + self.username = self.params['username'] + self.password = self.params['password'] + self.host = self.params['host'] + self.port = self.params['port'] + self.vhost = self.params['vhost'] + self.queue = self.params['queue'] + self.headers = self.params['headers'] + self.cafile = self.params['cafile'] + self.certfile = self.params['certfile'] + self.keyfile = self.params['keyfile'] + + if self.host is not None: + self.build_url() + + if self.cafile is not None: + self.append_ssl_certs() + + self.connect_to_rabbitmq() + + def check_required_library(self): + if not HAS_PIKA: + self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR) + + def check_host_params(self): + # Fail if url is specified and other conflicting parameters have been specified + if self.params['url'] is not None and any(self.params[k] is not None for 
k in ['proto', 'host', 'port', 'password', 'username', 'vhost']): + self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.") + + # Fail if url not specified and there is a missing parameter to build the url + if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']): + self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.") + + def append_ssl_certs(self): + ssl_options = {} + if self.cafile: + ssl_options['cafile'] = self.cafile + if self.certfile: + ssl_options['certfile'] = self.certfile + if self.keyfile: + ssl_options['keyfile'] = self.keyfile + + self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options)) + + @staticmethod + def rabbitmq_argument_spec(): + return dict( + url=dict(type='str'), + proto=dict(type='str', choices=['amqp', 'amqps']), + host=dict(type='str'), + port=dict(type='int'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + vhost=dict(type='str'), + queue=dict(type='str') + ) + + ''' Consider some file size limits here ''' + def _read_file(self, path): + try: + with open(path, "rb") as file_handle: + return file_handle.read() + except IOError as e: + self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e))) + + @staticmethod + def _check_file_mime_type(path): + mime = MimeTypes() + return mime.guess_type(path) + + def build_url(self): + self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto, + self.username, + self.password, + self.host, + self.port, + self.vhost) + + def connect_to_rabbitmq(self): + """ + Function to connect to rabbitmq using username and password + """ + try: + parameters = pika.URLParameters(self.url) + except Exception as e: + self.module.fail_json(msg="URL malformed: %s" % to_native(e)) + + try: + self.connection = pika.BlockingConnection(parameters) + except Exception as e: + self.module.fail_json(msg="Connection issue: %s" % to_native(e)) + + try: + self.conn_channel = self.connection.channel() + except pika.exceptions.AMQPChannelError as e: + self.close_connection() + self.module.fail_json(msg="Channel issue: %s" % to_native(e)) + + def close_connection(self): + try: + self.connection.close() + except pika.exceptions.AMQPConnectionError: + pass + + def basic_publish(self): + self.content_type = self.params.get("content_type") + + if self.params.get("body") is not None: + args = dict( + body=self.params.get("body"), + exchange=self.params.get("exchange"), + routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers)) + + # If src (file) is defined and content_type is left as default, do a mime lookup on the file + if self.params.get("src") is not None and self.content_type == 'text/plain': + self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0] + self.headers.update( + filename=os.path.basename(self.params.get("src")) + ) + + args = dict( + body=self._read_file(self.params.get("src")), + exchange=self.params.get("exchange"), + routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, + delivery_mode=1, + headers=self.headers + )) + elif self.params.get("src") is not None: + args = dict( + body=self._read_file(self.params.get("src")), + exchange=self.params.get("exchange"), + 
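                # No inline body was supplied: publish the raw file contents and
+                # rely on whatever content_type the caller provided.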
routing_key=self.params.get("routing_key"), + properties=pika.BasicProperties(content_type=self.content_type, + delivery_mode=1, + headers=self.headers + )) + + try: + # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue. + if self.queue is None: + result = self.conn_channel.queue_declare(durable=self.params.get("durable"), + exclusive=self.params.get("exclusive"), + auto_delete=self.params.get("auto_delete")) + self.conn_channel.confirm_delivery() + self.queue = result.method.queue + else: + self.conn_channel.queue_declare(queue=self.queue, + durable=self.params.get("durable"), + exclusive=self.params.get("exclusive"), + auto_delete=self.params.get("auto_delete")) + self.conn_channel.confirm_delivery() + except Exception as e: + self.module.fail_json(msg="Queue declare issue: %s" % to_native(e)) + + # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150 + if args['routing_key'] is None: + args['routing_key'] = self.queue + + if args['exchange'] is None: + args['exchange'] = '' + + try: + self.conn_channel.basic_publish(**args) + return True + except pika.exceptions.UnroutableError: + return False diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py new file mode 120000 index 00000000..f9993bfb --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbconfiguration_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py new file mode 120000 index 00000000..b8293e64 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbdatabase_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py new file mode 120000 index 00000000..4311a0c1 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbfirewallrule_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py new file mode 120000 index 00000000..5f76e0e9 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py @@ -0,0 +1 @@ +azure_rm_mariadbserver_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_resource_facts.py b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py new file mode 120000 index 00000000..710fda10 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py @@ -0,0 +1 @@ +azure_rm_resource_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py new file mode 120000 index 00000000..ead87c85 --- /dev/null +++ b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py @@ -0,0 +1 @@ +azure_rm_webapp_info.py \ No newline at end of file diff --git a/test/support/integration/plugins/modules/aws_az_info.py 
b/test/support/integration/plugins/modules/aws_az_info.py new file mode 100644 index 00000000..c1efed6f --- /dev/null +++ b/test/support/integration/plugins/modules/aws_az_info.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = ''' +module: aws_az_info +short_description: Gather information about availability zones in AWS. +description: + - Gather information about availability zones in AWS. + - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change. +version_added: '2.5' +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for + possible filters. Filter names and values are case sensitive. You can also use underscores + instead of dashes (-) in the filter keys, which will take precedence in case of conflict. + required: false + default: {} + type: dict +extends_documentation_fragment: + - aws + - ec2 +requirements: [botocore, boto3] +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all availability zones +- aws_az_info: + +# Gather information about a single availability zone +- aws_az_info: + filters: + zone-name: eu-west-1a +''' + +RETURN = ''' +availability_zones: + returned: on success + description: > + Availability zones that match the provided filters. Each element consists of a dict with all the information + related to that available zone. 
+ type: list + sample: "[ + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1b' + }, + { + 'messages': [], + 'region_name': 'us-west-1', + 'state': 'available', + 'zone_name': 'us-west-1c' + } + ]" +''' + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec) + if module._name == 'aws_az_facts': + module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", + version='2.14', collection_name='ansible.builtin') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items()) + + try: + availability_zones = connection.describe_availability_zones( + Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe availability zones.") + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']] + + module.exit_json(availability_zones=snaked_availability_zones) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py new file mode 100644 index 00000000..54874f05 --- /dev/null +++ b/test/support/integration/plugins/modules/aws_s3.py @@ -0,0 +1,925 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: aws_s3 +short_description: manage objects in S3. +description: + - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and + deleting both objects and buckets, retrieving objects as files or strings and generating download links. + This module has a dependency on boto3 and botocore. +notes: + - In 2.4, this module has been renamed from C(s3) into M(aws_s3). +version_added: "1.1" +options: + bucket: + description: + - Bucket name. + required: true + type: str + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + version_added: "1.3" + type: path + encrypt: + description: + - When set for PUT mode, asks for server-side encryption. + default: true + version_added: "2.0" + type: bool + encryption_mode: + description: + - What encryption mode to use if I(encrypt=true). + default: AES256 + choices: + - AES256 + - aws:kms + version_added: "2.7" + type: str + expiry: + description: + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation. 
+ default: 600 + aliases: ['expiration'] + type: int + headers: + description: + - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + version_added: "2.0" + type: dict + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + version_added: "2.0" + type: str + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + default: 1000 + version_added: "2.0" + type: int + metadata: + description: + - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + version_added: "1.6" + type: dict + mode: + description: + - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), + getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), + and delobj (delete object, Ansible 2.0+). + required: true + choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'] + type: str + object: + description: + - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + type: str + permission: + description: + - This option lets the user set the canned permissions on the object/bucket that are created. + The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or + C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), + C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list. + default: ['private'] + version_added: "2.0" + type: list + elements: str + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode. + default: "" + version_added: "2.0" + type: str + version: + description: + - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. + version_added: "2.0" + type: str + overwrite: + description: + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. + Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0. + When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3. + The ETag may or may not be an MD5 digest of the object data. See the ETag response header here + U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html) + default: 'always' + aliases: ['force'] + version_added: "1.2" + type: str + retries: + description: + - On recoverable failure, how many times to retry before actually failing. + default: 0 + version_added: "2.0" + type: int + aliases: ['retry'] + s3_url: + description: + - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS. + aliases: [ S3_URL ] + type: str + dualstack: + description: + - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Requires at least botocore version 1.4.45. + type: bool + default: false + version_added: "2.7" + rgw: + description: + - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url). 
+ default: false + version_added: "2.2" + type: bool + src: + description: + - The source file path when performing a PUT operation. + version_added: "1.3" + type: str + ignore_nonexistent_bucket: + description: + - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the + GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying + I(ignore_nonexistent_bucket=true)." + version_added: "2.3" + type: bool + encryption_kms_key_id: + description: + - KMS key id to use when encrypting objects using I(encrypting=aws:kms). Ignored if I(encryption) is not C(aws:kms) + version_added: "2.7" + type: str +requirements: [ "boto3", "botocore" ] +author: + - "Lester Wade (@lwade)" + - "Sloane Hertel (@s-hertel)" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +- name: Simple PUT operation + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + +- name: Simple PUT operation in Ceph RGW S3 + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + rgw: true + s3_url: "http://localhost:8000" + +- name: Simple GET operation + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Get a specific version of an object. + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + version: 48c9ee5131af7a716edc22df9772aa6f + dest: /usr/local/myfile.txt + mode: get + +- name: PUT/upload with metadata + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' + +- name: PUT/upload with custom headers + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' + +- name: List keys simple + aws_s3: + bucket: mybucket + mode: list + +- name: List keys all options + aws_s3: + bucket: mybucket + mode: list + prefix: /my/desired/ + marker: /my/desired/0023.txt + max_keys: 472 + +- name: Create an empty bucket + aws_s3: + bucket: mybucket + mode: create + permission: public-read + +- name: Create a bucket with key as directory, in the EU region + aws_s3: + bucket: mybucket + object: /my/directory/path + mode: create + region: eu-west-1 + +- name: Delete a bucket and all contents + aws_s3: + bucket: mybucket + mode: delete + +- name: GET an object but don't download if the file checksums match. New in 2.0 + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + overwrite: different + +- name: Delete an object from a bucket + aws_s3: + bucket: mybucket + object: /my/desired/key.txt + mode: delobj +''' + +RETURN = ''' +msg: + description: Message indicating the status of the operation. + returned: always + type: str + sample: PUT operation complete +url: + description: URL of the object. + returned: (for put and geturl operations) + type: str + sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=&Expires=1506888865&Signature= +expiry: + description: Number of seconds the presigned url is valid for. + returned: (for geturl operation) + type: int + sample: 600 +contents: + description: Contents of the object as string. + returned: (for getstr operation) + type: str + sample: "Hello, world!" +s3_keys: + description: List of object keys. 
+ returned: (for list operation) + type: list + elements: str + sample: + - prefix1/ + - prefix1/key1 + - prefix1/key2 +''' + +import mimetypes +import os +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ssl import SSLError +from ansible.module_utils.basic import to_text, to_native +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5 +from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn + +try: + import botocore +except ImportError: + pass # will be detected by imported AnsibleAWSModule + +IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] + + +class Sigv4Required(Exception): + pass + + +def key_check(module, s3, bucket, obj, version=None, validate=True): + exists = True + try: + if version: + s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + s3.head_object(Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + # if a client error is thrown, check if it's a 404 error + # if it's a 404 error, then the object does not exist + error_code = int(e.response['Error']['Code']) + if error_code == 404: + exists = False + elif error_code == 403 and validate is False: + pass + else: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + return exists + + +def etag_compare(module, local_file, s3, bucket, obj, version=None): + s3_etag = get_etag(s3, bucket, obj, version=version) + local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) + + return s3_etag == local_etag + + +def get_etag(s3, bucket, obj, version=None): + if version: + key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key_check = s3.head_object(Bucket=bucket, Key=obj) + if not key_check: + return None + return key_check['ETag'] + + +def bucket_check(module, s3, bucket, validate=True): + exists = True + try: + s3.head_bucket(Bucket=bucket) + except botocore.exceptions.ClientError as e: + # If a client error is thrown, then check that it was a 404 error. + # If it was a 404 error, then the bucket does not exist. + error_code = int(e.response['Error']['Code']) + if error_code == 404: + exists = False + elif error_code == 403 and validate is False: + pass + else: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) + except botocore.exceptions.EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided") + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." 
% bucket) + return exists + + +def create_bucket(module, s3, bucket, location=None): + if module.check_mode: + module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + try: + if len(configuration) > 0: + s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) + else: + s3.create_bucket(Bucket=bucket) + if module.params.get('permission'): + # Wait for the bucket to exist before setting ACLs + s3.get_waiter('bucket_exists').wait(Bucket=bucket) + for acl in module.params.get('permission'): + s3.put_bucket_acl(ACL=acl, Bucket=bucket) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS: + module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") + else: + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + + if bucket: + return True + + +def paginated_list(s3, **pagination_params): + pg = s3.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versioned_list_with_fallback(s3, **pagination_params): + try: + versioned_pg = s3.get_paginator('list_object_versions') + for page in versioned_pg.paginate(**pagination_params): + delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] + current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + yield delete_markers + current_objects + except botocore.exceptions.ClientError as e: + if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']: + for page in paginated_list(s3, **pagination_params): + yield [{'Key': data['Key']} for data in page] + + +def list_keys(module, s3, bucket, prefix, marker, max_keys): + pagination_params = {'Bucket': bucket} + for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): + pagination_params[param_name] = param_value + try: + keys = sum(paginated_list(s3, **pagination_params), []) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + + +def delete_bucket(module, s3, bucket): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + exists = bucket_check(module, s3, bucket) + if exists is False: + return False + # if there are contents then we need to delete them before we can delete the bucket + for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): + if keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) + s3.delete_bucket(Bucket=bucket) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket %s." 
% bucket) + + +def delete_key(module, s3, bucket, obj): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + s3.delete_object(Bucket=bucket, Key=obj) + module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj) + + +def create_dirkey(module, s3, bucket, obj, encrypt): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + params = {'Bucket': bucket, 'Key': obj, 'Body': b''} + if encrypt: + params['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + + s3.put_object(**params) + for acl in module.params.get('permission'): + s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS: + module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") + else: + module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True) + + +def path_check(path): + if os.path.exists(path): + return True + else: + return False + + +def option_in_extra_args(option): + temp_option = option.replace('-', '').lower() + + allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition', + 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage', + 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl', + 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP', + 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption', + 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey', + 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'} + + if temp_option in allowed_extra_args: + return allowed_extra_args[temp_option] + + +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers): + if module.check_mode: + module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) + try: + extra = {} + if encrypt: + extra['ServerSideEncryption'] = module.params['encryption_mode'] + if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': + extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] + if metadata: + extra['Metadata'] = {} + + # determine object metadata and extra arguments + for option in metadata: + extra_args_option = option_in_extra_args(option) + if extra_args_option is not None: + extra[extra_args_option] = metadata[option] + else: + extra['Metadata'][option] = metadata[option] + + if 'ContentType' not in extra: + content_type = mimetypes.guess_type(src)[0] + if content_type is None: + # s3 default content type + content_type = 'binary/octet-stream' + 
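            # Attach the guessed (or fallback) MIME type so that S3 stores and
+            # serves the object with a sensible Content-Type header.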
+            extra['ContentType'] = content_type
+
+        s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+    try:
+        for acl in module.params.get('permission'):
+            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+        else:
+            module.fail_json_aws(e, msg="Unable to set object ACL")
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="Unable to set object ACL")
+    try:
+        url = s3.generate_presigned_url(ClientMethod='put_object',
+                                        Params={'Bucket': bucket, 'Key': obj},
+                                        ExpiresIn=expiry)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to generate presigned URL")
+    module.exit_json(msg="PUT operation complete", url=url, changed=True)
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+    if module.check_mode:
+        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+    # "retries" counts additional attempts, so the loop below runs
+    # retries + 1 times: the initial attempt plus the retries.
+    try:
+        if version:
+            key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+        else:
+            key = s3.get_object(Bucket=bucket, Key=obj)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
+            raise Sigv4Required()
+        elif e.response['Error']['Code'] not in ("403", "404"):
+            # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+            # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+            module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+    optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+    for x in range(0, retries + 1):
+        try:
+            s3.download_file(bucket, obj, dest, **optional_kwargs)
+            module.exit_json(msg="GET operation complete", changed=True)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            # only fail on the last pass through the loop
+            if x >= retries:
+                module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+            # otherwise retry; this may be a transient timeout
+        except SSLError as e:  # ssl.SSLError is not a botocore ClientError, so it needs its own handler
+            # only fail on the last pass through the loop
+            if x >= retries:
+                module.fail_json_aws(e, msg="s3 download failed")
+            # otherwise retry; this may be a transient timeout
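+
+
+# The retry loop above treats every failure as potentially transient and only
+# fails once the attempt budget is exhausted. The helper below is an editorial,
+# minimal sketch of the same bounded-retry pattern against a plain boto3 client;
+# it is not used by the module and its name and arguments are illustrative only.
+def _example_download_with_retries(bucket, obj, dest, retries=3):
+    import boto3  # assumed available alongside botocore
+    s3 = boto3.client('s3')
+    for attempt in range(retries + 1):  # one initial attempt plus `retries` retries
+        try:
+            s3.download_file(bucket, obj, dest)
+            return
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+            if attempt >= retries:
+                raise  # budget exhausted; surface the last error
+            # otherwise loop and try again; the failure may be transient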
+ + +def download_s3str(module, s3, bucket, obj, version=None, validate=True): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + try: + if version: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + else: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e): + raise Sigv4Required() + else: + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + except botocore.exceptions.BotoCoreError as e: + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + + +def get_download_url(module, s3, bucket, obj, expiry, changed=True): + try: + url = s3.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while getting download url.") + + +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse(s3_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False): + if s3_url and rgw: # TODO - test this + rgw = urlparse(s3_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) + elif is_fakes3(s3_url): + fakes3 = urlparse(s3_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': + params['config'] = botocore.client.Config(signature_version='s3v4') + elif module.params['mode'] in ('get', 'getstr') and sig_4: + params['config'] = botocore.client.Config(signature_version='s3v4') + if module.params['dualstack']: + dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) + if 'config' in params: + params['config'] = params['config'].merge(dualconf) + else: + params['config'] = dualconf + return boto3_conn(**params) + + +def main(): + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type='path'), + encrypt=dict(default=True, type='bool'), + encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), + expiry=dict(default=600, type='int', aliases=['expiration']), + headers=dict(type='dict'), + marker=dict(default=""), + max_keys=dict(default=1000, type='int'), + metadata=dict(type='dict'), + mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), + object=dict(), + permission=dict(type='list', default=['private']), + 
version=dict(default=None), + overwrite=dict(aliases=['force'], default='always'), + prefix=dict(default=""), + retries=dict(aliases=['retry'], type='int', default=0), + s3_url=dict(aliases=['S3_URL']), + dualstack=dict(default='no', type='bool'), + rgw=dict(default='no', type='bool'), + src=dict(), + ignore_nonexistent_bucket=dict(default=False, type='bool'), + encryption_kms_key_id=dict() + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['mode', 'put', ['src', 'object']], + ['mode', 'get', ['dest', 'object']], + ['mode', 'getstr', ['object']], + ['mode', 'geturl', ['object']]], + ) + + bucket = module.params.get('bucket') + encrypt = module.params.get('encrypt') + expiry = module.params.get('expiry') + dest = module.params.get('dest', '') + headers = module.params.get('headers') + marker = module.params.get('marker') + max_keys = module.params.get('max_keys') + metadata = module.params.get('metadata') + mode = module.params.get('mode') + obj = module.params.get('object') + version = module.params.get('version') + overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') + retries = module.params.get('retries') + s3_url = module.params.get('s3_url') + dualstack = module.params.get('dualstack') + rgw = module.params.get('rgw') + src = module.params.get('src') + ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') + + object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] + bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] + + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: + overwrite = 'never' + + if overwrite == 'different' and not HAS_MD5: + module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + if module.params.get('object'): + obj = module.params['object'] + # If there is a top level object, do nothing - if the object starts with / + # remove the leading character to maintain compatibility with Ansible versions < 2.4 + if obj.startswith('/'): + obj = obj[1:] + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. 
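+    # (mode=delete removes the bucket and everything in it, while mode=delobj
+    # removes a single object, so passing obj together with mode=delete is rejected)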
+ if obj and mode == "delete": + module.fail_json(msg='Parameter obj cannot be used with mode=delete') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url: + module.fail_json(msg='dualstack only applies to AWS S3') + + if dualstack and not module.botocore_at_least('1.4.45'): + module.fail_json(msg='dualstack requires botocore >= 1.4.45') + + # rgw requires an explicit url + if rgw and not s3_url: + module.fail_json(msg='rgw flavour requires s3_url') + + # Look at s3_url and tweak connection settings + # if connecting to RGW, Walrus or fakes3 + if s3_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url) + + validate = not ignore_nonexistent_bucket + + # separate types of ACLs + bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] + object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] + error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] + if error_acl: + module.fail_json(msg='Unknown permission specified: %s' % error_acl) + + # First, we check to see if the bucket exists, we get "bucket" returned. + bucketrtn = bucket_check(module, s3, bucket, validate=validate) + + if validate and mode not in ('create', 'put', 'delete') and not bucketrtn: + module.fail_json(msg="Source bucket cannot be found.") + + if mode == 'get': + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn is False: + if version: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + if path_check(dest) and overwrite != 'always': + if overwrite == 'never': + module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) + if etag_compare(module, dest, s3, bucket, obj, version=version): + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) + + try: + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) + + if mode == 'put': + + # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified + # these were separated into the variables bucket_acl and object_acl above + + if not path_check(src): + module.fail_json(msg="Local object for PUT does not exist") + + if bucketrtn: + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + else: + # If the bucket doesn't exist we should create it. 
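+            # a freshly created bucket has no existing key to compare against;
+            # initialise keyrtn so the overwrite check further down cannot hit a NameError
+            keyrtn = False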
+ # only use valid bucket acls for create_bucket function + module.params['permission'] = bucket_acl + create_bucket(module, s3, bucket, location) + + if keyrtn and overwrite != 'always': + if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj): + # Return the download URL for the existing object + get_download_url(module, s3, bucket, obj, expiry, changed=False) + + # only use valid object acls for the upload_s3file function + module.params['permission'] = object_acl + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) + + # Delete an object from a bucket, not the entire bucket + if mode == 'delobj': + if obj is None: + module.fail_json(msg="object parameter is required") + if bucket: + deletertn = delete_key(module, s3, bucket, obj) + if deletertn is True: + module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Delete an entire bucket, including all objects in the bucket + if mode == 'delete': + if bucket: + deletertn = delete_bucket(module, s3, bucket) + if deletertn is True: + module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True) + else: + module.fail_json(msg="Bucket parameter is required.") + + # Support for listing a set of keys + if mode == 'list': + exists = bucket_check(module, s3, bucket) + + # If the bucket does not exist then bail out + if not exists: + module.fail_json(msg="Target bucket (%s) cannot be found" % bucket) + + list_keys(module, s3, bucket, prefix, marker, max_keys) + + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. + # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. + if mode == 'create': + + # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified + # these were separated above into the variables bucket_acl and object_acl + + if bucket and not obj: + if bucketrtn: + module.exit_json(msg="Bucket already exists.", changed=False) + else: + # only use valid bucket acls when creating the bucket + module.params['permission'] = bucket_acl + module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) + if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucketrtn: + if key_check(module, s3, bucket, dirobj): + module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) + else: + # setting valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + else: + # only use valid bucket acls for the create_bucket function + module.params['permission'] = bucket_acl + created = create_bucket(module, s3, bucket, location) + # only use valid object acls for the create_dirkey function + module.params['permission'] = object_acl + create_dirkey(module, s3, bucket, dirobj, encrypt) + + # Support for grabbing the time-expired URL for an object in S3/Walrus. + if mode == 'geturl': + if not bucket and not obj: + module.fail_json(msg="Bucket and Object parameters must be set") + + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn: + get_download_url(module, s3, bucket, obj, expiry) + else: + module.fail_json(msg="Key %s does not exist." 
% obj) + + if mode == 'getstr': + if bucket and obj: + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn: + try: + download_s3str(module, s3, bucket, obj, version=version) + except Sigv4Required: + s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True) + download_s3str(module, s3, bucket, obj, version=version) + elif version is not None: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + module.exit_json(failed=False) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_appserviceplan.py b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py new file mode 100644 index 00000000..ee871c35 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_appserviceplan +version_added: "2.7" +short_description: Manage App Service Plan +description: + - Create, update and delete instance of App Service Plan. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + + name: + description: + - Unique name of the app service plan to create or update. + required: True + + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + + sku: + description: + - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc. + - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail. + - For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail. + is_linux: + description: + - Describe whether to host webapp on Linux worker. + type: bool + default: false + + number_of_workers: + description: + - Describe number of workers to be allocated. + + state: + description: + - Assert the state of the app service plan. + - Use C(present) to create or update an app service plan and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a windows app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S1 + + - name: Create a linux app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S1 + is_linux: true + number_of_workers: 1 + + - name: update sku of existing windows app service plan + azure_rm_appserviceplan: + resource_group: myResourceGroup + name: myAppPlan + location: eastus + sku: S2 +''' + +RETURN = ''' +azure_appserviceplan: + description: Facts about the current state of the app service plan. 
+    returned: always
+    type: dict
+    sample: {
+             "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
+    }
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+    from msrestazure.azure_exceptions import CloudError
+    from msrest.polling import LROPoller
+    from msrestazure.azure_operation import AzureOperationPoller
+    from msrest.serialization import Model
+    from azure.mgmt.web.models import (
+        app_service_plan, AppServicePlan, SkuDescription
+    )
+except ImportError:
+    # This is handled in azure_rm_common
+    pass
+
+
+def _normalize_sku(sku):
+    if sku is None:
+        return sku
+
+    sku = sku.upper()
+    if sku == 'FREE':
+        return 'F1'
+    elif sku == 'SHARED':
+        return 'D1'
+    return sku
+
+
+def get_sku_name(tier):
+    tier = tier.upper()
+    if tier == 'F1' or tier == "FREE":
+        return 'FREE'
+    elif tier == 'D1' or tier == "SHARED":
+        return 'SHARED'
+    elif tier in ['B1', 'B2', 'B3', 'BASIC']:
+        return 'BASIC'
+    elif tier in ['S1', 'S2', 'S3']:
+        return 'STANDARD'
+    elif tier in ['P1', 'P2', 'P3']:
+        return 'PREMIUM'
+    elif tier in ['P1V2', 'P2V2', 'P3V2']:
+        return 'PREMIUMV2'
+    else:
+        return None
+
+
+def appserviceplan_to_dict(plan):
+    return dict(
+        id=plan.id,
+        name=plan.name,
+        kind=plan.kind,
+        location=plan.location,
+        reserved=plan.reserved,
+        is_linux=plan.reserved,  # a 'reserved' plan is a Linux plan
+        provisioning_state=plan.provisioning_state,
+        status=plan.status,
+        target_worker_count=plan.target_worker_count,
+        sku=dict(
+            name=plan.sku.name,
+            size=plan.sku.size,
+            tier=plan.sku.tier,
+            family=plan.sku.family,
+            capacity=plan.sku.capacity
+        ),
+        resource_group=plan.resource_group,
+        number_of_sites=plan.number_of_sites,
+        tags=plan.tags if plan.tags else None
+    )
+
+
+class AzureRMAppServicePlans(AzureRMModuleBase):
+    """Configuration class for an Azure RM App Service Plan resource"""
+
+    def __init__(self):
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True
+            ),
+            name=dict(
+                type='str',
+                required=True
+            ),
+            location=dict(
+                type='str'
+            ),
+            sku=dict(
+                type='str'
+            ),
+            is_linux=dict(
+                type='bool',
+                default=False
+            ),
+            number_of_workers=dict(
+                type='str'
+            ),
+            state=dict(
+                type='str',
+                default='present',
+                choices=['present', 'absent']
+            )
+        )
+
+        self.resource_group = None
+        self.name = None
+        self.location = None
+
+        self.sku = None
+        self.is_linux = None
+        self.number_of_workers = 1
+
+        self.tags = None
+
+        self.results = dict(
+            changed=False,
+            ansible_facts=dict(azure_appserviceplan=None)
+        )
+        self.state = None
+
+        super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
+                                                     supports_check_mode=True,
+                                                     supports_tags=True)
+
+    def exec_module(self, **kwargs):
+        """Main module execution method"""
+
+        for key in list(self.module_arg_spec.keys()) + ['tags']:
+            if kwargs[key]:
+                setattr(self, key, kwargs[key])
+
+        old_response = None
+        response = None
+        to_be_updated = False
+
+        # set location
+        resource_group = self.get_resource_group(self.resource_group)
+        if not self.location:
+            self.location = resource_group.location
+
+        # get app service plan
+        old_response = self.get_plan()
+
+        # if not existing
+        if not old_response:
+            self.log("App Service plan doesn't exist")
+
+            if self.state == "present":
+                to_be_updated = True
+
+                if not self.sku:
+                    self.fail('Please specify sku when creating an app service plan')
+
+        else:
+            # existing app service plan, do update
+            self.log("App Service Plan already exists")
+
+            if self.state == 'present':
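+                # update path: tags, sku size and worker count are compared below;
+                # 'reserved' (Linux vs Windows) cannot be changed on an existing plan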
self.log('Result: {0}'.format(old_response)) + + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + + if update_tags: + to_be_updated = True + self.tags = newtags + + # check if sku changed + if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']: + to_be_updated = True + + # check if number_of_workers changed + if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']: + to_be_updated = True + + if self.is_linux and self.is_linux != old_response['reserved']: + self.fail("Operation not allowed: cannot update reserved of app service plan.") + + if old_response: + self.results['id'] = old_response['id'] + + if to_be_updated: + self.log('Need to Create/Update app service plan') + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update_plan() + self.results['id'] = response['id'] + + if self.state == 'absent' and old_response: + self.log("Delete app service plan") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_plan() + + self.log('App service plan instance deleted') + + return self.results + + def get_plan(self): + ''' + Gets app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Get App Service Plan {0}".format(self.name)) + + try: + response = self.web_client.app_service_plans.get(self.resource_group, self.name) + if response: + self.log("Response : {0}".format(response)) + self.log("App Service Plan : {0} found".format(response.name)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group)) + + return False + + def create_or_update_plan(self): + ''' + Creates app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Create App Service Plan {0}".format(self.name)) + + try: + # normalize sku + sku = _normalize_sku(self.sku) + + sku_def = SkuDescription(tier=get_sku_name( + sku), name=sku, capacity=self.number_of_workers) + plan_def = AppServicePlan( + location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None) + + response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def) + + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + self.log("Response : {0}".format(response)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex))) + + def delete_plan(self): + ''' + Deletes specified App service plan in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the App service plan {0}".format(self.name)) + try: + response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group, + name=self.name) + except CloudError as e: + self.log('Error attempting to delete App service plan.') + self.fail( + "Error deleting the App service plan : {0}".format(str(e))) + + return True + + +def main(): + """Main execution""" + AzureRMAppServicePlans() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp.py b/test/support/integration/plugins/modules/azure_rm_functionapp.py new file mode 100644 index 00000000..0c372a88 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_functionapp.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Thomas Stringer +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp +version_added: "2.4" +short_description: Manage Azure Function Apps +description: + - Create, update or delete an Azure Function App. +options: + resource_group: + description: + - Name of resource group. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the Azure Function App. + required: true + location: + description: + - Valid Azure location. Defaults to location of the resource group. + plan: + description: + - App service plan. + - It can be name of existing app service plan in same resource group as function app. + - It can be resource id of existing app service plan. + - Resource id. For example /subscriptions//resourceGroups//providers/Microsoft.Web/serverFarms/. + - It can be a dict which contains C(name), C(resource_group). + - C(name). Name of app service plan. + - C(resource_group). Resource group name of app service plan. + version_added: "2.8" + container_settings: + description: Web app container settings. + suboptions: + name: + description: + - Name of container. For example "imagename:tag". + registry_server_url: + description: + - Container registry server url. For example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + version_added: "2.8" + storage_account: + description: + - Name of the storage account to use. + required: true + aliases: + - storage + - storage_account_name + app_settings: + description: + - Dictionary containing application settings. + state: + description: + - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' +- name: Create a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + +- name: Create a function app with app settings + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + app_settings: + setting1: value1 + setting2: value2 + +- name: Create container based function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + plan: + resource_group: myResourceGroup + name: myAppPlan + container_settings: + name: httpd + registry_server_url: index.docker.io + +- name: Delete a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the Azure Function App. + returned: success + type: dict + example: + id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: 2017-08-22T18:54:01.190Z + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ 
+ container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' # NOQA + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl, + AppServicePlan, SkuDescription + ) + from azure.mgmt.resource.resources import ResourceManagementClient + from msrest.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + + +class AzureRMFunctionApp(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True, aliases=['resource_group_name']), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + storage_account=dict( + type='str', + aliases=['storage', 'storage_account_name'] + ), + app_settings=dict(type='dict'), + plan=dict( + type='raw' + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ) + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.storage_account = None + self.app_settings = None + self.plan = None + self.container_settings = None + + required_if = [('state', 'present', ['storage_account'])] + + super(AzureRMFunctionApp, self).__init__( + self.module_arg_spec, + supports_check_mode=True, + required_if=required_if + ) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + if self.app_settings is None: + self.app_settings = dict() + + try: + resource_group = self.rm_client.resource_groups.get(self.resource_group) + except CloudError: + self.fail('Unable to retrieve resource group') + + self.location = self.location or resource_group.location + + try: + function_app = self.web_client.web_apps.get( + resource_group_name=self.resource_group, + name=self.name + ) + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + exists = function_app is not None + except CloudError as exc: + exists = False + + if self.state == 'absent': + if exists: + if self.check_mode: + self.results['changed'] = True + return self.results + try: + self.web_client.web_apps.delete( + resource_group_name=self.resource_group, + name=self.name + ) + self.results['changed'] = True + except CloudError as exc: + self.fail('Failure while deleting web app: {0}'.format(exc)) + else: + self.results['changed'] = False + else: + kind = 'functionapp' + linux_fx_version = None + if self.container_settings and self.container_settings.get('name'): + kind = 'functionapp,linux,container' + linux_fx_version = 'DOCKER|' + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + linux_fx_version += self.container_settings['registry_server_url'] + '/' + linux_fx_version += self.container_settings['name'] + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = 
self.container_settings.get('registry_server_user') + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password') + + if not self.plan and function_app: + self.plan = function_app.server_farm_id + + if not exists: + function_app = Site( + location=self.location, + kind=kind, + site_config=SiteConfig( + app_settings=self.aggregated_app_settings(), + scm_type='LocalGit' + ) + ) + self.results['changed'] = True + else: + self.results['changed'], function_app = self.update(function_app) + + # get app service plan + if self.plan: + if isinstance(self.plan, dict): + self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format( + self.subscription_id, + self.plan.get('resource_group', self.resource_group), + self.plan.get('name') + ) + function_app.server_farm_id = self.plan + + # set linux fx version + if linux_fx_version: + function_app.site_config.linux_fx_version = linux_fx_version + + if self.check_mode: + self.results['state'] = function_app.as_dict() + elif self.results['changed']: + try: + new_function_app = self.web_client.web_apps.create_or_update( + resource_group_name=self.resource_group, + name=self.name, + site_envelope=function_app + ).result() + self.results['state'] = new_function_app.as_dict() + except CloudError as exc: + self.fail('Error creating or updating web app: {0}'.format(exc)) + + return self.results + + def update(self, source_function_app): + """Update the Site object if there are any changes""" + + source_app_settings = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, + name=self.name + ) + + changed, target_app_settings = self.update_app_settings(source_app_settings.properties) + + source_function_app.site_config = SiteConfig( + app_settings=target_app_settings, + scm_type='LocalGit' + ) + + return changed, source_function_app + + def update_app_settings(self, source_app_settings): + """Update app settings""" + + target_app_settings = self.aggregated_app_settings() + target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings]) + return target_app_settings_dict != source_app_settings, target_app_settings + + def necessary_functionapp_settings(self): + """Construct the necessary app settings required for an Azure Function App""" + + function_app_settings = [] + + if self.container_settings is None: + for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']: + function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string)) + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1')) + function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0')) + function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name)) + else: + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2')) + function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False)) + function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string)) + + return function_app_settings + + def aggregated_app_settings(self): + """Combine both system and user app settings""" + + function_app_settings = self.necessary_functionapp_settings() + for app_setting_key in self.app_settings: + found_setting = None + for s in 
function_app_settings: + if s.name == app_setting_key: + found_setting = s + break + if found_setting: + found_setting.value = self.app_settings[app_setting_key] + else: + function_app_settings.append(NameValuePair( + name=app_setting_key, + value=self.app_settings[app_setting_key] + )) + return function_app_settings + + @property + def storage_connection_string(self): + """Construct the storage account connection string""" + + return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format( + self.storage_account, + self.storage_key + ) + + @property + def storage_key(self): + """Retrieve the storage account key""" + + return self.storage_client.storage_accounts.list_keys( + resource_group_name=self.resource_group, + account_name=self.storage_account + ).keys[0].value + + +def main(): + """Main function execution""" + + AzureRMFunctionApp() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp_info.py b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py new file mode 100644 index 00000000..40672f95 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Thomas Stringer, + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp_info +version_added: "2.9" +short_description: Get Azure Function App facts +description: + - Get facts for one Azure Function App or all Function Apps within a resource group. +options: + name: + description: + - Only show results for a specific Function App. + resource_group: + description: + - Limit results to a resource group. Required when filtering by name. + aliases: + - resource_group_name + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + +extends_documentation_fragment: + - azure + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' + - name: Get facts for one Function App + azure_rm_functionapp_info: + resource_group: myResourceGroup + name: myfunctionapp + + - name: Get facts for all Function Apps in a resource group + azure_rm_functionapp_info: + resource_group: myResourceGroup + + - name: Get facts for all Function Apps by tags + azure_rm_functionapp_info: + tags: + - testing +''' + +RETURN = ''' +azure_functionapps: + description: + - List of Azure Function Apps dicts. 
+ returned: always + type: list + example: + id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: 2017-08-22T18:54:01.190Z + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ + container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' + +try: + from msrestazure.azure_exceptions import CloudError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMFunctionAppInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str', aliases=['resource_group_name']), + tags=dict(type='list'), + ) + + self.results = dict( + changed=False, + ansible_info=dict(azure_functionapps=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMFunctionAppInfo, self).__init__( + self.module_arg_spec, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_functionapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_info']['azure_functionapps'] = self.get_functionapp() + elif self.resource_group: + self.results['ansible_info']['azure_functionapps'] = self.list_resource_group() + else: + self.results['ansible_info']['azure_functionapps'] = self.list_all() + + return self.results + + def get_functionapp(self): + self.log('Get properties for Function App {0}'.format(self.name)) + function_app = None + result = [] + + try: + function_app = self.web_client.web_apps.get( + self.resource_group, + self.name + ) + except CloudError: + pass + + if function_app and self.has_tags(function_app.tags, self.tags): + result = function_app.as_dict() + + return [result] + + def list_resource_group(self): + self.log('List items') + try: + response = self.web_client.web_apps.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item.as_dict()) + return results + + def list_all(self): + self.log('List all items') + try: + response = 
self.web_client.web_apps.list()
+        except Exception as exc:
+            self.fail("Error listing all items - {0}".format(str(exc)))
+
+        results = []
+        for item in response:
+            if self.has_tags(item.tags, self.tags):
+                results.append(item.as_dict())
+        return results
+
+
+def main():
+    AzureRMFunctionAppInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
new file mode 100644
index 00000000..212cf795
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration
+version_added: "2.8"
+short_description: Manage MariaDB Configuration instance
+description:
+    - Create, update and delete instance of MariaDB Configuration.
+
+options:
+    resource_group:
+        description:
+            - The name of the resource group that contains the resource.
+        required: True
+    server_name:
+        description:
+            - The name of the server.
+        required: True
+    name:
+        description:
+            - The name of the server configuration.
+        required: True
+    value:
+        description:
+            - Value of the configuration.
+    state:
+        description:
+            - Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
+        default: present
+        choices:
+            - absent
+            - present
+
+extends_documentation_fragment:
+    - azure
+
+author:
+    - Zim Kalinowski (@zikalino)
+    - Matti Ranta (@techknowlogick)
+'''
+
+EXAMPLES = '''
+  - name: Update MariaDB Server setting
+    azure_rm_mariadbconfiguration:
+      resource_group: myResourceGroup
+      server_name: myServer
+      name: event_scheduler
+      value: "ON"
+'''
+
+RETURN = '''
+id:
+    description:
+        - Resource ID.
+    returned: always
+    type: str
+    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/configurations/event_scheduler"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+    from msrestazure.azure_exceptions import CloudError
+    from msrest.polling import LROPoller
+    from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+    from msrest.serialization import Model
+except ImportError:
+    # This is handled in azure_rm_common
+    pass
+
+
+class Actions:
+    NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbConfiguration(AzureRMModuleBase):
+
+    def __init__(self):
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True
+            ),
+            server_name=dict(
+                type='str',
+                required=True
+            ),
+            name=dict(
+                type='str',
+                required=True
+            ),
+            value=dict(
+                type='str'
+            ),
+            state=dict(
+                type='str',
+                default='present',
+                choices=['present', 'absent']
+            )
+        )
+
+        self.resource_group = None
+        self.server_name = None
+        self.name = None
+        self.value = None
+
+        self.results = dict(changed=False)
+        self.state = None
+        self.to_do = Actions.NoAction
+
+        super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
+                                                          supports_check_mode=True,
+                                                          supports_tags=False)
+
+    def exec_module(self, **kwargs):
+
+        for key in list(self.module_arg_spec.keys()):
+            if hasattr(self, key):
+                setattr(self, key, kwargs[key])
+
+        old_response = None
+        response = None
+
+        old_response = self.get_configuration()
+
+        if not old_response:
+            self.log("Configuration instance doesn't exist")
+            if self.state == 'absent':
+                self.log("Old instance didn't exist")
+            else:
+                self.to_do = Actions.Create
+        else:
+            self.log("Configuration instance already exists")
+            if self.state == 'absent' and old_response['source'] == 'user-override':
+                self.to_do = Actions.Delete
+            elif self.state == 'present':
+                self.log("Need to check if Configuration instance has to be deleted or may be updated")
+                if self.value != old_response.get('value'):
+                    self.to_do = Actions.Update
+
+        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+            self.log("Need to Create / Update the Configuration instance")
+
+            if self.check_mode:
+                self.results['changed'] = True
+                return self.results
+
+            response = self.create_update_configuration()
+
+            self.results['changed'] = True
+            self.log("Creation / Update done")
+        elif self.to_do == Actions.Delete:
+            self.log("Need to delete the Configuration instance")
+            self.results['changed'] = True
+
+            if self.check_mode:
+                return self.results
+
+            self.delete_configuration()
+        else:
+            self.log("Configuration instance unchanged")
+            self.results['changed'] = False
+            response = old_response
+
+        if response:
+            self.results["id"] = response["id"]
+
+        return self.results
+
+    def create_update_configuration(self):
+        self.log("Creating / Updating the Configuration instance {0}".format(self.name))
+
+        try:
+            response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
+                                                                           server_name=self.server_name,
+                                                                           configuration_name=self.name,
+                                                                           value=self.value,
+                                                                           source='user-override')
+            if isinstance(response, LROPoller):
+                response = self.get_poller_result(response)
+
+        except CloudError as exc:
+            self.log('Error attempting to create the Configuration instance.')
+            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
+        return response.as_dict()
+
+    def delete_configuration(self):
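+        # "deleting" a configuration actually resets it to the server default by
+        # calling create_or_update with source='system-default' (see below)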
self.log("Deleting the Configuration instance {0}".format(self.name)) + try: + response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name, + source='system-default') + except CloudError as e: + self.log('Error attempting to delete the Configuration instance.') + self.fail("Error deleting the Configuration instance: {0}".format(str(e))) + + return True + + def get_configuration(self): + self.log("Checking if the Configuration instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Configuration instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the Configuration instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbConfiguration() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py new file mode 100644 index 00000000..3faac5eb --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbconfiguration_info +version_added: "2.9" +short_description: Get Azure MariaDB Configuration facts +description: + - Get facts of Azure MariaDB Configuration. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - Setting name. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get specific setting of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: testserver + name: deadlock_timeout + + - name: Get all settings of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +settings: + description: + - A list of dictionaries containing MariaDB Server settings. + returned: always + type: complex + contains: + id: + description: + - Setting resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver + /configurations/deadlock_timeout" + name: + description: + - Setting name. + returned: always + type: str + sample: deadlock_timeout + value: + description: + - Setting value. 
+ returned: always + type: raw + sample: 1000 + description: + description: + - Description of the configuration. + returned: always + type: str + sample: Deadlock timeout. + source: + description: + - Source of the configuration. + returned: always + type: str + sample: system-default +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict(changed=False) + self.mgmt_client = None + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name is not None: + self.results['settings'] = self.get() + else: + self.results['settings'] = self.list_by_server() + return self.results + + def get(self): + ''' + Gets facts of the specified MariaDB Configuration. + + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + ''' + Gets facts of the specified MariaDB Configuration. 
+ + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'id': d['id'], + 'name': d['name'], + 'value': d['value'], + 'description': d['description'], + 'source': d['source'] + } + return d + + +def main(): + AzureRMMariaDbConfigurationInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py new file mode 100644 index 00000000..8492b968 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase +version_added: "2.8" +short_description: Manage MariaDB Database instance +description: + - Create, update and delete instance of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + required: True + charset: + description: + - The charset of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + collation: + description: + - The collation of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + force_update: + description: + - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set. + - When set to C(false), no change will occur to the database even if any of the properties do not match. + type: bool + default: 'no' + state: + description: + - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Database + azure_rm_mariadbdatabase: + resource_group: myResourceGroup + server_name: testserver + name: db1 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1 +name: + description: + - Resource name. + returned: always + type: str + sample: db1 +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbDatabase(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Database resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + charset=dict( + type='str' + ), + collation=dict( + type='str' + ), + force_update=dict( + type='bool', + default=False + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.force_update = None + self.parameters = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "charset": + self.parameters["charset"] = kwargs[key] + elif key == "collation": + self.parameters["collation"] = kwargs[key] + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_mariadbdatabase() + + if not old_response: + self.log("MariaDB Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB Database instance has to be deleted or may be updated") + if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']): + self.to_do = Actions.Update + if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']): + self.to_do = Actions.Update + if self.to_do == Actions.Update: + if self.force_update: + if not self.check_mode: + self.delete_mariadbdatabase() + else: + self.fail("Database properties cannot be updated without setting 'force_update' option") + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mariadbdatabase() + self.results['changed'] = True + self.log("Creation 
/ Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mariadbdatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mariadbdatabase(): + time.sleep(20) + else: + self.log("MariaDB Database instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["name"] = response["name"] + + return self.results + + def create_update_mariadbdatabase(self): + ''' + Creates or updates MariaDB Database with the specified configuration. + + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name)) + + try: + response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the MariaDB Database instance.') + self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mariadbdatabase(self): + ''' + Deletes specified MariaDB Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB Database instance {0}".format(self.name)) + try: + response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the MariaDB Database instance.') + self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e))) + + return True + + def get_mariadbdatabase(self): + ''' + Gets the properties of the specified MariaDB Database. 
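+        Returns False when the database does not exist, so the result doubles as an existence check.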
+ + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Checking if the MariaDB Database instance {0} is present".format(self.name)) + found = False + try: + response = self.mgmt_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Database instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB Database instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbDatabase() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py new file mode 100644 index 00000000..e9c99c14 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase_info +version_added: "2.9" +short_description: Get Azure MariaDB Database facts +description: + - Get facts of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the database. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name + name: database_name + + - name: List instances of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +databases: + description: + - A list of dictionaries containing facts for MariaDB Databases. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser + ver/databases/db1" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testrg + server_name: + description: + - Server name. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: db1 + charset: + description: + - The charset of the database. + returned: always + type: str + sample: UTF8 + collation: + description: + - The collation of the database. 
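+                - Like C(charset), the collation is fixed when the database is created.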
+ returned: always + type: str + sample: English_United States.1252 +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None and + self.name is not None): + self.results['databases'] = self.get() + elif (self.resource_group is not None and + self.server_name is not None): + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for Databases.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'name': d['name'], + 'charset': d['charset'], + 'collation': d['collation'] + } + return d + + +def main(): + AzureRMMariaDbDatabaseInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py new file mode 100644 index 00000000..1fc8c5e7 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = 
''' +--- +module: azure_rm_mariadbfirewallrule +version_added: "2.8" +short_description: Manage MariaDB firewall rule instance +description: + - Create, update and delete instance of MariaDB firewall rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the MariaDB firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. Must be IPv4 format. + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. Must be IPv4 format. + state: + description: + - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB firewall rule + azure_rm_mariadbfirewallrule: + resource_group: myResourceGroup + server_name: testserver + name: rule1 + start_ip_address: 10.0.0.17 + end_ip_address: 10.0.0.20 +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB firewall rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_firewallrule() + + if not old_response: + self.log("MariaDB firewall rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB firewall rule instance already exists") + if self.state == 
'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB firewall rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB firewall rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("MariaDB firewall rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates MariaDB firewall rule with the specified configuration. + + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name)) + + try: + response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + start_ip_address=self.start_ip_address, + end_ip_address=self.end_ip_address) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the MariaDB firewall rule instance.') + self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified MariaDB firewall rule instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name)) + try: + response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the MariaDB firewall rule instance.') + self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified MariaDB firewall rule. 
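+        Returns False when the rule does not exist, so callers can treat the result as an existence check.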
+ + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB firewall rule instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB firewall rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py new file mode 100644 index 00000000..ef71be8d --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbfirewallrule_info +version_added: "2.9" +short_description: Get Azure MariaDB Firewall Rule facts +description: + - Get facts of Azure MariaDB Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the server firewall rule. + type: str + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name + name: firewall_rule_name + + - name: List instances of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +rules: + description: + - A list of dictionaries containing facts for MariaDB Firewall Rule. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" + server_name: + description: + - The name of the server. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: rule1 + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. + returned: always + type: str + sample: 10.0.0.16 + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. 
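+                - In IPv4 format.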
+ returned: always + type: str + sample: 10.0.0.18 +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d['id'], + 'server_name': self.server_name, + 'name': d['name'], + 'start_ip_address': d['start_ip_address'], + 'end_ip_address': d['end_ip_address'] + } + return d + + +def main(): + AzureRMMariaDbFirewallRuleInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py new file mode 100644 index 00000000..30a29988 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver +version_added: "2.8" +short_description: Manage MariaDB Server instance +description: + - Create, update and delete instance of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + name: + description: + - The name of the server. + required: True + sku: + description: + - The SKU (pricing tier) of the server. + suboptions: + name: + description: + - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8). + tier: + description: + - The tier of the particular SKU, for example C(Basic). + choices: + - basic + - standard + capacity: + description: + - The scale up/out capacity, representing server's compute units. + type: int + size: + description: + - The size code, to be interpreted by resource as appropriate. + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + storage_mb: + description: + - The maximum storage allowed for a server. + type: int + version: + description: + - Server version. + choices: + - 10.2 + enforce_ssl: + description: + - Enable SSL enforcement. + type: bool + default: False + admin_username: + description: + - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation). + admin_password: + description: + - The password of the administrator login. + create_mode: + description: + - Create mode of SQL Server. + default: Default + state: + description: + - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Server + azure_rm_mariadbserver: + resource_group: myResourceGroup + name: testserver + sku: + name: B_Gen5_1 + tier: Basic + location: eastus + storage_mb: 1024 + enforce_ssl: True + version: 10.2 + admin_username: cloudsa + admin_password: password +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593 +version: + description: + - Server version. Possible values include C(10.2). + returned: always + type: str + sample: 10.2 +state: + description: + - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled). + returned: always + type: str + sample: Ready +fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. 
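+        - Use this value as the host name when connecting to the server.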
+ returned: always + type: str + sample: mariadbsrv1b6dd89593.mariadb.database.azure.com +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbServers(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + sku=dict( + type='dict' + ), + location=dict( + type='str' + ), + storage_mb=dict( + type='int' + ), + version=dict( + type='str', + choices=['10.2'] + ), + enforce_ssl=dict( + type='bool', + default=False + ), + create_mode=dict( + type='str', + default='Default' + ), + admin_username=dict( + type='str' + ), + admin_password=dict( + type='str', + no_log=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + self.tags = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "sku": + ev = kwargs[key] + if 'tier' in ev: + if ev['tier'] == 'basic': + ev['tier'] = 'Basic' + elif ev['tier'] == 'standard': + ev['tier'] = 'Standard' + self.parameters["sku"] = ev + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "storage_mb": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key] + elif key == "version": + self.parameters.setdefault("properties", {})["version"] = kwargs[key] + elif key == "enforce_ssl": + self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled' + elif key == "create_mode": + self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key] + elif key == "admin_username": + self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key] + elif key == "admin_password": + self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_mariadbserver() + + if not old_response: + self.log("MariaDB Server instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if 
update_tags: + self.tags = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB Server instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mariadbserver() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB Server instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mariadbserver() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mariadbserver(): + time.sleep(20) + else: + self.log("MariaDB Server instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["version"] = response["version"] + self.results["state"] = response["user_visible_state"] + self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"] + + return self.results + + def create_update_mariadbserver(self): + ''' + Creates or updates MariaDB Server with the specified configuration. + + :return: deserialized MariaDB Server instance state dictionary + ''' + self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name)) + + try: + self.parameters['tags'] = self.tags + if self.to_do == Actions.Create: + response = self.mariadb_client.servers.create(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + else: + # structure of parameters for update must be changed + self.parameters.update(self.parameters.pop("properties", {})) + response = self.mariadb_client.servers.update(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the MariaDB Server instance.') + self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mariadbserver(self): + ''' + Deletes specified MariaDB Server instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB Server instance {0}".format(self.name)) + try: + response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group, + server_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the MariaDB Server instance.') + self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e))) + + return True + + def get_mariadbserver(self): + ''' + Gets the properties of the specified MariaDB Server. 
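+        Returns False when the server does not exist.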
+ + :return: deserialized MariaDB Server instance state dictionary + ''' + self.log("Checking if the MariaDB Server instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Server instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the MariaDB Server instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbServers() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py new file mode 100644 index 00000000..464aa4d8 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver_info +version_added: "2.9" +short_description: Get Azure MariaDB Server facts +description: + - Get facts of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup + name: server_name + + - name: List instances of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup +''' + +RETURN = ''' +servers: + description: + - A list of dictionaries containing facts for MariaDB servers. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223 + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Resource name. + returned: always + type: str + sample: myabdud1223 + location: + description: + - The location the resource resides in. + returned: always + type: str + sample: eastus + sku: + description: + - The SKU of the server. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen4_2 + tier: + description: + - The tier of the particular SKU. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The scale capacity. + returned: always + type: int + sample: 2 + storage_mb: + description: + - The maximum storage allowed for a server. 
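+                - Value is in megabytes.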
+ returned: always + type: int + sample: 128000 + enforce_ssl: + description: + - Enable SSL enforcement. + returned: always + type: bool + sample: False + admin_username: + description: + - The administrator's login name of a server. + returned: always + type: str + sample: serveradmin + version: + description: + - Server version. + returned: always + type: str + sample: "9.6" + user_visible_state: + description: + - A state of a server that is visible to user. + returned: always + type: str + sample: Ready + fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. + returned: always + type: str + sample: myabdud1223.mys.database.azure.com + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + type: dict + sample: { tag1: abc } +''' + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbServerInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.name = None + self.tags = None + super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.name is not None): + self.results['servers'] = self.get() + elif (self.resource_group is not None): + self.results['servers'] = self.list_by_resource_group() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for MariaDB Server.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except CloudError as e: + self.log('Could not get facts for MariaDB Servers.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'id': d['id'], + 'resource_group': self.resource_group, + 'name': d['name'], + 'sku': d['sku'], + 'location': d['location'], + 'storage_mb': d['storage_profile']['storage_mb'], + 'version': d['version'], + 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'), + 'admin_username': d['administrator_login'], + 'user_visible_state': d['user_visible_state'], + 
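+            # 'tags' may be missing from the response, hence d.get() below rather than direct indexing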
+            'fully_qualified_domain_name': d['fully_qualified_domain_name'],
+            'tags': d.get('tags')
+        }
+
+        return d
+
+
+def main():
+    AzureRMMariaDbServerInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource.py b/test/support/integration/plugins/modules/azure_rm_resource.py
new file mode 100644
index 00000000..6ea3e3bb
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource
+version_added: "2.6"
+short_description: Create any Azure resource
+description:
+    - Create, update or delete any Azure resource using Azure REST API.
+    - This module gives access to resources that are not supported via Ansible modules.
+    - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+    url:
+        description:
+            - Azure RM Resource URL.
+    api_version:
+        description:
+            - Specific API version to be used.
+    provider:
+        description:
+            - Provider type.
+            - Required if URL is not specified.
+    resource_group:
+        description:
+            - Resource group to be used.
+            - Required if URL is not specified.
+    resource_type:
+        description:
+            - Resource type.
+            - Required if URL is not specified.
+    resource_name:
+        description:
+            - Resource name.
+            - Required if URL is not specified.
+    subresource:
+        description:
+            - List of subresources.
+        suboptions:
+            namespace:
+                description:
+                    - Subresource namespace.
+            type:
+                description:
+                    - Subresource type.
+            name:
+                description:
+                    - Subresource name.
+    body:
+        description:
+            - The body of the HTTP request/response to the web service.
+    method:
+        description:
+            - The HTTP method of the request or response. It must be uppercase.
+        choices:
+            - GET
+            - PUT
+            - POST
+            - HEAD
+            - PATCH
+            - DELETE
+            - MERGE
+        default: "PUT"
+    status_code:
+        description:
+            - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes.
+        type: list
+        default: [ 200, 201, 202 ]
+    idempotency:
+        description:
+            - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
+        default: no
+        type: bool
+    polling_timeout:
+        description:
+            - Timeout (in seconds) to wait for a long-running operation to complete; C(0) means do not wait.
+        default: 0
+        type: int
+        version_added: "2.8"
+    polling_interval:
+        description:
+            - Interval (in seconds) between polls of a long-running operation's status.
+        default: 60
+        type: int
+        version_added: "2.8"
+    state:
+        description:
+            - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
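+            - When I(state=absent), the module switches I(method) to C(DELETE) and adds C(204) to the accepted I(status_code) values.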
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Update scaleset info using azure_rm_resource + azure_rm_resource: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + body: { body } +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183" + kind: + description: + - The kind of storage. + type: str + returned: always + sample: Storage + location: + description: + - The resource location, defaults to location of the resource group. + type: str + returned: always + sample: eastus + name: + description: + The storage account name. + type: str + returned: always + sample: staccb57dc95183 + properties: + description: + - The storage account's related properties. + type: dict + returned: always + sample: { + "creationTime": "2019-06-13T06:34:33.0996676Z", + "encryption": { + "keySource": "Microsoft.Storage", + "services": { + "blob": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + }, + "file": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + } + } + }, + "networkAcls": { + "bypass": "AzureServices", + "defaultAction": "Allow", + "ipRules": [], + "virtualNetworkRules": [] + }, + "primaryEndpoints": { + "blob": "https://staccb57dc95183.blob.core.windows.net/", + "file": "https://staccb57dc95183.file.core.windows.net/", + "queue": "https://staccb57dc95183.queue.core.windows.net/", + "table": "https://staccb57dc95183.table.core.windows.net/" + }, + "primaryLocation": "eastus", + "provisioningState": "Succeeded", + "secondaryLocation": "westus", + "statusOfPrimary": "available", + "statusOfSecondary": "available", + "supportsHttpsTrafficOnly": false + } + sku: + description: + - The storage account SKU. + type: dict + returned: always + sample: { + "name": "Standard_GRS", + "tier": "Standard" + } + tags: + description: + - Resource tags. + type: dict + returned: always + sample: { 'key1': 'value1' } + type: + description: + - The resource type. 
+            type: str
+            returned: always
+            sample: "Microsoft.Storage/storageAccounts"
+
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+try:
+    from msrestazure.azure_exceptions import CloudError
+    from msrest.service_client import ServiceClient
+    from msrestazure.tools import resource_id, is_valid_resource_id
+    import json
+
+except ImportError:
+    # This is handled in azure_rm_common
+    pass
+
+
+class AzureRMResource(AzureRMModuleBase):
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            url=dict(
+                type='str'
+            ),
+            provider=dict(
+                type='str',
+            ),
+            resource_group=dict(
+                type='str',
+            ),
+            resource_type=dict(
+                type='str',
+            ),
+            resource_name=dict(
+                type='str',
+            ),
+            subresource=dict(
+                type='list',
+                default=[]
+            ),
+            api_version=dict(
+                type='str'
+            ),
+            method=dict(
+                type='str',
+                default='PUT',
+                choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
+            ),
+            body=dict(
+                type='raw'
+            ),
+            status_code=dict(
+                type='list',
+                default=[200, 201, 202]
+            ),
+            idempotency=dict(
+                type='bool',
+                default=False
+            ),
+            polling_timeout=dict(
+                type='int',
+                default=0
+            ),
+            polling_interval=dict(
+                type='int',
+                default=60
+            ),
+            state=dict(
+                type='str',
+                default='present',
+                choices=['present', 'absent']
+            )
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False,
+            response=None
+        )
+        self.mgmt_client = None
+        self.url = None
+        self.api_version = None
+        self.provider = None
+        self.resource_group = None
+        self.resource_type = None
+        self.resource_name = None
+        self.subresource_type = None
+        self.subresource_name = None
+        self.subresource = []
+        self.method = None
+        self.status_code = []
+        self.idempotency = False
+        self.polling_timeout = None
+        self.polling_interval = None
+        self.state = None
+        self.body = None
+        super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)
+
+    def exec_module(self, **kwargs):
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+                                                    base_url=self._cloud_environment.endpoints.resource_manager)
+
+        if self.state == 'absent':
+            self.method = 'DELETE'
+            self.status_code.append(204)
+
+        if self.url is None:
+            orphan = None
+            rargs = dict()
+            rargs['subscription'] = self.subscription_id
+            rargs['resource_group'] = self.resource_group
+            # prepend the 'Microsoft.' namespace prefix unless the caller already supplied it
+            if not (self.provider is None or self.provider.lower().startswith('microsoft')):
+                rargs['namespace'] = "Microsoft."
+ self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + needs_update = True + response = None + + if self.idempotency: + original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0) + + if original.status_code == 404: + if self.state == 'absent': + needs_update = False + else: + try: + response = json.loads(original.text) + needs_update = (dict_merge(response, self.body) != response) + except Exception: + pass + + if needs_update: + response = self.mgmt_client.query(self.url, + self.method, + query_parameters, + header_parameters, + self.body, + self.status_code, + self.polling_timeout, + self.polling_interval) + if self.state == 'present': + try: + response = json.loads(response.text) + except Exception: + response = response.text + else: + response = None + + self.results['response'] = response + self.results['changed'] = needs_update + + return self.results + + +def main(): + AzureRMResource() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_resource_info.py b/test/support/integration/plugins/modules/azure_rm_resource_info.py new file mode 100644 index 00000000..f797f662 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_resource_info.py @@ -0,0 +1,432 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- 
+module: azure_rm_resource_info +version_added: "2.9" +short_description: Generic facts of Azure resources +description: + - Obtain facts of any resource using Azure REST API. + - This module gives access to resources that are not supported via Ansible modules. + - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API. + +options: + url: + description: + - Azure RM Resource URL. + api_version: + description: + - Specific API version to be used. + provider: + description: + - Provider type, should be specified in no URL is given. + resource_group: + description: + - Resource group to be used. + - Required if URL is not specified. + resource_type: + description: + - Resource type. + resource_name: + description: + - Resource name. + subresource: + description: + - List of subresources. + suboptions: + namespace: + description: + - Subresource namespace. + type: + description: + - Subresource type. + name: + description: + - Subresource name. + +extends_documentation_fragment: + - azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get scaleset info + azure_rm_resource_info: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + + - name: Query all the resources in the resource group + azure_rm_resource_info: + resource_group: "{{ resource_group }}" + resource_type: resources +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Id of the Azure resource. + type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM" + location: + description: + - Resource location. + type: str + returned: always + sample: eastus + name: + description: + - Resource name. + type: str + returned: always + sample: myVM + properties: + description: + - Specifies the virtual machine's property. + type: complex + returned: always + contains: + diagnosticsProfile: + description: + - Specifies the boot diagnostic settings state. + type: complex + returned: always + contains: + bootDiagnostics: + description: + - A debugging feature, which to view Console Output and Screenshot to diagnose VM status. + type: dict + returned: always + sample: { + "enabled": true, + "storageUri": "https://vxisurgdiag.blob.core.windows.net/" + } + hardwareProfile: + description: + - Specifies the hardware settings for the virtual machine. + type: dict + returned: always + sample: { + "vmSize": "Standard_D2s_v3" + } + networkProfile: + description: + - Specifies the network interfaces of the virtual machine. + type: complex + returned: always + contains: + networkInterfaces: + description: + - Describes a network interface reference. + type: list + returned: always + sample: + - { + "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441" + } + osProfile: + description: + - Specifies the operating system settings for the virtual machine. + type: complex + returned: always + contains: + adminUsername: + description: + - Specifies the name of the administrator account. + type: str + returned: always + sample: azureuser + allowExtensionOperations: + description: + - Specifies whether extension operations should be allowed on the virtual machine. 
+                                - This may only be set to False when no extensions are present on the virtual machine.
+                            type: bool
+                            returned: always
+                            sample: true
+                        computerName:
+                            description:
+                                - Specifies the host OS name of the virtual machine.
+                            type: str
+                            returned: always
+                            sample: myVM
+                        requireGuestProvisionSignal:
+                            description:
+                                - Specifies whether the host requires a guest provision signal.
+                            type: bool
+                            returned: always
+                            sample: true
+                        secrets:
+                            description:
+                                - Specifies the set of certificates that should be installed onto the virtual machine.
+                            type: list
+                            returned: always
+                            sample: []
+                        linuxConfiguration:
+                            description:
+                                - Specifies the Linux operating system settings on the virtual machine.
+                            type: dict
+                            returned: when OS type is Linux
+                            sample: {
+                                    "disablePasswordAuthentication": false,
+                                    "provisionVMAgent": true
+                                    }
+                provisioningState:
+                    description:
+                        - The provisioning state.
+                    type: str
+                    returned: always
+                    sample: Succeeded
+                vmID:
+                    description:
+                        - Specifies the VM unique ID, a 128-bit identifier that is encoded and stored in all Azure IaaS VMs SMBIOS.
+                        - It can be read using platform BIOS commands.
+                    type: str
+                    returned: always
+                    sample: "eb86d9bb-6725-4787-a487-2e497d5b340c"
+                storageProfile:
+                    description:
+                        - Specifies the storage settings for the virtual machine disks.
+                    type: complex
+                    returned: always
+                    contains:
+                        dataDisks:
+                            description:
+                                - Specifies the parameters that are used to add a data disk to a virtual machine.
+                            type: list
+                            returned: always
+                            sample:
+                                - {
+                                  "caching": "None",
+                                  "createOption": "Attach",
+                                  "diskSizeGB": 1023,
+                                  "lun": 2,
+                                  "managedDisk": {
+                                        "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2",
+                                        "storageAccountType": "StandardSSD_LRS"
+                                  },
+                                  "name": "testdisk2"
+                                  }
+                                - {
+                                  "caching": "None",
+                                  "createOption": "Attach",
+                                  "diskSizeGB": 1023,
+                                  "lun": 1,
+                                  "managedDisk": {
+                                        "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3",
+                                        "storageAccountType": "StandardSSD_LRS"
+                                  },
+                                  "name": "testdisk3"
+                                  }
+
+                        imageReference:
+                            description:
+                                - Specifies information about the image to use.
+                            type: dict
+                            returned: always
+                            sample: {
+                                   "offer": "UbuntuServer",
+                                   "publisher": "Canonical",
+                                   "sku": "18.04-LTS",
+                                   "version": "latest"
+                                   }
+                        osDisk:
+                            description:
+                                - Specifies information about the operating system disk used by the virtual machine.
+                            type: dict
+                            returned: always
+                            sample: {
+                                    "caching": "ReadWrite",
+                                    "createOption": "FromImage",
+                                    "diskSizeGB": 30,
+                                    "managedDisk": {
+                                        "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx",
+                                        "storageAccountType": "Premium_LRS"
+                                    },
+                                    "name": "myVM_disk1_xxx",
+                                    "osType": "Linux"
+                                    }
+                type:
+                    description:
+                        - The resource type of the virtual machine.
+                    type: str
+                    returned: always
+                    sample: "Microsoft.Compute/virtualMachines"
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+
+try:
+    from msrestazure.azure_exceptions import CloudError
+    from msrest.service_client import ServiceClient
+    from msrestazure.tools import resource_id, is_valid_resource_id
+    import json
+
+except ImportError:
+    # This is handled in azure_rm_common
+    pass
+
+
+class AzureRMResourceInfo(AzureRMModuleBase):
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            url=dict(
+                type='str'
+            ),
+            provider=dict(
+                type='str'
+            ),
+            resource_group=dict(
+                type='str'
+            ),
+            resource_type=dict(
+                type='str'
+            ),
+            resource_name=dict(
+                type='str'
+            ),
+            subresource=dict(
+                type='list',
+                default=[]
+            ),
+            api_version=dict(
+                type='str'
+            )
+        )
+        # store the results of the module operation
+        self.results = dict(
+            response=[]
+        )
+        self.mgmt_client = None
+        self.url = None
+        self.api_version = None
+        self.provider = None
+        self.resource_group = None
+        self.resource_type = None
+        self.resource_name = None
+        self.subresource = []
+        super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+    def exec_module(self, **kwargs):
+        is_old_facts = self.module._name == 'azure_rm_resource_facts'
+        if is_old_facts:
+            self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'",
+                                  version='2.13', collection_name='ansible.builtin')
+
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+                                                    base_url=self._cloud_environment.endpoints.resource_manager)
+
+        if self.url is None:
+            orphan = None
+            rargs = dict()
+            rargs['subscription'] = self.subscription_id
+            rargs['resource_group'] = self.resource_group
+            # prepend the Microsoft. prefix only when it is not already present
+            if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
+                rargs['namespace'] = "Microsoft."
+ self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + self.results['url'] = self.url + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + skiptoken = None + + while True: + if skiptoken: + query_parameters['skiptoken'] = skiptoken + response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0) + try: + response = json.loads(response.text) + if isinstance(response, dict): + if response.get('value'): + self.results['response'] = self.results['response'] + response['value'] + skiptoken = response.get('nextLink') + else: + self.results['response'] = self.results['response'] + [response] + except Exception as e: + self.fail('Failed to parse response: ' + str(e)) + if not skiptoken: + break + return self.results + + +def main(): + AzureRMResourceInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_storageaccount.py b/test/support/integration/plugins/modules/azure_rm_storageaccount.py new file mode 100644 index 00000000..d4158bbd --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_storageaccount.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount +version_added: "2.1" +short_description: Manage Azure storage accounts +description: + - Create, update or delete a storage account. 
+options: + resource_group: + description: + - Name of the resource group to use. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the storage account to update or create. + state: + description: + - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + account_type: + description: + - Type of storage account. Required when creating a storage account. + - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types. + - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS). + choices: + - Premium_LRS + - Standard_GRS + - Standard_LRS + - StandardSSD_LRS + - Standard_RAGRS + - Standard_ZRS + - Premium_ZRS + aliases: + - type + custom_domain: + description: + - User domain assigned to the storage account. + - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source. + - Only one custom domain is supported per storage account at this time. + - To clear the existing custom domain, use an empty string for the custom domain name property. + - Can be added to an existing storage account. Will be ignored during storage account creation. + aliases: + - custom_dns_domain_suffix + kind: + description: + - The kind of storage. + default: 'Storage' + choices: + - Storage + - StorageV2 + - BlobStorage + version_added: "2.2" + access_tier: + description: + - The access tier for this storage account. Required when I(kind=BlobStorage). + choices: + - Hot + - Cool + version_added: "2.4" + force_delete_nonempty: + description: + - Attempt deletion if resource already exists and cannot be updated. + type: bool + aliases: + - force + https_only: + description: + - Allows https traffic only to storage service when set to C(true). + type: bool + version_added: "2.8" + blob_cors: + description: + - Specifies CORS rules for the Blob service. + - You can include up to five CorsRule elements in the request. + - If no blob_cors elements are included in the argument list, nothing about CORS will be changed. + - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]). + type: list + version_added: "2.8" + suboptions: + allowed_origins: + description: + - A list of origin domains that will be allowed via CORS, or "*" to allow all domains. + type: list + required: true + allowed_methods: + description: + - A list of HTTP methods that are allowed to be executed by the origin. + type: list + required: true + max_age_in_seconds: + description: + - The number of seconds that the client/browser should cache a preflight response. + type: int + required: true + exposed_headers: + description: + - A list of response headers to expose to CORS clients. + type: list + required: true + allowed_headers: + description: + - A list of headers allowed to be part of the cross-origin request. 
+ type: list + required: true + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + - name: remove account, if it exists + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + state: absent + + - name: create an account + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + type: Standard_RAGRS + tags: + testing: testing + delete: on-exit + + - name: create an account with blob CORS + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh002 + type: Standard_RAGRS + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + - x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 +''' + + +RETURN = ''' +state: + description: + - Current state of the storage account. + returned: always + type: complex + contains: + account_type: + description: + - Type of storage account. + returned: always + type: str + sample: Standard_RAGRS + custom_domain: + description: + - User domain assigned to the storage account. + returned: always + type: complex + contains: + name: + description: + - CNAME source. + returned: always + type: str + sample: testaccount + use_sub_domain: + description: + - Whether to use sub domain. + returned: always + type: bool + sample: true + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003" + location: + description: + - Valid Azure location. Defaults to location of the resource group. + returned: always + type: str + sample: eastus2 + name: + description: + - Name of the storage account to update or create. + returned: always + type: str + sample: clh0003 + primary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location. + returned: always + type: dict + sample: { + "blob": "https://clh0003.blob.core.windows.net/", + "queue": "https://clh0003.queue.core.windows.net/", + "table": "https://clh0003.table.core.windows.net/" + } + primary_location: + description: + - The location of the primary data center for the storage account. + returned: always + type: str + sample: eastus2 + provisioning_state: + description: + - The status of the storage account. + - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded). + returned: always + type: str + sample: Succeeded + resource_group: + description: + - The resource group's name. + returned: always + type: str + sample: Testing + secondary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location. + returned: always + type: dict + sample: { + "blob": "https://clh0003-secondary.blob.core.windows.net/", + "queue": "https://clh0003-secondary.queue.core.windows.net/", + "table": "https://clh0003-secondary.table.core.windows.net/" + } + secondary_location: + description: + - The location of the geo-replicated secondary for the storage account. + returned: always + type: str + sample: centralus + status_of_primary: + description: + - The status of the primary location of the storage account; either C(available) or C(unavailable). 
+ returned: always + type: str + sample: available + status_of_secondary: + description: + - The status of the secondary location of the storage account; either C(available) or C(unavailable). + returned: always + type: str + sample: available + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'tags1': 'value1' } + type: + description: + - The storage account type. + returned: always + type: str + sample: "Microsoft.Storage/storageAccounts" +''' + +try: + from msrestazure.azure_exceptions import CloudError + from azure.storage.cloudstorageaccount import CloudStorageAccount + from azure.common import AzureMissingResourceHttpError +except ImportError: + # This is handled in azure_rm_common + pass + +import copy +from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase +from ansible.module_utils._text import to_native + +cors_rule_spec = dict( + allowed_origins=dict(type='list', elements='str', required=True), + allowed_methods=dict(type='list', elements='str', required=True), + max_age_in_seconds=dict(type='int', required=True), + exposed_headers=dict(type='list', elements='str', required=True), + allowed_headers=dict(type='list', elements='str', required=True), +) + + +def compare_cors(cors1, cors2): + if len(cors1) != len(cors2): + return False + copy2 = copy.copy(cors2) + for rule1 in cors1: + matched = False + for rule2 in copy2: + if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds'] + and set(rule1['allowed_methods']) == set(rule2['allowed_methods']) + and set(rule1['allowed_origins']) == set(rule2['allowed_origins']) + and set(rule1['allowed_headers']) == set(rule2['allowed_headers']) + and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])): + matched = True + copy2.remove(rule2) + if not matched: + return False + return True + + +class AzureRMStorageAccount(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + account_type=dict(type='str', + choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'], + aliases=['type']), + custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']), + location=dict(type='str'), + name=dict(type='str', required=True), + resource_group=dict(required=True, type='str', aliases=['resource_group_name']), + state=dict(default='present', choices=['present', 'absent']), + force_delete_nonempty=dict(type='bool', default=False, aliases=['force']), + tags=dict(type='dict'), + kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']), + access_tier=dict(type='str', choices=['Hot', 'Cool']), + https_only=dict(type='bool', default=False), + blob_cors=dict(type='list', options=cors_rule_spec, elements='dict') + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.account_dict = None + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.account_type = None + self.custom_domain = None + self.tags = None + self.force_delete_nonempty = None + self.kind = None + self.access_tier = None + self.https_only = None + self.blob_cors = None + + super(AzureRMStorageAccount, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = 
resource_group.location + + if len(self.name) < 3 or len(self.name) > 24: + self.fail("Parameter error: name length must be between 3 and 24 characters.") + + if self.custom_domain: + if self.custom_domain.get('name', None) is None: + self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.") + if self.custom_domain.get('use_sub_domain', None) is None: + self.fail("Parameter error: expecting custom_domain to have a use_sub_domain " + "attribute of type boolean.") + + self.account_dict = self.get_account() + + if self.state == 'present' and self.account_dict and \ + self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE: + self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state " + "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE)) + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_account() + else: + self.update_account() + elif self.state == 'absent' and self.account_dict: + self.delete_account() + self.results['state'] = dict(Status='Deleted') + + return self.results + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + response = self.storage_client.storage_accounts.check_name_availability(self.name) + except CloudError as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + if not response.name_available: + self.log('Error name not available.') + self.fail("{0} - {1}".format(response.message, response.reason)) + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account_obj = None + blob_service_props = None + account_dict = None + + try: + account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name) + except CloudError: + pass + + if account_obj: + account_dict = self.account_obj_to_dict(account_obj, blob_service_props) + + return account_dict + + def account_obj_to_dict(self, account_obj, blob_service_props=None): + account_dict = dict( + id=account_obj.id, + name=account_obj.name, + location=account_obj.location, + resource_group=self.resource_group, + type=account_obj.type, + access_tier=(account_obj.access_tier.value + if account_obj.access_tier is not None else None), + sku_tier=account_obj.sku.tier.value, + sku_name=account_obj.sku.name.value, + provisioning_state=account_obj.provisioning_state.value, + secondary_location=account_obj.secondary_location, + status_of_primary=(account_obj.status_of_primary.value + if account_obj.status_of_primary is not None else None), + status_of_secondary=(account_obj.status_of_secondary.value + if account_obj.status_of_secondary is not None else None), + primary_location=account_obj.primary_location, + https_only=account_obj.enable_https_traffic_only + ) + account_dict['custom_domain'] = None + if account_obj.custom_domain: + account_dict['custom_domain'] = dict( + name=account_obj.custom_domain.name, + use_sub_domain=account_obj.custom_domain.use_sub_domain + ) + + account_dict['primary_endpoints'] = None + if account_obj.primary_endpoints: + account_dict['primary_endpoints'] = dict( + blob=account_obj.primary_endpoints.blob, + 
queue=account_obj.primary_endpoints.queue,
+                table=account_obj.primary_endpoints.table
+            )
+        account_dict['secondary_endpoints'] = None
+        if account_obj.secondary_endpoints:
+            account_dict['secondary_endpoints'] = dict(
+                blob=account_obj.secondary_endpoints.blob,
+                queue=account_obj.secondary_endpoints.queue,
+                table=account_obj.secondary_endpoints.table
+            )
+        account_dict['tags'] = None
+        if account_obj.tags:
+            account_dict['tags'] = account_obj.tags
+        if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
+            account_dict['blob_cors'] = [dict(
+                allowed_origins=[to_native(y) for y in x.allowed_origins],
+                allowed_methods=[to_native(y) for y in x.allowed_methods],
+                max_age_in_seconds=x.max_age_in_seconds,
+                exposed_headers=[to_native(y) for y in x.exposed_headers],
+                allowed_headers=[to_native(y) for y in x.allowed_headers]
+            ) for x in blob_service_props.cors.cors_rules]
+        return account_dict
+
+    def update_account(self):
+        self.log('Update storage account {0}'.format(self.name))
+        if bool(self.https_only) != bool(self.account_dict.get('https_only')):
+            self.results['changed'] = True
+            self.account_dict['https_only'] = self.https_only
+            if not self.check_mode:
+                try:
+                    parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only)
+                    self.storage_client.storage_accounts.update(self.resource_group,
+                                                                self.name,
+                                                                parameters)
+                except Exception as exc:
+                    self.fail("Failed to update the https_only setting: {0}".format(str(exc)))
+
+        if self.account_type:
+            if self.account_type != self.account_dict['sku_name']:
+                # change the account type
+                SkuName = self.storage_models.SkuName
+                if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
+                    self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
+                        SkuName.premium_lrs, SkuName.standard_zrs))
+                if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
+                    self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
+                        self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))
+
+                self.results['changed'] = True
+                self.account_dict['sku_name'] = self.account_type
+
+                if self.results['changed'] and not self.check_mode:
+                    # Perform the update. The API only allows changing one attribute per call.
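+                    # Both guards above have already rejected the disallowed
+                    # Premium_LRS / Standard_ZRS conversions, so the update call
+                    # below only needs to carry the new SKU.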
+ try: + self.log("sku_name: %s" % self.account_dict['sku_name']) + self.log("sku_tier: %s" % self.account_dict['sku_tier']) + sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name'])) + sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier']) + parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + if self.custom_domain: + if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain: + self.results['changed'] = True + self.account_dict['custom_domain'] = self.custom_domain + + if self.results['changed'] and not self.check_mode: + new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'], + use_sub_domain=self.custom_domain['use_sub_domain']) + parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update custom domain: {0}".format(str(exc))) + + if self.access_tier: + if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier: + self.results['changed'] = True + self.account_dict['access_tier'] = self.access_tier + + if self.results['changed'] and not self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update access tier: {0}".format(str(exc))) + + update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags']) + if update_tags: + self.results['changed'] = True + if not self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags']) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update tags: {0}".format(str(exc))) + + if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors): + self.results['changed'] = True + if not self.check_mode: + self.set_blob_cors() + + def create_account(self): + self.log("Creating account {0}".format(self.name)) + + if not self.location: + self.fail('Parameter error: location required when creating a storage account.') + + if not self.account_type: + self.fail('Parameter error: account_type required when creating a storage account.') + + if not self.access_tier and self.kind == 'BlobStorage': + self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.') + + self.check_name_availability() + self.results['changed'] = True + + if self.check_mode: + account_dict = dict( + location=self.location, + account_type=self.account_type, + name=self.name, + resource_group=self.resource_group, + enable_https_traffic_only=self.https_only, + tags=dict() + ) + if self.tags: + account_dict['tags'] = self.tags + if self.blob_cors: + account_dict['blob_cors'] = self.blob_cors + return account_dict + sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type)) + sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \ + self.storage_models.SkuTier.premium + parameters 
= self.storage_models.StorageAccountCreateParameters(sku=sku, + kind=self.kind, + location=self.location, + tags=self.tags, + access_tier=self.access_tier) + self.log(str(parameters)) + try: + poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except CloudError as e: + self.log('Error creating storage account.') + self.fail("Failed to create account: {0}".format(str(e))) + if self.blob_cors: + self.set_blob_cors() + # the poller doesn't actually return anything + return self.get_account() + + def delete_account(self): + if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \ + not self.force_delete_nonempty and self.account_has_blob_containers(): + self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.") + + self.log('Delete storage account {0}'.format(self.name)) + self.results['changed'] = True + if not self.check_mode: + try: + status = self.storage_client.storage_accounts.delete(self.resource_group, self.name) + self.log("delete status: ") + self.log(str(status)) + except CloudError as e: + self.fail("Failed to delete the account: {0}".format(str(e))) + return True + + def account_has_blob_containers(self): + ''' + If there are blob containers, then there are likely VMs depending on this account and it should + not be deleted. + ''' + self.log('Checking for existing blob containers') + blob_service = self.get_blob_client(self.resource_group, self.name) + try: + response = blob_service.list_containers() + except AzureMissingResourceHttpError: + # No blob storage available? + return False + + if len(response.items) > 0: + return True + return False + + def set_blob_cors(self): + try: + cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors]) + self.storage_client.blob_services.set_service_properties(self.resource_group, + self.name, + self.storage_models.BlobServiceProperties(cors=cors_rules)) + except Exception as exc: + self.fail("Failed to set CORS rules: {0}".format(str(exc))) + + +def main(): + AzureRMStorageAccount() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webapp.py b/test/support/integration/plugins/modules/azure_rm_webapp.py new file mode 100644 index 00000000..4f185f45 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webapp.py @@ -0,0 +1,1070 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webapp +version_added: "2.7" +short_description: Manage Web App instances +description: + - Create, update and delete instance of Web App. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter. + required: True + + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + + plan: + description: + - App service plan. Required for creation. 
+        - Can be name of existing app service plan in same resource group as web app.
+        - Can be the resource ID of an existing app service plan. For example
+          /subscriptions//resourceGroups//providers/Microsoft.Web/serverFarms/.
+        - Can be a dict containing five parameters, defined below.
+        - C(name), name of app service plan.
+        - C(resource_group), resource group of the app service plan.
+        - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
+        - C(is_linux), whether or not the app service plan is Linux. Defaults to C(False).
+        - C(number_of_workers), number of workers for app service plan.
+
+    frameworks:
+        description:
+            - Set of run time framework settings. Each setting is a dictionary.
+            - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+        suboptions:
+            name:
+                description:
+                    - Name of the framework.
+                    - Supported framework list for Windows web app and Linux web app is different.
+                    - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                    - Windows web apps support multiple frameworks at the same time.
+                    - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+                    - Linux web apps support only one framework.
+                    - Java framework is mutually exclusive with others.
+                choices:
+                    - java
+                    - net_framework
+                    - php
+                    - python
+                    - ruby
+                    - dotnetcore
+                    - node
+            version:
+                description:
+                    - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+                    - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+                    - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+                    - C(python) supported value sample, C(2.7), C(3.4).
+                    - C(node) supported value sample, C(6.6), C(6.9).
+                    - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+                    - C(ruby) supported value sample, C(2.3).
+                    - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+            settings:
+                description:
+                    - List of settings of the framework.
+                suboptions:
+                    java_container:
+                        description:
+                            - Name of Java container.
+                            - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
+                    java_container_version:
+                        description:
+                            - Version of Java container.
+                            - Supported only when I(frameworks=java).
+                            - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty), C(9.1), C(9.3).
+
+    container_settings:
+        description:
+            - Web app container settings.
+        suboptions:
+            name:
+                description:
+                    - Name of container, for example C(imagename:tag).
+            registry_server_url:
+                description:
+                    - Container registry server URL, for example C(mydockerregistry.io).
+            registry_server_user:
+                description:
+                    - The container registry server user name.
+            registry_server_password:
+                description:
+                    - The container registry server password.
+
+    scm_type:
+        description:
+            - Repository type of deployment source, for example C(LocalGit), C(GitHub).
+            - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
+
+    deployment_source:
+        description:
+            - Deployment source for git.
+        suboptions:
+            url:
+                description:
+                    - Repository URL of deployment source.
+
+            branch:
+                description:
+                    - The branch name of the repository.
+    startup_file:
+        description:
+            - The web app's startup file.
+            - Used only for Linux web apps.
+
+    client_affinity_enabled:
+        description:
+            - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
+        type: bool
+        default: True
+
+    https_only:
+        description:
+            - Configures web site to accept only https requests.
+        type: bool
+
+    dns_registration:
+        description:
+            - Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
+        type: bool
+
+    skip_custom_domain_verification:
+        description:
+            - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
+        type: bool
+
+    ttl_in_seconds:
+        description:
+            - Time to live in seconds for web app default domain name.
+
+    app_settings:
+        description:
+            - Configure web app application settings. Suboptions are in key value pair format.
+
+    purge_app_settings:
+        description:
+            - Purge any existing application settings. Replace web app application settings with app_settings.
+        type: bool
+
+    app_state:
+        description:
+            - Start/Stop/Restart the web app.
+        type: str
+        choices:
+            - started
+            - stopped
+            - restarted
+        default: started
+
+    state:
+        description:
+            - State of the Web App.
+            - Use C(present) to create or update a Web App and C(absent) to delete it.
+        default: present
+        choices:
+            - absent
+            - present
+
+extends_documentation_fragment:
+    - azure
+    - azure_tags
+
+author:
+    - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+    - name: Create a windows web app with a non-existent app service plan
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWinWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+          is_linux: false
+          sku: S1
+
+    - name: Create a docker web app with some app settings and a docker image
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myDockerWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+          is_linux: true
+          sku: S1
+          number_of_workers: 2
+        app_settings:
+          testkey: testvalue
+          testkey2: testvalue2
+        container_settings:
+          name: ansible/ansible:ubuntu1404
+
+    - name: Create a docker web app with a private ACR registry
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myDockerWebapp
+        plan: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        container_settings:
+          name: ansible/ubuntu1404
+          registry_server_url: myregistry.io
+          registry_server_user: user
+          registry_server_password: pass
+
+    - name: Create a linux web app with Node 6.6 framework
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myLinuxWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
+          - name: "node"
+            version: "6.6"
+
+    - name: Create a windows web app with node and php
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWinWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
+          - name: "node"
+            version: 6.6
+          - name: "php"
+            version: "7.0"
+
+    - name: Create a stage deployment slot for an existing web app
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myWebapp/slots/stage
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+
+    - name: Create a linux web app with java framework
+      azure_rm_webapp:
+        resource_group: myResourceGroup
+        name: myLinuxWebapp
+        plan:
+          resource_group: myAppServicePlan_rg
+          name: myAppServicePlan
+        app_settings:
+          testkey: testvalue
+        frameworks:
+
- name: "java" + version: "8" + settings: + java_container: "Tomcat" + java_container_version: "8.5" +''' + +RETURN = ''' +azure_webapp: + description: + - ID of current web app. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp" +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, + AppServicePlan, SkuDescription, NameValuePair + ) +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def _normalize_sku(sku): + if sku is None: + return sku + + sku = sku.upper() + if sku == 'FREE': + return 'F1' + elif sku == 'SHARED': + return 'D1' + return sku + + +def get_sku_name(tier): + tier = tier.upper() + if tier == 'F1' or tier == "FREE": + return 'FREE' + elif tier == 'D1' or tier == "SHARED": + return 'SHARED' + elif tier in ['B1', 'B2', 'B3', 'BASIC']: + return 'BASIC' + elif tier in ['S1', 'S2', 'S3']: + return 'STANDARD' + elif tier in ['P1', 'P2', 'P3']: + return 'PREMIUM' + elif tier in ['P1V2', 'P2V2', 'P3V2']: + return 'PREMIUMV2' + else: + return None + + +def appserviceplan_to_dict(plan): + return dict( + id=plan.id, + name=plan.name, + kind=plan.kind, + location=plan.location, + reserved=plan.reserved, + is_linux=plan.reserved, + provisioning_state=plan.provisioning_state, + tags=plan.tags if plan.tags else None + ) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +class Actions: + CreateOrUpdate, UpdateAppSettings, Delete = range(3) + + +class AzureRMWebApps(AzureRMModuleBase): + """Configuration class for an Azure RM Web App resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + plan=dict( + type='raw' + ), + frameworks=dict( + type='list', + elements='dict', + 
options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + scm_type=dict( + type='str', + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + client_affinity_enabled=dict( + type='bool', + default=True + ), + dns_registration=dict( + type='bool' + ), + https_only=dict( + type='bool' + ), + skip_custom_domain_verification=dict( + type='bool' + ), + ttl_in_seconds=dict( + type='int' + ), + app_settings=dict( + type='dict' + ), + purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.location = None + + # update in create_or_update as parameters + self.client_affinity_enabled = True + self.dns_registration = None + self.skip_custom_domain_verification = None + self.ttl_in_seconds = None + self.https_only = None + + self.tags = None + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # app service plan + self.plan = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. e.g windows/linux, client_affinity etc first level args + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = [] + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_properties = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "scm_type"] + + # updatable_properties + self.updatable_properties = ["client_affinity_enabled", + "force_dns_registration", + "https_only", + "skip_custom_domain_verification", + "ttl_in_seconds"] + + self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "scm_type": + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get existing web app + old_response = self.get_webapp() + + if old_response: + self.results['id'] = old_response['id'] + + if self.state == 'present': + if not self.plan and not old_response: + self.fail("Please specify plan for newly created web app.") + + if not self.plan: + self.plan = old_response['server_farm_id'] + + self.plan = self.parse_resource_to_dict(self.plan) + + # get app service plan + is_linux = False + old_plan = self.get_app_service_plan() + if old_plan: + is_linux = old_plan['reserved'] + else: + is_linux 
= self.plan['is_linux'] if 'is_linux' in self.plan else False + + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + if self.https_only is not None: + self.site.https_only = self.https_only + + if self.client_affinity_enabled: + self.site.client_affinity_enabled = self.client_affinity_enabled + + # check if the web app already present in the resource group + if not old_response: + self.log("Web App instance doesn't exist") + + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + self.site.tags = self.tags + + # service plan is required for creation + if not self.plan: + self.fail("Please specify app service plan in plan parameter.") + + if not old_plan: + # no existing service plan, create one + if (not self.plan.get('name') or not self.plan.get('sku')): + self.fail('Please specify name, is_linux, sku in plan') + + if 'location' not in self.plan: + plan_resource_group = self.get_resource_group(self.plan['resource_group']) + self.plan['location'] = plan_resource_group.location + + old_plan = self.create_app_service_plan() + + 
self.site.server_farm_id = old_plan['id']
+
+                # if linux, setup startup_file
+                if old_plan['is_linux']:
+                    if hasattr(self, 'startup_file'):
+                        self.site_config['app_command_line'] = self.startup_file
+
+                # set app setting
+                if self.app_settings:
+                    app_settings = []
+                    for key in self.app_settings.keys():
+                        app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+                    self.site_config['app_settings'] = app_settings
+            else:
+                # existing web app, do update
+                self.log("Web App instance already exists")
+
+                self.log('Result: {0}'.format(old_response))
+
+                update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+                if update_tags:
+                    to_be_updated = True
+
+                # check if root level property changed
+                if self.is_updatable_property_changed(old_response):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                # check if site_config changed
+                old_config = self.get_webapp_configuration()
+
+                if self.is_site_config_changed(old_config):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                # check if linux_fx_version changed
+                if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
+                    to_be_updated = True
+                    self.to_do.append(Actions.CreateOrUpdate)
+
+                self.app_settings_strDic = self.list_app_settings()
+
+                # purge existing app_settings:
+                if self.purge_app_settings:
+                    to_be_updated = True
+                    self.app_settings_strDic = dict()
+                    self.to_do.append(Actions.UpdateAppSettings)
+
+                # check if app settings changed
+                if self.purge_app_settings or self.is_app_settings_changed():
+                    to_be_updated = True
+                    self.to_do.append(Actions.UpdateAppSettings)
+
+                    if self.app_settings:
+                        for key in self.app_settings.keys():
+                            self.app_settings_strDic[key] = self.app_settings[key]
+
+        elif self.state == 'absent':
+            if old_response:
+                self.log("Delete Web App instance")
+                self.results['changed'] = True
+
+                if self.check_mode:
+                    return self.results
+
+                self.delete_webapp()
+
+                self.log('Web App instance deleted')
+
+            else:
+                self.fail("Web app {0} does not exist.".format(self.name))
+
+        if to_be_updated:
+            self.log('Need to Create/Update web app')
+            self.results['changed'] = True
+
+            if self.check_mode:
+                return self.results
+
+            if Actions.CreateOrUpdate in self.to_do:
+                response = self.create_update_webapp()
+
+                self.results['id'] = response['id']
+
+            if Actions.UpdateAppSettings in self.to_do:
+                update_response = self.update_app_settings()
+                self.results['id'] = update_response.id
+
+        webapp = None
+        if old_response:
+            webapp = old_response
+        if response:
+            webapp = response
+
+        if webapp:
+            if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
+               (webapp['state'] != 'Running' and self.app_state == 'started') or \
+               self.app_state == 'restarted':
+
+                self.results['changed'] = True
+                if self.check_mode:
+                    return self.results
+
+                self.set_webapp_state(self.app_state)
+
+        return self.results
+
+    # compare existing web app with input, determine whether an update is needed
+    def is_updatable_property_changed(self, existing_webapp):
+        for property_name in self.updatable_properties:
+            if hasattr(self, property_name) and getattr(self, property_name) is not None and \
+                    getattr(self, property_name) != existing_webapp.get(property_name, None):
+                return True
+
+        return False
+
+    # compare xxx_version
+    def is_site_config_changed(self, existing_config):
+        for fx_version in self.site_config_updatable_properties:
+            if self.site_config.get(fx_version):
+                if not getattr(existing_config, fx_version) or \
+                        getattr(existing_config,
fx_version).upper() != self.site_config.get(fx_version).upper():
+                    return True
+
+        return False
+
+    # comparing existing app setting with input, determine whether it's changed
+    def is_app_settings_changed(self):
+        if self.app_settings:
+            if self.app_settings_strDic:
+                for key in self.app_settings.keys():
+                    if self.app_settings[key] != self.app_settings_strDic.get(key, None):
+                        return True
+            else:
+                return True
+        return False
+
+    # comparing deployment source with input, determine whether it's changed
+    def is_deployment_source_changed(self, existing_webapp):
+        if self.deployment_source:
+            if self.deployment_source.get('url') \
+                    and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+                return True
+
+            if self.deployment_source.get('branch') \
+                    and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+                return True
+
+        return False
+
+    def create_update_webapp(self):
+        '''
+        Creates or updates Web App with the specified configuration.
+
+        :return: deserialized Web App instance state dictionary
+        '''
+        self.log(
+            "Creating / Updating the Web App instance {0}".format(self.name))
+
+        try:
+            skip_dns_registration = self.dns_registration
+            force_dns_registration = None if self.dns_registration is None else not self.dns_registration
+
+            response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
+                                                                 name=self.name,
+                                                                 site_envelope=self.site,
+                                                                 skip_dns_registration=skip_dns_registration,
+                                                                 skip_custom_domain_verification=self.skip_custom_domain_verification,
+                                                                 force_dns_registration=force_dns_registration,
+                                                                 ttl_in_seconds=self.ttl_in_seconds)
+            if isinstance(response, LROPoller):
+                response = self.get_poller_result(response)
+
+        except CloudError as exc:
+            self.log('Error attempting to create the Web App instance.')
+            self.fail(
+                "Error creating the Web App instance: {0}".format(str(exc)))
+        return webapp_to_dict(response)
+
+    def delete_webapp(self):
+        '''
+        Deletes specified Web App instance in the specified subscription and resource group.
+
+        :return: True
+        '''
+        self.log("Deleting the Web App instance {0}".format(self.name))
+        try:
+            response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
+                                                       name=self.name)
+        except CloudError as e:
+            self.log('Error attempting to delete the Web App instance.')
+            self.fail(
+                "Error deleting the Web App instance: {0}".format(str(e)))
+
+        return True
+
+    def get_webapp(self):
+        '''
+        Gets the properties of the specified Web App.
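+        Returns False when the web app cannot be found.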
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, + name=self.name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except CloudError as ex: + pass + + self.log("Didn't find web app {0} in resource group {1}".format( + self.name, self.resource_group)) + + return False + + def get_app_service_plan(self): + ''' + Gets app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Get App Service Plan {0}".format(self.plan['name'])) + + try: + response = self.web_client.app_service_plans.get( + resource_group_name=self.plan['resource_group'], + name=self.plan['name']) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("App Service Plan : {0} found".format(response.name)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + pass + + self.log("Didn't find app service plan {0} in resource group {1}".format( + self.plan['name'], self.plan['resource_group'])) + + return False + + def create_app_service_plan(self): + ''' + Creates app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Create App Service Plan {0}".format(self.plan['name'])) + + try: + # normalize sku + sku = _normalize_sku(self.plan['sku']) + + sku_def = SkuDescription(tier=get_sku_name( + sku), name=sku, capacity=(self.plan.get('number_of_workers', None))) + plan_def = AppServicePlan( + location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None))) + + poller = self.web_client.app_service_plans.create_or_update( + self.plan['resource_group'], self.plan['name'], plan_def) + + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + + self.log("Response : {0}".format(response)) + + return appserviceplan_to_dict(response) + except CloudError as ex: + self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format( + self.plan['name'], self.plan['resource_group'], str(ex))) + + def list_app_settings(self): + ''' + List application settings + :return: deserialized list response + ''' + self.log("List application setting") + + try: + + response = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response.properties + except CloudError as ex: + self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def update_app_settings(self): + ''' + Update application settings + :return: deserialized updating response + ''' + self.log("Update application setting") + + try: + response = self.web_client.web_apps.update_application_settings( + resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to update application settings for web app {0} in 
resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def create_or_update_source_control(self): + ''' + Update site source control + :return: deserialized updating response + ''' + self.log("Update site source control") + + if self.deployment_source is None: + return False + + self.deployment_source['is_manual_integration'] = False + self.deployment_source['is_mercurial'] = False + + try: + response = self.web_client.web_apps.create_or_update_source_control( + self.resource_group, self.name, self.deployment_source) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except CloudError as ex: + self.fail("Failed to update site source control for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def get_webapp_configuration(self): + ''' + Get web app configuration + :return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + return False + + def set_webapp_state(self, appstate): + ''' + Start/stop/restart web app + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name) + else: + self.fail("Invalid web app state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + +def main(): + """Main execution""" + AzureRMWebApps() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webapp_info.py b/test/support/integration/plugins/modules/azure_rm_webapp_info.py new file mode 100644 index 00000000..22286803 --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webapp_info.py @@ -0,0 +1,489 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webapp_info + +version_added: "2.9" + +short_description: Get Azure web app facts + +description: + - Get facts for a specific web app, all web apps in a resource group, or all web apps in the current subscription. + +options: + name: + description: + - Only show results for a specific web app. + resource_group: + description: + - Limit results by resource group. + return_publish_profile: + description: + - Indicate whether to return the publishing profile of the web app. 
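+ - When set to C(true), the returned facts include I(publishing_username) and I(publishing_password), so treat the output as sensitive.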
+ default: False + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + +extends_documentation_fragment: + - azure + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for web app by name + azure_rm_webapp_info: + resource_group: myResourceGroup + name: winwebapp1 + + - name: Get facts for web apps in resource group + azure_rm_webapp_info: + resource_group: myResourceGroup + + - name: Get facts for web apps with tags + azure_rm_webapp_info: + tags: + - testtag + - foo:bar +''' + +RETURN = ''' +webapps: + description: + - List of web apps. + returned: always + type: complex + contains: + id: + description: + - ID of the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp + name: + description: + - Name of the web app. + returned: always + type: str + sample: winwebapp1 + resource_group: + description: + - Resource group of the web app. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of the web app. + returned: always + type: str + sample: eastus + plan: + description: + - ID of app service plan used by the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan + app_settings: + description: + - App settings of the application. Only returned when web app has app settings. + returned: always + type: dict + sample: { + "testkey": "testvalue", + "testkey2": "testvalue2" + } + frameworks: + description: + - Frameworks of the application. Only returned when web app has frameworks. + returned: always + type: list + sample: [ + { + "name": "net_framework", + "version": "v4.0" + }, + { + "name": "java", + "settings": { + "java_container": "tomcat", + "java_container_version": "8.5" + }, + "version": "1.7" + }, + { + "name": "php", + "version": "5.6" + } + ] + availability_state: + description: + - Availability of this web app. + returned: always + type: str + sample: Normal + default_host_name: + description: + - Host name of the web app. + returned: always + type: str + sample: vxxisurg397winapp4.azurewebsites.net + enabled: + description: + - Indicates whether the web app is enabled. + returned: always + type: bool + sample: true + enabled_host_names: + description: + - Enabled host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net", + "vxxisurg397winapp4.scm.azurewebsites.net" + ] + host_name_ssl_states: + description: + - SSL state per host name of the web app. + returned: always + type: list + sample: [ + { + "hostType": "Standard", + "name": "vxxisurg397winapp4.azurewebsites.net", + "sslState": "Disabled" + }, + { + "hostType": "Repository", + "name": "vxxisurg397winapp4.scm.azurewebsites.net", + "sslState": "Disabled" + } + ] + host_names: + description: + - Host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net" + ] + outbound_ip_addresses: + description: + - Outbound IP addresses of the web app. + returned: always + type: str + sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45" + ftp_publish_url: + description: + - Publishing URL of the web app when deployment type is FTP. 
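+ - Derived from the publish profile entry whose publish method is FTP.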
+ returned: always + type: str + sample: ftp://xxxx.ftp.azurewebsites.windows.net + state: + description: + - State of the web app. + returned: always + type: str + sample: running + publishing_username: + description: + - Publishing profile user name. + returned: only when I(return_publish_profile=True). + type: str + sample: "$vxxisuRG397winapp4" + publishing_password: + description: + - Publishing profile password. + returned: only when I(return_publish_profile=True). + type: str + sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A" + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + returned: always + type: dict + sample: { tag1: abc } +''' +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +AZURE_OBJECT_CLASS = 'WebApp' + + +class AzureRMWebAppInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list'), + return_publish_profile=dict(type='bool', default=False), + ) + + self.results = dict( + changed=False, + webapps=[], + ) + + self.name = None + self.resource_group = None + self.tags = None + self.return_publish_profile = False + + self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby'] + + super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_webapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", + version='2.13', collection_name='ansible.builtin') + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['webapps'] = self.list_by_name() + elif self.resource_group: + self.results['webapps'] = self.list_by_resource_group() + else: + self.results['webapps'] = self.list_all() + + return self.results + + def list_by_name(self): + self.log('Get web app {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.web_client.web_apps.get(self.resource_group, self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + curated_result = self.get_curated_webapp(self.resource_group, self.name, item) + result = [curated_result] + + return result + + def list_by_resource_group(self): + self.log('List web apps in resource groups {0}'.format(self.resource_group)) + try: + response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group)) + except CloudError as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(self.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_all(self): + self.log('List web apps in current subscription') + try: + response = list(self.web_client.web_apps.list()) + except CloudError as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps, 
request id {0} - {1}".format(request_id, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(item.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_webapp_configuration(self, resource_group, name): + self.log('Get web app {0} configuration'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def list_webapp_appsettings(self, resource_group, name): + self.log('Get web app {0} app settings'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def get_publish_credentials(self, resource_group, name): + self.log('Get web app {0} publish credentials'.format(name)) + try: + poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name) + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex))) + return response + + def get_webapp_ftp_publish_url(self, resource_group, name): + import xmltodict + + self.log('Get web app {0} app publish profile'.format(name)) + + url = None + try: + content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name) + if not content: + return url + + full_xml = '' + for f in content: + full_xml += f.decode() + profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile'] + + if not profiles: + return url + + for profile in profiles: + if profile['@publishMethod'] == 'FTP': + url = profile['@publishUrl'] + + except CloudError as ex: + self.fail('Error getting web app {0} app settings'.format(name)) + + return url + + def get_curated_webapp(self, resource_group, name, webapp): + pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS) + + try: + site_config = self.list_webapp_configuration(resource_group, name) + app_settings = self.list_webapp_appsettings(resource_group, name) + publish_cred = self.get_publish_credentials(resource_group, name) + ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name) + except CloudError as ex: + pass + return self.construct_curated_webapp(webapp=pip, + configuration=site_config, + app_settings=app_settings, + deployment_slot=None, + ftp_publish_url=ftp_publish_url, + publish_credentials=publish_cred) + + def construct_curated_webapp(self, + webapp, + configuration=None, + app_settings=None, + deployment_slot=None, + ftp_publish_url=None, + publish_credentials=None): + curated_output = dict() + curated_output['id'] = webapp['id'] + curated_output['name'] = webapp['name'] + curated_output['resource_group'] = webapp['properties']['resourceGroup'] + curated_output['location'] = webapp['location'] + curated_output['plan'] = webapp['properties']['serverFarmId'] + 
curated_output['tags'] = webapp.get('tags', None) + + # important properties from the output; they do not match the input arguments. + curated_output['app_state'] = webapp['properties']['state'] + curated_output['availability_state'] = webapp['properties']['availabilityState'] + curated_output['default_host_name'] = webapp['properties']['defaultHostName'] + curated_output['host_names'] = webapp['properties']['hostNames'] + curated_output['enabled'] = webapp['properties']['enabled'] + curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames'] + curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates'] + curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses'] + + # curated site_config + if configuration: + curated_output['frameworks'] = [] + for fx_name in self.framework_names: + fx_version = configuration.get(fx_name + '_version', None) + if fx_version: + fx = { + 'name': fx_name, + 'version': fx_version + } + # java container setting + if fx_name == 'java': + if configuration['java_container'] and configuration['java_container_version']: + settings = { + 'java_container': configuration['java_container'].lower(), + 'java_container_version': configuration['java_container_version'] + } + fx['settings'] = settings + + curated_output['frameworks'].append(fx) + + # linux_fx_version + if configuration.get('linux_fx_version', None): + tmp = configuration.get('linux_fx_version').split("|") + if len(tmp) == 2: + curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]}) + + # curated app_settings + if app_settings and app_settings.get('properties', None): + curated_output['app_settings'] = dict() + for item in app_settings['properties']: + curated_output['app_settings'][item] = app_settings['properties'][item] + + # curated deployment_slot + if deployment_slot: + curated_output['deployment_slot'] = deployment_slot + + # ftp_publish_url + if ftp_publish_url: + curated_output['ftp_publish_url'] = ftp_publish_url + + # curated publish credentials + if publish_credentials and self.return_publish_profile: + curated_output['publishing_username'] = publish_credentials.publishing_user_name + curated_output['publishing_password'] = publish_credentials.publishing_password + return curated_output + + +def main(): + AzureRMWebAppInfo() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/azure_rm_webappslot.py b/test/support/integration/plugins/modules/azure_rm_webappslot.py new file mode 100644 index 00000000..ddba710b --- /dev/null +++ b/test/support/integration/plugins/modules/azure_rm_webappslot.py @@ -0,0 +1,1058 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappslot +version_added: "2.8" +short_description: Manage Azure Web App slot +description: + - Create, update and delete Azure Web App slot. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the deployment slot to create or update. + required: True + webapp_name: + description: + - Name of the web app to which this deployment slot belongs. 
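+ - The web app must already exist; the module fails if it cannot be found.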
+ required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + configuration_source: + description: + - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot. + auto_swap_slot_name: + description: + - Used to configure target slot name to auto swap, or disable auto swap. + - Set it to the target slot name to enable auto swap. + - Set it to False to disable auto slot swap. + swap: + description: + - Swap deployment slots of a web app. + suboptions: + action: + description: + - Swap types. + - C(preview) is to apply target slot settings on source slot first. + - C(swap) is to complete swapping. + - C(reset) is to reset the swap. + choices: + - preview + - swap + - reset + default: preview + target_slot: + description: + - Name of target slot to swap. If set to None, then swap with production slot. + preserve_vnet: + description: + - C(True) to preserve virtual network to the slot during swap. Otherwise C(False). + type: bool + default: True + frameworks: + description: + - Set of run time framework settings. Each setting is a dictionary. + - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info. + suboptions: + name: + description: + - Name of the framework. + - The supported framework lists for Windows and Linux web apps differ. + - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018. + - Windows web apps support multiple frameworks at the same time. + - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018. + - Linux web apps support only one framework. + - Java framework is mutually exclusive with others. + choices: + - java + - net_framework + - php + - python + - ruby + - dotnetcore + - node + version: + description: + - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info. + - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5. + - C(php) supported value sample, C(5.5), C(5.6), C(7.0). + - C(python) supported value sample, C(5.5), C(5.6), C(7.0). + - C(node) supported value sample, C(6.6), C(6.9). + - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2). + - C(ruby) supported value sample, C(2.3). + - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app. + settings: + description: + - List of settings of the framework. + suboptions: + java_container: + description: + - Name of Java container. This is supported by the C(java) framework only, for example C(Tomcat), C(Jetty). + java_container_version: + description: + - Version of Java container. This is supported by the C(java) framework only. + - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3). + container_settings: + description: + - Web app slot container settings. + suboptions: + name: + description: + - Name of container, for example C(imagename:tag). + registry_server_url: + description: + - Container registry server URL, for example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + startup_file: + description: + - The slot startup file. + - This only applies to Linux web app slots. + app_settings: + description: + - Configure web app slot application settings. Suboptions are in key value pair format. 
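+ - Existing settings are merged with I(app_settings) unless I(purge_app_settings) is set.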
+ purge_app_settings: + description: + - Purge any existing application settings. Replace slot application settings with app_settings. + type: bool + deployment_source: + description: + - Deployment source for git. + suboptions: + url: + description: + - Repository URL of deployment source. + branch: + description: + - The branch name of the repository. + app_state: + description: + - Start/Stop/Restart the slot. + type: str + choices: + - started + - stopped + - restarted + default: started + state: + description: + - State of the Web App deployment slot. + - Use C(present) to create or update a slot and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a webapp slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + configuration_source: myJavaWebApp + app_settings: + testkey: testvalue + + - name: swap the slot with the production slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + swap: + action: swap + + - name: stop the slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + app_state: stopped + + - name: update a webapp slot app settings + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + app_settings: + testkey: testvalue2 + + - name: update a webapp slot frameworks + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + frameworks: + - name: "node" + version: "10.1" +''' + +RETURN = ''' +id: + description: + - ID of the current slot. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1 +''' + +import time +from ansible.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrest.serialization import Model + from azure.mgmt.web.models import ( + site_config, app_service_plan, Site, + AppServicePlan, SkuDescription, NameValuePair + ) +except ImportError: + # This is handled in azure_rm_common + pass + +swap_spec = dict( + action=dict( + type='str', + choices=[ + 'preview', + 'swap', + 'reset' + ], + default='preview' + ), + target_slot=dict( + type='str' + ), + preserve_vnet=dict( + type='bool', + default=True + ) +) + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +def slot_to_dict(slot): + return dict( + id=slot.id, + resource_group=slot.resource_group, + server_farm_id=slot.server_farm_id, + target_swap_slot=slot.target_swap_slot, + enabled_host_names=slot.enabled_host_names, + slot_swap_status=slot.slot_swap_status, + name=slot.name, + location=slot.location, + enabled=slot.enabled, + reserved=slot.reserved, + host_names_disabled=slot.host_names_disabled, + state=slot.state, + repository_site_name=slot.repository_site_name, + default_host_name=slot.default_host_name, + kind=slot.kind, + site_config=slot.site_config, + tags=slot.tags if slot.tags else None + ) + + +class Actions: + NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4) + + +class AzureRMWebAppSlots(AzureRMModuleBase): + """Configuration class for an Azure RM Web App slot resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + webapp_name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + configuration_source=dict( + type='str' + ), + auto_swap_slot_name=dict( + type='raw' + ), + swap=dict( + type='dict', + options=swap_spec + ), + frameworks=dict( + type='list', + elements='dict', + 
options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + app_settings=dict( + type='dict' + ), + purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.webapp_name = None + self.location = None + + self.auto_swap_slot_name = None + self.swap = None + self.tags = None + self.startup_file = None + self.configuration_source = None + self.clone = False + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = Actions.NoAction + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_frameworks = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "linux_fx_version"] + + self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "scm_type": + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get web app + webapp_response = self.get_webapp() + + if not webapp_response: + self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group)) + + # get slot + old_response = self.get_slot() + + # set is_linux + is_linux = True if webapp_response['reserved'] else False + + if self.state == 'present': + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + + if 
self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # set auto_swap_slot_name + if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str): + self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name + if self.auto_swap_slot_name is False: + self.site_config['auto_swap_slot_name'] = None + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + # check if the slot already present in the webapp + if not old_response: + self.log("Web App slot doesn't exist") + + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + self.site.tags = self.tags + + # if linux, setup startup_file + if self.startup_file: + self.site_config['app_command_line'] = self.startup_file + + # set app setting + if self.app_settings: + app_settings = [] + for key in self.app_settings.keys(): + app_settings.append(NameValuePair(name=key, value=self.app_settings[key])) + + self.site_config['app_settings'] = app_settings + + # clone slot + if self.configuration_source: + self.clone = True + + else: + # existing slot, do update + self.log("Web App slot already exists") + + self.log('Result: {0}'.format(old_response)) + + update_tags, self.site.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + + # check if site_config changed + old_config = self.get_configuration_slot(self.name) + + if self.is_site_config_changed(old_config): + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + self.app_settings_strDic = self.list_app_settings_slot(self.name) + + # purge existing app_settings: + if self.purge_app_settings: + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + self.app_settings_strDic = dict() + + # check if app settings changed + if 
self.purge_app_settings or self.is_app_settings_changed(): + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + + if self.app_settings: + for key in self.app_settings.keys(): + self.app_settings_strDic[key] = self.app_settings[key] + + elif self.state == 'absent': + if old_response: + self.log("Delete Web App slot") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_slot() + + self.log('Web App slot deleted') + + else: + self.log("Web app slot {0} does not exist.".format(self.name)) + + if to_be_updated: + self.log('Need to create/update the web app slot') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.CreateOrUpdate: + response = self.create_update_slot() + + self.results['id'] = response['id'] + + if self.clone: + self.clone_slot() + + if self.to_do == Actions.UpdateAppSettings: + self.update_app_settings_slot() + + slot = None + if response: + slot = response + if old_response: + slot = old_response + + if slot: + if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \ + (slot['state'] != 'Running' and self.app_state == 'started') or \ + self.app_state == 'restarted': + + self.results['changed'] = True + if self.check_mode: + return self.results + + self.set_state_slot(self.app_state) + + if self.swap: + self.results['changed'] = True + if self.check_mode: + return self.results + + self.swap_slot() + + return self.results + + # compare site config + def is_site_config_changed(self, existing_config): + for fx_version in self.site_config_updatable_frameworks: + if self.site_config.get(fx_version): + if not getattr(existing_config, fx_version) or \ + getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper(): + return True + + if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None: + return True + elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None): + return True + return False + + # comparing existing app setting with input, determine whether it's changed + def is_app_settings_changed(self): + if self.app_settings: + if len(self.app_settings_strDic) != len(self.app_settings): + return True + + if self.app_settings_strDic != self.app_settings: + return True + return False + + # comparing deployment source with input, determine whether it's changed + def is_deployment_source_changed(self, existing_webapp): + if self.deployment_source: + if self.deployment_source.get('url') \ + and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']: + return True + + if self.deployment_source.get('branch') \ + and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']: + return True + + return False + + def create_update_slot(self): + ''' + Creates or updates Web App slot with the specified configuration. 
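+ Polls the returned LROPoller to completion before converting the result.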
+ + :return: deserialized Web App slot state dictionary + ''' + self.log( + "Creating / Updating the Web App slot {0}".format(self.name)) + + try: + response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group, + slot=self.name, + name=self.webapp_name, + site_envelope=self.site) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except CloudError as exc: + self.log('Error attempting to create the Web App slot instance.') + self.fail("Error creating the Web App slot: {0}".format(str(exc))) + return slot_to_dict(response) + + def delete_slot(self): + ''' + Deletes specified Web App slot in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Web App slot {0}".format(self.name)) + try: + response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + except CloudError as e: + self.log('Error attempting to delete the Web App slot.') + self.fail( + "Error deleting the Web App slot: {0}".format(str(e))) + + return True + + def get_webapp(self): + ''' + Gets the properties of the specified Web App. + + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.webapp_name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, + name=self.webapp_name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except CloudError as ex: + pass + + self.log("Didn't find web app {0} in resource group {1}".format( + self.webapp_name, self.resource_group)) + + return False + + def get_slot(self): + ''' + Gets the properties of the specified Web App slot. 
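+ Returns False instead of raising when the slot cannot be found.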
+ + :return: deserialized Web App slot state dictionary + ''' + self.log( + "Checking if the Web App slot {0} is present".format(self.name)) + + response = None + + try: + response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App slot: {0} found".format(response.name)) + return slot_to_dict(response) + + except CloudError as ex: + pass + + self.log("Didn't find web app slot {0} in resource group {1}".format(self.name, self.resource_group)) + + return False + + def list_app_settings(self): + ''' + List webapp application settings + :return: deserialized list response + ''' + self.log("List webapp application settings") + + try: + + response = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, name=self.webapp_name) + self.log("Response : {0}".format(response)) + + return response.properties + except CloudError as ex: + self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def list_app_settings_slot(self, slot_name): + ''' + List application settings + :return: deserialized list response + ''' + self.log("List application settings") + + try: + + response = self.web_client.web_apps.list_application_settings_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name) + self.log("Response : {0}".format(response)) + + return response.properties + except CloudError as ex: + self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def update_app_settings_slot(self, slot_name=None, app_settings=None): + ''' + Update application settings + :return: deserialized updating response + ''' + self.log("Update application settings") + + if slot_name is None: + slot_name = self.name + if app_settings is None: + app_settings = self.app_settings_strDic + try: + response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=slot_name, + kind=None, + properties=app_settings) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except CloudError as ex: + self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def create_or_update_source_control_slot(self): + ''' + Update site source control + :return: deserialized updating response + ''' + self.log("Update site source control") + + if self.deployment_source is None: + return False + + self.deployment_source['is_manual_integration'] = False + self.deployment_source['is_mercurial'] = False + + try: + response = self.web_client.web_apps.create_or_update_source_control_slot( + resource_group_name=self.resource_group, + name=self.webapp_name, + site_source_control=self.deployment_source, + slot=self.name) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except CloudError as ex: + self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def get_configuration(self): + ''' + Get web app configuration + 
:return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.webapp_name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.webapp_name, self.resource_group, str(ex))) + + def get_configuration_slot(self, slot_name): + ''' + Get slot configuration + :return: deserialized slot configuration response + ''' + self.log("Get web app slot configuration") + + try: + + response = self.web_client.web_apps.get_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def update_configuration_slot(self, slot_name=None, site_config=None): + ''' + Update slot configuration + :return: deserialized slot configuration response + ''' + self.log("Update web app slot configuration") + + if slot_name is None: + slot_name = self.name + if site_config is None: + site_config = self.site_config + try: + + response = self.web_client.web_apps.update_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config) + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def set_state_slot(self, appstate): + ''' + Start/stop/restart web app slot + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + else: + self.fail("Invalid web app slot state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + def swap_slot(self): + ''' + Swap slot + :return: deserialized response + ''' + self.log("Swap slot") + + try: + if self.swap['action'] == 'swap': + if self.swap['target_slot'] is None: + response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group, + name=self.webapp_name, + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet']) + else: + response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet']) + elif self.swap['action'] == 'preview': + if self.swap['target_slot'] is None: + response = 
self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group, + name=self.webapp_name, + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet']) + else: + response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet']) + elif self.swap['action'] == 'reset': + if self.swap['target_slot'] is None: + response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group, + name=self.webapp_name) + else: + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.swap['target_slot']) + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + + self.log("Response : {0}".format(response)) + + return response + except CloudError as ex: + self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex))) + + def clone_slot(self): + if self.configuration_source: + src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source + + if src_slot is None: + site_config_clone_from = self.get_configuration() + else: + site_config_clone_from = self.get_configuration_slot(slot_name=src_slot) + + self.update_configuration_slot(site_config=site_config_clone_from) + + if src_slot is None: + app_setting_clone_from = self.list_app_settings() + else: + app_setting_clone_from = self.list_app_settings_slot(src_slot) + + if self.app_settings: + app_setting_clone_from.update(self.app_settings) + + self.update_app_settings_slot(app_settings=app_setting_clone_from) + + +def main(): + """Main execution""" + AzureRMWebAppSlots() + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/cloud_init_data_facts.py b/test/support/integration/plugins/modules/cloud_init_data_facts.py new file mode 100644 index 00000000..4f871b99 --- /dev/null +++ b/test/support/integration/plugins/modules/cloud_init_data_facts.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cloud_init_data_facts +short_description: Retrieve facts of cloud-init. +description: + - Gathers facts by reading the status.json and result.json of cloud-init. +version_added: 2.6 +author: René Moser (@resmo) +options: + filter: + description: + - Filter facts + choices: [ status, result ] +notes: + - See http://cloudinit.readthedocs.io/ for more information about cloud-init. +''' + +EXAMPLES = ''' +- name: Gather all facts of cloud init + cloud_init_data_facts: + register: result + +- debug: + var: result + +- name: Wait for cloud init to finish + cloud_init_data_facts: + filter: status + register: res + until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" + retries: 50 + delay: 5 +''' + +RETURN = ''' +--- +cloud_init_data_facts: + description: Facts of result and status. 
+ returned: success + type: dict + sample: '{ + "status": { + "v1": { + "datasource": "DataSourceCloudStack", + "errors": [] + } + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + } + }' +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text + + +CLOUD_INIT_PATH = "/var/lib/cloud/data/" + + +def gather_cloud_init_data_facts(module): + res = { + 'cloud_init_data_facts': dict() + } + + for i in ['result', 'status']: + filter = module.params.get('filter') + if filter is None or filter == i: + res['cloud_init_data_facts'][i] = dict() + json_file = CLOUD_INIT_PATH + i + '.json' + + if os.path.exists(json_file): + f = open(json_file, 'rb') + contents = to_text(f.read(), errors='surrogate_or_strict') + f.close() + + if contents: + res['cloud_init_data_facts'][i] = module.from_json(contents) + return res + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filter=dict(choices=['result', 'status']), + ), + supports_check_mode=True, + ) + + facts = gather_cloud_init_data_facts(module) + result = dict(changed=False, ansible_facts=facts, **facts) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/cloudformation.py b/test/support/integration/plugins/modules/cloudformation.py new file mode 100644 index 00000000..cd031465 --- /dev/null +++ b/test/support/integration/plugins/modules/cloudformation.py @@ -0,0 +1,837 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: cloudformation +short_description: Create or delete an AWS CloudFormation stack +description: + - Launches or updates an AWS CloudFormation stack and waits for it to complete. +notes: + - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh. + The version listed in the requirements is the oldest version that works with the module as a whole. + Some features may require recent versions, and we do not pinpoint a minimum version for each feature. + Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs. +version_added: "1.1" +options: + stack_name: + description: + - Name of the CloudFormation stack. + required: true + type: str + disable_rollback: + description: + - If a stack fails to form, rollback will remove the stack. + default: false + type: bool + on_create_failure: + description: + - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option. 
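+ - Only takes effect when a new stack is being created.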
+ choices: + - DO_NOTHING + - ROLLBACK + - DELETE + version_added: "2.8" + type: str + create_timeout: + description: + - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED + version_added: "2.6" + type: int + template_parameters: + description: + - A list of hashes of all the template variables for the stack. The value can be a string or a dict. + - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example). + default: {} + type: dict + state: + description: + - If I(state=present), stack will be created. + - If I(state=present) and if stack exists and template has changed, it will be updated. + - If I(state=absent), stack will be removed. + default: present + choices: [ present, absent ] + type: str + template: + description: + - The local path of the CloudFormation template. + - This must be the full path to the file, relative to the working directory. If using roles this may look + like C(roles/cloudformation/files/cloudformation-example.json). + - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) + must be specified (but only one of them). + - If I(state=present), the stack does exist, and neither I(template), + I(template_body) nor I(template_url) are specified, the previous template will be reused. + type: path + notification_arns: + description: + - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events. + version_added: "2.0" + type: str + stack_policy: + description: + - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified. + for instance, allow all updates U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051) + version_added: "1.9" + type: str + tags: + description: + - Dictionary of tags to associate with stack and its resources during stack creation. + - Can be updated later, updating tags removes previous entries. + version_added: "1.4" + type: dict + template_url: + description: + - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an + S3 bucket in the same region as the stack. + - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) + must be specified (but only one of them). + - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified, + the previous template will be reused. + version_added: "2.0" + type: str + create_changeset: + description: + - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs + U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)." + - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be + deleted immediately with no changeset." + type: bool + default: false + version_added: "2.4" + changeset_name: + description: + - Name given to the changeset when creating a changeset. + - Only used when I(create_changeset=true). + - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters. 
+ See the AWS Change Sets docs for more information + U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html) + version_added: "2.4" + type: str + template_format: + description: + - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14. + - Templates are now passed raw to CloudFormation regardless of format. + version_added: "2.0" + type: str + role_arn: + description: + - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role + docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html) + version_added: "2.3" + type: str + termination_protection: + description: + - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18. + type: bool + version_added: "2.5" + template_body: + description: + - Template body. Use this to pass in the actual body of the CloudFormation template. + - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url) + must be specified (but only one of them). + - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) + are specified, the previous template will be reused. + version_added: "2.5" + type: str + events_limit: + description: + - Maximum number of CloudFormation events to fetch from a stack when creating or updating it. + default: 200 + version_added: "2.7" + type: int + backoff_delay: + description: + - Number of seconds to wait for the next retry. + default: 3 + version_added: "2.8" + type: int + required: False + backoff_max_delay: + description: + - Maximum amount of time to wait between retries. + default: 30 + version_added: "2.8" + type: int + required: False + backoff_retries: + description: + - Number of times to retry operation. + - AWS API throttling mechanism fails CloudFormation module so we have to retry a couple of times. + default: 10 + version_added: "2.8" + type: int + required: False + capabilities: + description: + - Specify capabilities that stack template contains. + - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND). + type: list + elements: str + version_added: "2.8" + default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ] + +author: "James S. 
Martin (@jsmartin)" +extends_documentation_fragment: +- aws +- ec2 +requirements: [ boto3, botocore>=1.5.45 ] +''' + +EXAMPLES = ''' +- name: create a cloudformation stack + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + +# Basic role example +- name: create a stack, specify role that cloudformation assumes + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role' + +- name: delete a stack + cloudformation: + stack_name: "ansible-cloudformation-old" + state: "absent" + +# Create a stack, pass in template from a URL, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template via an URL + cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template body via lookup template + cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_body: "{{ lookup('template', 'cloudformation.j2') }}" + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute +# When use_previous_value is set to True, the given value will be ignored and +# CloudFormation will use the value from a previously submitted template. +# If use_previous_value is set to False (default) the given value is used. +- cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + template: "files/cloudformation-example.json" + template_parameters: + DBSnapshotIdentifier: + use_previous_value: True + value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot + DBName: + use_previous_value: True + tags: + Stack: "ansible-cloudformation" + +# Enable termination protection on a stack. +# If the stack already exists, this will update its termination protection +- name: enable termination protection during stack creation + cloudformation: + stack_name: my_stack + state: present + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + termination_protection: yes + +# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED +# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back. 
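+# (create_timeout maps to CloudFormation's TimeoutInMinutes, so the example below gives
+# the stack five minutes to finish creating)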
+- name: set a timeout for stack creation
+  cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    create_timeout: 5
+
+# Configure rollback behaviour on unsuccessful creation of a stack, allowing
+# CloudFormation to clean up, or to do nothing, in the event of an unsuccessful
+# deployment.
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create.
+- name: create stack which will delete on creation failure
+  cloudformation:
+    stack_name: my_stack
+    state: present
+    template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+    on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+  type: list
+  description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+  returned: always
+  sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+  description: Debugging logs. Useful when modifying or finding an error.
+  returned: always
+  type: list
+  sample: ["updating stack"]
+change_set_id:
+  description: The ID of the stack change set if one was created.
+  returned: I(state=present) and I(create_changeset=true)
+  type: str
+  sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+  description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+  returned: state == present
+  type: list
+  sample: [
+          {
+              "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+              "logical_resource_id": "CFTestSg",
+              "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+              "resource_type": "AWS::EC2::SecurityGroup",
+              "status": "UPDATE_COMPLETE",
+              "status_reason": null
+          }
+      ]
+stack_outputs:
+  type: dict
+  description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+  returned: state == present
+  sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+'''  # NOQA
+
+import json
+import time
+import uuid
+import traceback
+from hashlib import sha1
+
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+    '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
+    ret = {'events': [], 'log': []}
+
+    try:
+        pg = cfn.get_paginator(
+            'describe_stack_events'
+        ).paginate(
+            StackName=stack_name,
+            PaginationConfig={'MaxItems': events_limit}
+        )
+        if token_filter is not None:
+            events = list(pg.search(
+                "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+            ))
+        else:
+            events = list(pg.search("StackEvents[*]"))
+    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+        error_msg = boto_exception(err)
+        if 'does not exist' in error_msg:
+            # missing stack, don't bail.
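+            # record the absence in the log and hand back the (empty) event list;
+            # stack_operation relies on this to report a deleted stack instead of failing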
+ ret['log'].append('Stack does not exist.') + return ret + ret['log'].append('Unknown error: ' + str(error_msg)) + return ret + + for e in events: + eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) + ret['events'].append(eventline) + + if e['ResourceStatus'].endswith('FAILED'): + failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) + ret['log'].append(failline) + + return ret + + +def create_stack(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.") + + # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and + # 'OnFailure' only apply on creation, not update. + if module.params.get('on_create_failure') is not None: + stack_params['OnFailure'] = module.params['on_create_failure'] + else: + stack_params['DisableRollback'] = module.params['disable_rollback'] + + if module.params.get('create_timeout') is not None: + stack_params['TimeoutInMinutes'] = module.params['create_timeout'] + if module.params.get('termination_protection') is not None: + if boto_supports_termination_protection(cfn): + stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection')) + else: + module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18") + + try: + response = cfn.create_stack(**stack_params) + # Use stack ID to follow stack state in case of on_create_failure = DELETE + result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None)) + except Exception as err: + error_msg = boto_exception(err) + module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc()) + if not result: + module.fail_json(msg="empty result") + return result + + +def list_changesets(cfn, stack_name): + res = cfn.list_change_sets(StackName=stack_name) + return [cs['ChangeSetName'] for cs in res['Summaries']] + + +def create_changeset(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + module.fail_json(msg="Either 'template' or 'template_url' is required.") + if module.params['changeset_name'] is not None: + stack_params['ChangeSetName'] = module.params['changeset_name'] + + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + changeset_name = build_changeset_name(stack_params) + stack_params['ChangeSetName'] = changeset_name + + # Determine if this changeset already exists + pending_changesets = list_changesets(cfn, stack_params['StackName']) + if changeset_name in pending_changesets: + warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets) + result = dict(changed=False, output='ChangeSet %s already exists.' 
% changeset_name, warnings=[warning]) + else: + cs = cfn.create_change_set(**stack_params) + # Make sure we don't enter an infinite loop + time_end = time.time() + 600 + while time.time() < time_end: + try: + newcs = cfn.describe_change_set(ChangeSetName=cs['Id']) + except botocore.exceptions.BotoCoreError as err: + error_msg = boto_exception(err) + module.fail_json(msg=error_msg) + if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS': + time.sleep(1) + elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']: + cfn.delete_change_set(ChangeSetName=cs['Id']) + result = dict(changed=False, + output='The created Change Set did not contain any changes to this stack and was deleted.') + # a failed change set does not trigger any stack events so we just want to + # skip any further processing of result and just return it directly + return result + else: + break + # Lets not hog the cpu/spam the AWS API + time.sleep(1) + result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit) + result['change_set_id'] = cs['Id'] + result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']), + 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'], + 'NOTE that dependencies on this stack might fail due to pending changes!'] + except Exception as err: + error_msg = boto_exception(err) + if 'No updates are to be performed.' in error_msg: + result = dict(changed=False, output='Stack is already up-to-date.') + else: + module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc()) + + if not result: + module.fail_json(msg="empty result") + return result + + +def update_stack(module, stack_params, cfn, events_limit): + if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + stack_params['UsePreviousTemplate'] = True + + # if the state is present and the stack already exists, we try to update it. + # AWS will tell us if the stack template and parameters are the same and + # don't need to be updated. + try: + cfn.update_stack(**stack_params) + result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None)) + except Exception as err: + error_msg = boto_exception(err) + if 'No updates are to be performed.' 
in error_msg: + result = dict(changed=False, output='Stack is already up-to-date.') + else: + module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc()) + if not result: + module.fail_json(msg="empty result") + return result + + +def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): + '''updates termination protection of a stack''' + if not boto_supports_termination_protection(cfn): + module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18") + stack = get_stack_facts(cfn, stack_name) + if stack: + if stack['EnableTerminationProtection'] is not desired_termination_protection_state: + try: + cfn.update_termination_protection( + EnableTerminationProtection=desired_termination_protection_state, + StackName=stack_name) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=boto_exception(e), exception=traceback.format_exc()) + + +def boto_supports_termination_protection(cfn): + '''termination protection was added in botocore 1.7.18''' + return hasattr(cfn, "update_termination_protection") + + +def stack_operation(cfn, stack_name, operation, events_limit, op_token=None): + '''gets the status of a stack while it is created/updated/deleted''' + existed = [] + while True: + try: + stack = get_stack_facts(cfn, stack_name) + existed.append('yes') + except Exception: + # If the stack previously existed, and now can't be found then it's + # been deleted successfully. + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + if not stack: + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'}) + return ret + # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE + # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 + elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET': + ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation}) + return ret + elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE': + ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'}) + return ret + # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. + elif stack['StackStatus'].endswith('_COMPLETE'): + ret.update({'changed': True, 'output': 'Stack %s complete' % operation}) + return ret + elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + return ret + # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
+ elif stack['StackStatus'].endswith('_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + return ret + else: + # this can loop forever :/ + time.sleep(5) + return {'failed': True, 'output': 'Failed for unknown reasons.'} + + +def build_changeset_name(stack_params): + if 'ChangeSetName' in stack_params: + return stack_params['ChangeSetName'] + + json_params = json.dumps(stack_params, sort_keys=True) + + return 'Ansible-{0}-{1}'.format( + stack_params['StackName'], + sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() + ) + + +def check_mode_changeset(module, stack_params, cfn): + """Create a change set, describe it and delete it before returning check mode outputs.""" + stack_params['ChangeSetName'] = build_changeset_name(stack_params) + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + change_set = cfn.create_change_set(**stack_params) + for i in range(60): # total time 5 min + description = cfn.describe_change_set(ChangeSetName=change_set['Id']) + if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + break + time.sleep(5) + else: + # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail + module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + + cfn.delete_change_set(ChangeSetName=change_set['Id']) + + reason = description.get('StatusReason') + + if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']: + return {'changed': False, 'msg': reason, 'meta': description['StatusReason']} + return {'changed': True, 'msg': reason, 'meta': description['Changes']} + + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def get_stack_facts(cfn, stack_name): + try: + stack_response = cfn.describe_stacks(StackName=stack_name) + stack_info = stack_response['Stacks'][0] + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist' in error_msg: + # missing stack, don't bail. + return None + + # other error, bail. 
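+        # re-raise so the callers' own exception handling (boto_exception / fail_json)
+        # can surface the original error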
+ raise err + + if stack_response and stack_response.get('Stacks', None): + stacks = stack_response['Stacks'] + if len(stacks): + stack_info = stacks[0] + + return stack_info + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + stack_name=dict(required=True), + template_parameters=dict(required=False, type='dict', default={}), + state=dict(default='present', choices=['present', 'absent']), + template=dict(default=None, required=False, type='path'), + notification_arns=dict(default=None, required=False), + stack_policy=dict(default=None, required=False), + disable_rollback=dict(default=False, type='bool'), + on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), + create_timeout=dict(default=None, type='int'), + template_url=dict(default=None, required=False), + template_body=dict(default=None, required=False), + template_format=dict(removed_in_version='2.14'), + create_changeset=dict(default=False, type='bool'), + changeset_name=dict(default=None, required=False), + role_arn=dict(default=None, required=False), + tags=dict(default=None, type='dict'), + termination_protection=dict(default=None, type='bool'), + events_limit=dict(default=200, type='int'), + backoff_retries=dict(type='int', default=10, required=False), + backoff_delay=dict(type='int', default=3, required=False), + backoff_max_delay=dict(type='int', default=30, required=False), + capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['template_url', 'template', 'template_body'], + ['disable_rollback', 'on_create_failure']], + supports_check_mode=True + ) + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required for this module') + + invalid_capabilities = [] + user_capabilities = module.params.get('capabilities') + for user_cap in user_capabilities: + if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']: + invalid_capabilities.append(user_cap) + + if invalid_capabilities: + module.fail_json(msg="Specified capabilities are invalid : %r," + " please check documentation for valid capabilities" % invalid_capabilities) + + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = { + 'Capabilities': user_capabilities, + 'ClientRequestToken': to_native(uuid.uuid4()), + } + state = module.params['state'] + stack_params['StackName'] = module.params['stack_name'] + + if module.params['template'] is not None: + with open(module.params['template'], 'r') as template_fh: + stack_params['TemplateBody'] = template_fh.read() + elif module.params['template_body'] is not None: + stack_params['TemplateBody'] = module.params['template_body'] + elif module.params['template_url'] is not None: + stack_params['TemplateURL'] = module.params['template_url'] + + if module.params.get('notification_arns'): + stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + else: + stack_params['NotificationARNs'] = [] + + # can't check the policy when verifying. 
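+    # the stack policy is only ever sent with create_stack/update_stack, which is why
+    # check mode and the changeset path skip reading it here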
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: + with open(module.params['stack_policy'], 'r') as stack_policy_fh: + stack_params['StackPolicyBody'] = stack_policy_fh.read() + + template_parameters = module.params['template_parameters'] + + stack_params['Parameters'] = [] + for k, v in template_parameters.items(): + if isinstance(v, dict): + # set parameter based on a dict to allow additional CFN Parameter Attributes + param = dict(ParameterKey=k) + + if 'value' in v: + param['ParameterValue'] = str(v['value']) + + if 'use_previous_value' in v and bool(v['use_previous_value']): + param['UsePreviousValue'] = True + param.pop('ParameterValue', None) + + stack_params['Parameters'].append(param) + else: + # allow default k/v configuration to set a template parameter + stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + + if isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + + if module.params.get('role_arn'): + stack_params['RoleARN'] = module.params['role_arn'] + + result = {} + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg=boto_exception(e)) + + # Wrap the cloudformation client methods that this module uses with + # automatic backoff / retry for throttling error codes + backoff_wrapper = AWSRetry.jittered_backoff( + retries=module.params.get('backoff_retries'), + delay=module.params.get('backoff_delay'), + max_delay=module.params.get('backoff_max_delay') + ) + cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events) + cfn.create_stack = backoff_wrapper(cfn.create_stack) + cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets) + cfn.create_change_set = backoff_wrapper(cfn.create_change_set) + cfn.update_stack = backoff_wrapper(cfn.update_stack) + cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks) + cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources) + cfn.delete_stack = backoff_wrapper(cfn.delete_stack) + if boto_supports_termination_protection(cfn): + cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection) + + stack_info = get_stack_facts(cfn, stack_params['StackName']) + + if module.check_mode: + if state == 'absent' and stack_info: + module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) + elif state == 'absent' and not stack_info: + module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) + elif state == 'present' and not stack_info: + module.exit_json(changed=True, msg='New stack would be created', meta=[]) + else: + module.exit_json(**check_mode_changeset(module, stack_params, cfn)) + + if state == 'present': + if not stack_info: + result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) + elif module.params.get('create_changeset'): + result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + else: + if module.params.get('termination_protection') is not None: + update_termination_protection(module, cfn, stack_params['StackName'], + bool(module.params.get('termination_protection'))) + result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + + # format the stack output + + stack = 
get_stack_facts(cfn, stack_params['StackName'])
+        if stack is not None:
+            if result.get('stack_outputs') is None:
+                # always define stack_outputs, but it may be empty
+                result['stack_outputs'] = {}
+            for output in stack.get('Outputs', []):
+                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+            stack_resources = []
+            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+            for res in reslist.get('StackResourceSummaries', []):
+                stack_resources.append({
+                    "logical_resource_id": res['LogicalResourceId'],
+                    "physical_resource_id": res.get('PhysicalResourceId', ''),
+                    "resource_type": res['ResourceType'],
+                    "last_updated_time": res['LastUpdatedTimestamp'],
+                    "status": res['ResourceStatus'],
+                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
+                })
+            result['stack_resources'] = stack_resources
+
+    elif state == 'absent':
+        # absent state is different because of the way delete_stack works.
+        # the problem is that it doesn't give an error if the stack isn't found,
+        # so we must describe the stack first
+
+        try:
+            stack = get_stack_facts(cfn, stack_params['StackName'])
+            if not stack:
+                result = {'changed': False, 'output': 'Stack not found.'}
+            else:
+                if stack_params.get('RoleARN') is None:
+                    cfn.delete_stack(StackName=stack_params['StackName'])
+                else:
+                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+                                         stack_params.get('ClientRequestToken', None))
+        except Exception as err:
+            module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/cloudformation_info.py b/test/support/integration/plugins/modules/cloudformation_info.py
new file mode 100644
index 00000000..ee2e5c17
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation_info.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+  - Gets information about an AWS CloudFormation stack.
+  - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+    Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+  - boto3 >= 1.0.0
+  - python >= 2.6
+version_added: "2.2"
+author:
+  - Justin Menga (@jmenga)
+  - Kevin Coming (@waffie1)
+options:
+  stack_name:
+    description:
+      - The name or ID of the CloudFormation stack. Gathers information on all stacks by default.
+    type: str
+  all_facts:
+    description:
+      - Get all stack information for the stack.
+    type: bool
+    default: false
+  stack_events:
+    description:
+      - Get stack events for the stack.
+    type: bool
+    default: false
+  stack_template:
+    description:
+      - Get stack template body for the stack.
+    type: bool
+    default: false
+  stack_resources:
+    description:
+      - Get stack resources for the stack.
+    type: bool
+    default: false
+  stack_policy:
+    description:
+      - Get stack policy for the stack.
+ type: bool + default: false + stack_change_sets: + description: + - Get stack change sets for the stack + type: bool + default: false + version_added: '2.10' +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Get summary information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + register: output + +- debug: + msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}" + +# When the module is called as cloudformation_facts, return values are published +# in ansible_facts['cloudformation'][] and can be used as follows. +# Note that this is deprecated and will stop working in Ansible 2.13. + +- cloudformation_facts: + stack_name: my-cloudformation-stack + +- debug: + msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}" + +# Get stack outputs, when you have the stack name available as a fact +- set_fact: + stack_name: my-awesome-stack + +- cloudformation_info: + stack_name: "{{ stack_name }}" + register: my_stack + +- debug: + msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}" + +# Get all stack information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + all_facts: true + +# Get stack resource and stack policy information about a stack +- cloudformation_info: + stack_name: my-cloudformation-stack + stack_resources: true + stack_policy: true + +# Fail if the stack doesn't exist +- name: try to get facts about a stack but fail if it doesn't exist + cloudformation_info: + stack_name: nonexistent-stack + all_facts: yes + failed_when: cloudformation['nonexistent-stack'] is undefined +''' + +RETURN = ''' +stack_description: + description: Summary facts about the stack + returned: if the stack exists + type: dict +stack_outputs: + description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each + output 'OutputValue' parameter + returned: if the stack exists + type: dict + sample: + ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com +stack_parameters: + description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of + each parameter 'ParameterValue' parameter + returned: if the stack exists + type: dict + sample: + DatabaseEngine: mysql + DatabasePassword: "***" +stack_events: + description: All stack events for the stack + returned: only if all_facts or stack_events is true and the stack exists + type: list +stack_policy: + description: Describes the stack policy for the stack + returned: only if all_facts or stack_policy is true and the stack exists + type: dict +stack_template: + description: Describes the stack template for the stack + returned: only if all_facts or stack_template is true and the stack exists + type: dict +stack_resource_list: + description: Describes stack resources for the stack + returned: only if all_facts or stack_resourses is true and the stack exists + type: list +stack_resources: + description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each + resource 'PhysicalResourceId' parameter + returned: only if all_facts or stack_resourses is true and the stack exists + type: dict + sample: + AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7" + AutoScalingSecurityGroup: "sg-abcd1234" + ApplicationDatabase: "dazvlpr01xj55a" 
+stack_change_sets: + description: A list of stack change sets. Each item in the list represents the details of a specific changeset + + returned: only if all_facts or stack_change_sets is true and the stack exists + type: list +''' + +import json +import traceback + +from functools import partial +from ansible.module_utils._text import to_native +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict) + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +class CloudFormationServiceManager: + """Handles CloudFormation Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudformation') + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stacks_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_stacks') + return paginator.paginate(**kwargs).build_full_result()['Stacks'] + + def describe_stacks(self, stack_name=None): + try: + kwargs = {'StackName': stack_name} if stack_name else {} + response = self.describe_stacks_with_backoff(**kwargs) + if response is not None: + return response + self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + if 'does not exist' in e.response['Error']['Message']: + # missing stack, don't bail. + return {} + self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_resources_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_stack_resources') + return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + + def list_stack_resources(self, stack_name): + try: + return self.list_stack_resources_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_events_with_backoff(self, stack_name): + paginator = self.client.get_paginator('describe_stack_events') + return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + + def describe_stack_events(self, stack_name): + try: + return self.describe_stack_events_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_change_sets_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_change_sets') + return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_change_set_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_change_set') + return paginator.paginate(**kwargs).build_full_result() + + def describe_stack_change_sets(self, stack_name): + changes = [] + try: + change_sets = self.list_stack_change_sets_with_backoff(stack_name) + for item in change_sets: + changes.append(self.describe_stack_change_set_with_backoff( + StackName=stack_name, + ChangeSetName=item['ChangeSetName'])) + return changes + except (botocore.exceptions.BotoCoreError, 
botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_stack_policy_with_backoff(self, stack_name): + return self.client.get_stack_policy(StackName=stack_name) + + def get_stack_policy(self, stack_name): + try: + response = self.get_stack_policy_with_backoff(stack_name) + stack_policy = response.get('StackPolicyBody') + if stack_policy: + return json.loads(stack_policy) + return dict() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_template_with_backoff(self, stack_name): + return self.client.get_template(StackName=stack_name) + + def get_template(self, stack_name): + try: + response = self.get_template_with_backoff(stack_name) + return response.get('TemplateBody') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) + + +def to_dict(items, key, value): + ''' Transforms a list of items to a Key/Value dictionary ''' + if items: + return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) + else: + return dict() + + +def main(): + argument_spec = dict( + stack_name=dict(), + all_facts=dict(required=False, default=False, type='bool'), + stack_policy=dict(required=False, default=False, type='bool'), + stack_events=dict(required=False, default=False, type='bool'), + stack_resources=dict(required=False, default=False, type='bool'), + stack_template=dict(required=False, default=False, type='bool'), + stack_change_sets=dict(required=False, default=False, type='bool'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + is_old_facts = module._name == 'cloudformation_facts' + if is_old_facts: + module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', " + "and the renamed one no longer returns ansible_facts", + version='2.13', collection_name='ansible.builtin') + + service_mgr = CloudFormationServiceManager(module) + + if is_old_facts: + result = {'ansible_facts': {'cloudformation': {}}} + else: + result = {'cloudformation': {}} + + for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): + facts = {'stack_description': stack_description} + stack_name = stack_description.get('StackName') + + # Create stack output and stack parameter dictionaries + if facts['stack_description']: + facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') + facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), + 'ParameterKey', 'ParameterValue') + facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + + # Create optional stack outputs + all_facts = module.params.get('all_facts') + if all_facts or module.params.get('stack_resources'): + facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) + facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), + 'LogicalResourceId', 'PhysicalResourceId') + if all_facts or module.params.get('stack_template'): + facts['stack_template'] = service_mgr.get_template(stack_name) + if all_facts or module.params.get('stack_policy'): + facts['stack_policy'] = 
service_mgr.get_stack_policy(stack_name) + if all_facts or module.params.get('stack_events'): + facts['stack_events'] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get('stack_change_sets'): + facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) + + if is_old_facts: + result['ansible_facts']['cloudformation'][stack_name] = facts + else: + result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', + 'stack_parameters', + 'stack_policy', + 'stack_resources', + 'stack_tags', + 'stack_template')) + + module.exit_json(changed=False, **result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/deploy_helper.py b/test/support/integration/plugins/modules/deploy_helper.py new file mode 100644 index 00000000..38594dde --- /dev/null +++ b/test/support/integration/plugins/modules/deploy_helper.py @@ -0,0 +1,521 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jasper N. Brouwer +# (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: deploy_helper +version_added: "2.0" +author: "Ramon de la Fuente (@ramondelafuente)" +short_description: Manages some of the steps common in deploying projects. +description: + - The Deploy Helper manages some of the steps common in deploying software. + It creates a folder structure, manages a symlink for the current release + and cleans up old releases. + - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact. + C(project_path), whatever you set in the path parameter, + C(current_path), the path to the symlink that points to the active release, + C(releases_path), the path to the folder to keep releases in, + C(shared_path), the path to the folder to keep shared resources in, + C(unfinished_filename), the file to check for to recognize unfinished builds, + C(previous_release), the release the 'current' symlink is pointing to, + C(previous_release_path), the full path to the 'current' symlink target, + C(new_release), either the 'release' parameter or a generated timestamp, + C(new_release_path), the path to the new release folder (not created by the module)." + +options: + path: + required: True + aliases: ['dest'] + description: + - the root path of the project. Alias I(dest). + Returned in the C(deploy_helper.project_path) fact. + + state: + description: + - the state of the project. + C(query) will only gather facts, + C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, + C(finalize) will remove the unfinished_filename file, create a symlink to the newly + deployed release and optionally clean old releases, + C(clean) will remove failed & old releases, + C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent)) + choices: [ present, finalize, absent, clean, query ] + default: present + + release: + description: + - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). + This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize). 
+ You can use the generated fact C(release={{ deploy_helper.new_release }}). + + releases_path: + description: + - the name of the folder that will hold the releases. This can be relative to C(path) or absolute. + Returned in the C(deploy_helper.releases_path) fact. + default: releases + + shared_path: + description: + - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute. + If this is set to an empty string, no shared folder will be created. + Returned in the C(deploy_helper.shared_path) fact. + default: shared + + current_path: + description: + - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean). + Returned in the C(deploy_helper.current_path) fact. + default: current + + unfinished_filename: + description: + - the name of the file that indicates a deploy has not finished. All folders in the releases_path that + contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is + automatically deleted from the I(new_release_path) during C(state=finalize). + default: DEPLOY_UNFINISHED + + clean: + description: + - Whether to run the clean procedure in case of C(state=finalize). + type: bool + default: 'yes' + + keep_releases: + description: + - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds + will be deleted first, so only correct releases will count. The current version will not count. + default: 5 + +notes: + - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden + parameters to both calls, otherwise the second call will overwrite the facts of the first one. + - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a + new naming strategy without problems. + - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent + unless you pass your own release name with C(release). Due to the nature of deploying software, this should not + be much of a problem. +''' + +EXAMPLES = ''' + +# General explanation, starting with an example folder structure for a project: + +# root: +# releases: +# - 20140415234508 +# - 20140415235146 +# - 20140416082818 +# +# shared: +# - sessions +# - uploads +# +# current: releases/20140416082818 + + +# The 'releases' folder holds all the available releases. A release is a complete build of the application being +# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +# git tags or commit hashes. +# +# During a deploy, a new folder should be created in the releases folder and any build steps required should be +# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +# with a link to this build. +# +# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. +# +# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. 
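+# (the module replaces the symlink atomically: it creates a temporary link first and then
+# renames it over 'current', so there is never a moment without a valid link)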
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +# release is reduced to the time it takes to switch the link. +# +# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +# procedure to remove it during cleanup. + + +# Typical usage +- name: Initialize the deploy root and gather facts + deploy_helper: + path: /path/to/root +- name: Clone the project to the new release folder + git: + repo: git://foosball.example.org/path/to/repo.git + dest: '{{ deploy_helper.new_release_path }}' + version: v1.1.1 +- name: Add an unfinished file, to allow cleanup on successful finalize + file: + path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}' + state: touch +- name: Perform some build steps, like running your dependency manager for example + composer: + command: install + working_dir: '{{ deploy_helper.new_release_path }}' +- name: Create some folders in the shared folder + file: + path: '{{ deploy_helper.shared_path }}/{{ item }}' + state: directory + with_items: + - sessions + - uploads +- name: Add symlinks from the new release to the shared folder + file: + path: '{{ deploy_helper.new_release_path }}/{{ item.path }}' + src: '{{ deploy_helper.shared_path }}/{{ item.src }}' + state: link + with_items: + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads +- name: Finalize the deploy, removing the unfinished file and switching the symlink + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Retrieving facts before running a deploy +- name: Run 'state=query' to gather facts without changing anything + deploy_helper: + path: /path/to/root + state: query +# Remember to set the 'release' parameter when you actually call 'state=present' later +- name: Initialize the deploy root + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: present + +# all paths can be absolute or relative (to the 'path' parameter) +- deploy_helper: + path: /path/to/root + releases_path: /var/www/project/releases + shared_path: /var/www/shared + current_path: /var/www/active + +# Using your own naming strategy for releases (a version tag in this case): +- deploy_helper: + path: /path/to/root + release: v1.1.1 + state: present +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Using a different unfinished_filename: +- deploy_helper: + path: /path/to/root + unfinished_filename: README.md + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Postponing the cleanup of older builds: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + clean: False +- deploy_helper: + path: /path/to/root + state: clean +# Or running the cleanup ahead of the new deploy +- deploy_helper: + path: /path/to/root + state: clean +- deploy_helper: + path: /path/to/root + state: present + +# Keeping more old releases: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + keep_releases: 10 +# Or, if you use 'clean=false' on finalize: +- deploy_helper: + path: /path/to/root + state: clean + keep_releases: 10 + +# Removing the entire project root folder +- deploy_helper: + path: /path/to/root + state: absent + +# 
Debugging the facts returned by the module
+- deploy_helper:
+    path: /path/to/root
+- debug:
+    var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.file_args = module.load_file_common_arguments(module.params)
+
+        self.clean = module.params['clean']
+        self.current_path = module.params['current_path']
+        self.keep_releases = module.params['keep_releases']
+        self.path = module.params['path']
+        self.release = module.params['release']
+        self.releases_path = module.params['releases_path']
+        self.shared_path = module.params['shared_path']
+        self.state = module.params['state']
+        self.unfinished_filename = module.params['unfinished_filename']
+
+    def gather_facts(self):
+        current_path = os.path.join(self.path, self.current_path)
+        releases_path = os.path.join(self.path, self.releases_path)
+        if self.shared_path:
+            shared_path = os.path.join(self.path, self.shared_path)
+        else:
+            shared_path = None
+
+        previous_release, previous_release_path = self._get_last_release(current_path)
+
+        if not self.release and (self.state == 'query' or self.state == 'present'):
+            self.release = time.strftime("%Y%m%d%H%M%S")
+
+        if self.release:
+            new_release_path = os.path.join(releases_path, self.release)
+        else:
+            new_release_path = None
+
+        return {
+            'project_path': self.path,
+            'current_path': current_path,
+            'releases_path': releases_path,
+            'shared_path': shared_path,
+            'previous_release': previous_release,
+            'previous_release_path': previous_release_path,
+            'new_release': self.release,
+            'new_release_path': new_release_path,
+            'unfinished_filename': self.unfinished_filename
+        }
+
+    def delete_path(self, path):
+        if not os.path.lexists(path):
+            return False
+
+        if not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        if not self.module.check_mode:
+            try:
+                shutil.rmtree(path, ignore_errors=False)
+            except Exception as e:
+                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+        return True
+
+    def create_path(self, path):
+        changed = False
+
+        if not os.path.lexists(path):
+            changed = True
+            if not self.module.check_mode:
+                os.makedirs(path)
+
+        elif not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+        return changed
+
+    def check_link(self, path):
+        if os.path.lexists(path):
+            if not os.path.islink(path):
+                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+    def create_link(self, source, link_name):
+        changed = False
+
+        if os.path.islink(link_name):
+            norm_link = os.path.normpath(os.path.realpath(link_name))
+            norm_source = os.path.normpath(os.path.realpath(source))
+            if norm_link == norm_source:
+                changed = False
+            else:
+                changed = True
+                if not self.module.check_mode:
+                    if not os.path.lexists(source):
+                        self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
+                    tmp_link_name = link_name + '.'
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(aliases=['dest'], required=True, type='path'), + release=dict(required=False, type='str', default=None), + releases_path=dict(required=False, type='str', default='releases'), + shared_path=dict(required=False, type='path', default='shared'), + current_path=dict(required=False, type='path', default='current'), + keep_releases=dict(required=False, type='int', default=5), + clean=dict(required=False, type='bool', default=True), + unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'), + state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + add_file_common_args=True, + supports_check_mode=True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += 
deploy_helper.create_path(facts['shared_path']) + + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'finalize': + if not deploy_helper.release: + module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = {'deploy_helper': []} + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/docker_swarm.py b/test/support/integration/plugins/modules/docker_swarm.py new file mode 100644 index 00000000..a2c076c5 --- /dev/null +++ b/test/support/integration/plugins/modules/docker_swarm.py @@ -0,0 +1,681 @@ +#!/usr/bin/python + +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: docker_swarm +short_description: Manage Swarm cluster +version_added: "2.7" +description: + - Create a new Swarm cluster. + - Add/Remove nodes or managers to an existing cluster. +options: + advertise_addr: + description: + - Externally reachable address advertised to other nodes. + - This can either be an address/port combination + in the form C(192.168.1.1:4567), or an interface followed by a + port number, like C(eth0:4567). + - If the port number is omitted, + the port number from the listen address is used. + - If I(advertise_addr) is not specified, it will be automatically + detected when possible. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default_addr_pool: + description: + - Default address pool in CIDR format. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: list + elements: str + version_added: "2.8" + subnet_size: + description: + - Default address pool subnet mask length. + - Only used when swarm is initialised. Because of this it's not considered + for idempotency checking. + - Requires API version >= 1.39. + type: int + version_added: "2.8" + listen_addr: + description: + - Listen address used for inter-manager communication. 
+ - This can either be an address/port combination in the form + C(192.168.1.1:4567), or an interface followed by a port number, + like C(eth0:4567). + - If the port number is omitted, the default swarm listening port + is used. + - Only used when swarm is initialised or joined. Because of this it's not + considered for idempotency checking. + type: str + default: 0.0.0.0:2377 + force: + description: + - Use with state C(present) to force creating a new Swarm, even if already part of one. + - Use with state C(absent) to leave the swarm even if this node is a manager. + type: bool + default: no + state: + description: + - Set to C(present), to create/update a new cluster. + - Set to C(join), to join an existing cluster. + - Set to C(absent), to leave an existing cluster. + - Set to C(remove), to remove an absent node from the cluster. + Note that removing requires Docker SDK for Python >= 2.4.0. + - Set to C(inspect) to display swarm information. + type: str + default: present + choices: + - present + - join + - absent + - remove + - inspect + node_id: + description: + - Swarm id of the node to remove. + - Used with I(state=remove). + type: str + join_token: + description: + - Swarm token used to join a swarm cluster. + - Used with I(state=join). + type: str + remote_addrs: + description: + - Remote address of one or more manager nodes of an existing Swarm to connect to. + - Used with I(state=join). + type: list + elements: str + task_history_retention_limit: + description: + - Maximum number of task history entries stored. + - Docker default value is C(5). + type: int + snapshot_interval: + description: + - Number of log entries between snapshots. + - Docker default value is C(10000). + type: int + keep_old_snapshots: + description: + - Number of snapshots to keep beyond the current snapshot. + - Docker default value is C(0). + type: int + log_entries_for_slow_followers: + description: + - Number of log entries to keep around to sync up slow followers after a snapshot is created. + type: int + heartbeat_tick: + description: + - Number of ticks (in seconds) between each heartbeat. + - Docker default value is C(1s). + type: int + election_tick: + description: + - Number of ticks (in seconds) needed without a leader to trigger a new election. + - Docker default value is C(10s). + type: int + dispatcher_heartbeat_period: + description: + - The delay for an agent to send a heartbeat to the dispatcher. + - Docker default value is C(5s). + type: int + node_cert_expiry: + description: + - Automatic expiry for node certificates. + - Docker default value is C(3months). + type: int + name: + description: + - The name of the swarm. + type: str + labels: + description: + - User-defined key/value metadata. + - Label operations in this module apply to the docker swarm cluster. + Use the M(docker_node) module to add/modify/remove swarm node labels. + - Requires API version >= 1.32. + type: dict + signing_ca_cert: + description: + - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. + - This must not be a path to a certificate, but the contents of the certificate. + - Requires API version >= 1.30. + type: str + signing_ca_key: + description: + - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. + - This must not be a path to a key, but the contents of the key. + - Requires API version >= 1.30. 
+ type: str + ca_force_rotate: + description: + - An integer whose purpose is to force swarm to generate a new signing CA certificate and key, + if none have been specified. + - Docker default value is C(0). + - Requires API version >= 1.30. + type: int + autolock_managers: + description: + - If set, generate a key and use it to lock data stored on the managers. + - Docker default value is C(no). + - M(docker_swarm_info) can be used to retrieve the unlock key. + type: bool + rotate_worker_token: + description: Rotate the worker join token. + type: bool + default: no + rotate_manager_token: + description: Rotate the manager join token. + type: bool + default: no +extends_documentation_fragment: + - docker + - docker.docker_py_1_documentation +requirements: + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" + - Docker API >= 1.25 +author: + - Thierry Bouvet (@tbouvet) + - Piotr Wojciechowski (@WojciechowskiPiotr) +''' + +EXAMPLES = ''' + +- name: Init a new swarm with default parameters + docker_swarm: + state: present + +- name: Update swarm configuration + docker_swarm: + state: present + election_tick: 5 + +- name: Add nodes + docker_swarm: + state: join + advertise_addr: 192.168.1.2 + join_token: SWMTKN-1--xxxxx + remote_addrs: [ '192.168.1.1:2377' ] + +- name: Leave swarm for a node + docker_swarm: + state: absent + +- name: Remove a swarm manager + docker_swarm: + state: absent + force: true + +- name: Remove node from swarm + docker_swarm: + state: remove + node_id: mynode + +- name: Inspect swarm + docker_swarm: + state: inspect + register: swarm_info +''' + +RETURN = ''' +swarm_facts: + description: Information about the swarm. + returned: success + type: dict + contains: + JoinTokens: + description: Tokens to connect to the Swarm. + returned: success + type: dict + contains: + Worker: + description: Token to create a new *worker* node. + returned: success + type: str + example: SWMTKN-1--xxxxx + Manager: + description: Token to create a new *manager* node. + returned: success + type: str + example: SWMTKN-1--xxxxx + UnlockKey: + description: The swarm unlock-key if I(autolock_managers) is C(true). + returned: on success if I(autolock_managers) is C(true) + and swarm is initialised, or if I(autolock_managers) has changed. + type: str + example: SWMKEY-1-xxx + +actions: + description: Provides the actions done on the swarm. + returned: when an action failed. 
+ type: list + elements: str + example: "['This cluster is already a swarm cluster']" + +''' + +import json +import traceback + +try: + from docker.errors import DockerException, APIError +except ImportError: + # missing Docker SDK for Python handled in ansible.module_utils.docker.common + pass + +from ansible.module_utils.docker.common import ( + DockerBaseClass, + DifferenceTracker, + RequestException, +) + +from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient + +from ansible.module_utils._text import to_native + + +class TaskParameters(DockerBaseClass): + def __init__(self): + super(TaskParameters, self).__init__() + + self.advertise_addr = None + self.listen_addr = None + self.remote_addrs = None + self.join_token = None + + # Spec + self.snapshot_interval = None + self.task_history_retention_limit = None + self.keep_old_snapshots = None + self.log_entries_for_slow_followers = None + self.heartbeat_tick = None + self.election_tick = None + self.dispatcher_heartbeat_period = None + self.node_cert_expiry = None + self.name = None + self.labels = None + self.log_driver = None + self.signing_ca_cert = None + self.signing_ca_key = None + self.ca_force_rotate = None + self.autolock_managers = None + self.rotate_worker_token = None + self.rotate_manager_token = None + self.default_addr_pool = None + self.subnet_size = None + + @staticmethod + def from_ansible_params(client): + result = TaskParameters() + for key, value in client.module.params.items(): + if key in result.__dict__: + setattr(result, key, value) + + result.update_parameters(client) + return result + + def update_from_swarm_info(self, swarm_info): + spec = swarm_info['Spec'] + + ca_config = spec.get('CAConfig') or dict() + if self.node_cert_expiry is None: + self.node_cert_expiry = ca_config.get('NodeCertExpiry') + if self.ca_force_rotate is None: + self.ca_force_rotate = ca_config.get('ForceRotate') + + dispatcher = spec.get('Dispatcher') or dict() + if self.dispatcher_heartbeat_period is None: + self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod') + + raft = spec.get('Raft') or dict() + if self.snapshot_interval is None: + self.snapshot_interval = raft.get('SnapshotInterval') + if self.keep_old_snapshots is None: + self.keep_old_snapshots = raft.get('KeepOldSnapshots') + if self.heartbeat_tick is None: + self.heartbeat_tick = raft.get('HeartbeatTick') + if self.log_entries_for_slow_followers is None: + self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers') + if self.election_tick is None: + self.election_tick = raft.get('ElectionTick') + + orchestration = spec.get('Orchestration') or dict() + if self.task_history_retention_limit is None: + self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit') + + encryption_config = spec.get('EncryptionConfig') or dict() + if self.autolock_managers is None: + self.autolock_managers = encryption_config.get('AutoLockManagers') + + if self.name is None: + self.name = spec['Name'] + + if self.labels is None: + self.labels = spec.get('Labels') or {} + + if 'LogDriver' in spec['TaskDefaults']: + self.log_driver = spec['TaskDefaults']['LogDriver'] + + def update_parameters(self, client): + assign = dict( + snapshot_interval='snapshot_interval', + task_history_retention_limit='task_history_retention_limit', + keep_old_snapshots='keep_old_snapshots', + log_entries_for_slow_followers='log_entries_for_slow_followers', + heartbeat_tick='heartbeat_tick', + election_tick='election_tick', + 
dispatcher_heartbeat_period='dispatcher_heartbeat_period', + node_cert_expiry='node_cert_expiry', + name='name', + labels='labels', + signing_ca_cert='signing_ca_cert', + signing_ca_key='signing_ca_key', + ca_force_rotate='ca_force_rotate', + autolock_managers='autolock_managers', + log_driver='log_driver', + ) + params = dict() + for dest, source in assign.items(): + if not client.option_minimal_versions[source]['supported']: + continue + value = getattr(self, source) + if value is not None: + params[dest] = value + self.spec = client.create_swarm_spec(**params) + + def compare_to_active(self, other, client, differences): + for k in self.__dict__: + if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token', + 'rotate_worker_token', 'rotate_manager_token', 'spec', + 'default_addr_pool', 'subnet_size'): + continue + if not client.option_minimal_versions[k]['supported']: + continue + value = getattr(self, k) + if value is None: + continue + other_value = getattr(other, k) + if value != other_value: + differences.add(k, parameter=value, active=other_value) + if self.rotate_worker_token: + differences.add('rotate_worker_token', parameter=True, active=False) + if self.rotate_manager_token: + differences.add('rotate_manager_token', parameter=True, active=False) + return differences + + +class SwarmManager(DockerBaseClass): + + def __init__(self, client, results): + + super(SwarmManager, self).__init__() + + self.client = client + self.results = results + self.check_mode = self.client.check_mode + self.swarm_info = {} + + self.state = client.module.params['state'] + self.force = client.module.params['force'] + self.node_id = client.module.params['node_id'] + + self.differences = DifferenceTracker() + self.parameters = TaskParameters.from_ansible_params(client) + + self.created = False + + def __call__(self): + choice_map = { + "present": self.init_swarm, + "join": self.join, + "absent": self.leave, + "remove": self.remove, + "inspect": self.inspect_swarm + } + + if self.state == 'inspect': + self.client.module.deprecate( + "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster", + version='2.12', collection_name='ansible.builtin') + + choice_map.get(self.state)() + + if self.client.module._diff or self.parameters.debug: + diff = dict() + diff['before'], diff['after'] = self.differences.get_before_after() + self.results['diff'] = diff + + def inspect_swarm(self): + try: + data = self.client.inspect_swarm() + json_str = json.dumps(data, ensure_ascii=False) + self.swarm_info = json.loads(json_str) + + self.results['changed'] = False + self.results['swarm_facts'] = self.swarm_info + + unlock_key = self.get_unlock_key() + self.swarm_info.update(unlock_key) + except APIError: + return + + def get_unlock_key(self): + default = {'UnlockKey': None} + if not self.has_swarm_lock_changed(): + return default + try: + return self.client.get_unlock_key() or default + except APIError: + return default + + def has_swarm_lock_changed(self): + return self.parameters.autolock_managers and ( + self.created or self.differences.has_difference_for('autolock_managers') + ) + + def init_swarm(self): + if not self.force and self.client.check_if_swarm_manager(): + self.__update_swarm() + return + + if not self.check_mode: + init_arguments = { + 'advertise_addr': self.parameters.advertise_addr, + 'listen_addr': self.parameters.listen_addr, + 'force_new_cluster': self.force, + 'swarm_spec': self.parameters.spec, + } + if self.parameters.default_addr_pool is not None: + 
init_arguments['default_addr_pool'] = self.parameters.default_addr_pool + if self.parameters.subnet_size is not None: + init_arguments['subnet_size'] = self.parameters.subnet_size + try: + self.client.init_swarm(**init_arguments) + except APIError as exc: + self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) + + if not self.client.check_if_swarm_manager(): + if not self.check_mode: + self.client.fail("Swarm not created or other error!") + + self.created = True + self.inspect_swarm() + self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) + self.differences.add('state', parameter='present', active='absent') + self.results['changed'] = True + self.results['swarm_facts'] = { + 'JoinTokens': self.swarm_info.get('JoinTokens'), + 'UnlockKey': self.swarm_info.get('UnlockKey') + } + + def __update_swarm(self): + try: + self.inspect_swarm() + version = self.swarm_info['Version']['Index'] + self.parameters.update_from_swarm_info(self.swarm_info) + old_parameters = TaskParameters() + old_parameters.update_from_swarm_info(self.swarm_info) + self.parameters.compare_to_active(old_parameters, self.client, self.differences) + if self.differences.empty: + self.results['actions'].append("No modification") + self.results['changed'] = False + return + update_parameters = TaskParameters.from_ansible_params(self.client) + update_parameters.update_parameters(self.client) + if not self.check_mode: + self.client.update_swarm( + version=version, swarm_spec=update_parameters.spec, + rotate_worker_token=self.parameters.rotate_worker_token, + rotate_manager_token=self.parameters.rotate_manager_token) + except APIError as exc: + self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) + return + + self.inspect_swarm() + self.results['actions'].append("Swarm cluster updated") + self.results['changed'] = True + + def join(self): + if self.client.check_if_swarm_node(): + self.results['actions'].append("This node is already part of a swarm.") + return + if not self.check_mode: + try: + self.client.join_swarm( + remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, + listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr) + except APIError as exc: + self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("New node is added to swarm cluster") + self.differences.add('joined', parameter=True, active=False) + self.results['changed'] = True + + def leave(self): + if not self.client.check_if_swarm_node(): + self.results['actions'].append("This node is not part of a swarm.") + return + if not self.check_mode: + try: + self.client.leave_swarm(force=self.force) + except APIError as exc: + self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node has left the swarm cluster") + self.differences.add('joined', parameter='absent', active='present') + self.results['changed'] = True + + def remove(self): + if not self.client.check_if_swarm_manager(): + self.client.fail("This node is not a manager.") + + try: + status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5) + except APIError: + return + + if not status_down: + self.client.fail("Can not remove the node. 
The node status is 'ready' rather than 'down'.") + + if not self.check_mode: + try: + self.client.remove_node(node_id=self.node_id, force=self.force) + except APIError as exc: + self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc)) + self.results['actions'].append("Node is removed from swarm cluster.") + self.differences.add('joined', parameter=False, active=True) + self.results['changed'] = True + + +def _detect_remove_operation(client): + return client.module.params['state'] == 'remove' + + +def main(): + argument_spec = dict( + advertise_addr=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']), + force=dict(type='bool', default=False), + listen_addr=dict(type='str', default='0.0.0.0:2377'), + remote_addrs=dict(type='list', elements='str'), + join_token=dict(type='str'), + snapshot_interval=dict(type='int'), + task_history_retention_limit=dict(type='int'), + keep_old_snapshots=dict(type='int'), + log_entries_for_slow_followers=dict(type='int'), + heartbeat_tick=dict(type='int'), + election_tick=dict(type='int'), + dispatcher_heartbeat_period=dict(type='int'), + node_cert_expiry=dict(type='int'), + name=dict(type='str'), + labels=dict(type='dict'), + signing_ca_cert=dict(type='str'), + signing_ca_key=dict(type='str'), + ca_force_rotate=dict(type='int'), + autolock_managers=dict(type='bool'), + node_id=dict(type='str'), + rotate_worker_token=dict(type='bool', default=False), + rotate_manager_token=dict(type='bool', default=False), + default_addr_pool=dict(type='list', elements='str'), + subnet_size=dict(type='int'), + ) + + required_if = [ + ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']), + ('state', 'remove', ['node_id']) + ] + + option_minimal_versions = dict( + labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'), + signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'), + autolock_managers=dict(docker_py_version='2.6.0'), + log_driver=dict(docker_py_version='2.6.0'), + remove_operation=dict( + docker_py_version='2.4.0', + detect_usage=_detect_remove_operation, + usage_msg='remove swarm nodes' + ), + default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'), + ) + + client = AnsibleDockerSwarmClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + min_docker_version='1.10.0', + min_docker_api_version='1.25', + option_minimal_versions=option_minimal_versions, + ) + + try: + results = dict( + changed=False, + result='', + actions=[] + ) + + SwarmManager(client, results)() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) + except RequestException as e: + client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py new file mode 100644 index 00000000..952aa5a1 --- /dev/null +++ b/test/support/integration/plugins/modules/ec2.py @@ -0,0 +1,1766 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + + +DOCUMENTATION = ''' +--- +module: ec2 +short_description: create, terminate, start or stop an instance in ec2 +description: + - Creates or terminates ec2 instances. + - > + Note: This module uses the older boto Python module to interact with the EC2 API. + M(ec2) will still receive bug fixes, but no new features. + Consider using the M(ec2_instance) module instead. + If M(ec2_instance) does not support a feature you need that is available in M(ec2), please + file a feature request. +version_added: "0.9" +options: + key_name: + description: + - Key pair to use on the instance. + - The SSH key must already exist in AWS in order to use this argument. + - Keys can be created / deleted using the M(ec2_key) module. + aliases: ['keypair'] + type: str + id: + version_added: "1.1" + description: + - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. + - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. + - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + type: str + group: + description: + - Security group (or list of groups) to use with the instance. + aliases: [ 'groups' ] + type: list + elements: str + group_id: + version_added: "1.1" + description: + - Security group id (or list of ids) to use with the instance. + type: list + elements: str + zone: + version_added: "1.2" + description: + - AWS availability zone in which to launch the instance. + aliases: [ 'aws_zone', 'ec2_zone' ] + type: str + instance_type: + description: + - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Required when creating a new instance. + type: str + aliases: ['type'] + tenancy: + version_added: "1.9" + description: + - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC. + - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well. + - Dedicated tenancy is not available for EC2 "micro" instances. + default: default + choices: [ "default", "dedicated" ] + type: str + spot_price: + version_added: "1.5" + description: + - Maximum spot price to bid. If not set, a regular on-demand instance is requested. + - A spot request is made with this maximum bid. When it is filled, the instance is started. + type: str + spot_type: + version_added: "2.0" + description: + - The type of spot request. + - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again. + default: "one-time" + choices: [ "one-time", "persistent" ] + type: str + image: + description: + - I(ami) ID to use for the instance. + - Required when I(state=present). + type: str + kernel: + description: + - Kernel eki to use for the instance. + type: str + ramdisk: + description: + - Ramdisk eri to use for the instance. + type: str + wait: + description: + - Wait for the instance to reach its desired state before returning. + - Does not wait for SSH, see the 'wait_for_connection' example for details. 
+ type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + spot_wait_timeout: + version_added: "1.5" + description: + - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan. + default: 600 + type: int + count: + description: + - Number of instances to launch. + default: 1 + type: int + monitoring: + version_added: "1.1" + description: + - Enable detailed monitoring (CloudWatch) for instance. + type: bool + default: false + user_data: + version_added: "0.9" + description: + - Opaque blob of data which is made available to the EC2 instance. + type: str + instance_tags: + version_added: "1.0" + description: + - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'. + type: dict + placement_group: + version_added: "1.3" + description: + - Placement group for the instance when using EC2 Clustered Compute. + type: str + vpc_subnet_id: + version_added: "1.1" + description: + - The subnet ID in which to launch the instance (VPC). + type: str + assign_public_ip: + version_added: "1.5" + description: + - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+. + type: bool + private_ip: + version_added: "1.2" + description: + - The private ip address to assign the instance (from the vpc subnet). + type: str + instance_profile_name: + version_added: "1.3" + description: + - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+. + type: str + instance_ids: + version_added: "1.3" + description: + - "list of instance ids, currently used for states: absent, running, stopped" + aliases: ['instance_id'] + type: list + elements: str + source_dest_check: + version_added: "1.6" + description: + - Enable or disable the Source/Destination checks (for NAT instances and Virtual Routers). + When initially creating an instance the EC2 API defaults this to C(True). + type: bool + termination_protection: + version_added: "2.0" + description: + - Enable or disable termination protection. + type: bool + default: false + instance_initiated_shutdown_behavior: + version_added: "2.2" + description: + - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store + images (which require termination on shutdown). + default: 'stop' + choices: [ "stop", "terminate" ] + type: str + state: + version_added: "1.3" + description: + - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2. + - When I(state=absent), I(instance_ids) is required. + - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required. + default: 'present' + choices: ['absent', 'present', 'restarted', 'running', 'stopped'] + type: str + volumes: + version_added: "1.5" + description: + - A list of hash/dictionaries of volumes to add to the new instance. + type: list + elements: dict + suboptions: + device_name: + type: str + required: true + description: + - A name for the device (for example C(/dev/sda)). + delete_on_termination: + type: bool + default: false + description: + - Whether the volume should be automatically deleted when the instance is terminated. + ephemeral: + type: str + description: + - Whether the volume should be ephemeral. 
+ - Data on ephemeral volumes is lost when the instance is stopped. + - Mutually exclusive with the I(snapshot) parameter. + encrypted: + type: bool + default: false + description: + - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK. + snapshot: + type: str + description: + - The ID of an EBS snapshot to copy when creating the volume. + - Mutually exclusive with the I(ephemeral) parameter. + volume_type: + type: str + description: + - The type of volume to create. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types. + volume_size: + type: int + description: + - The size of the volume (in GiB). + iops: + type: int + description: + - The number of IOPS to provision for the volume. + - Required when I(volume_type=io1). + ebs_optimized: + version_added: "1.6" + description: + - Whether the instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). + default: false + type: bool + exact_count: + version_added: "1.5" + description: + - An integer value which indicates how many instances matching the 'count_tag' parameter should be running. + Instances are either created or terminated based on this value. + type: int + count_tag: + version_added: "1.5" + description: + - Used with I(exact_count) to determine how many nodes matching a specific tag criterion should be running. + This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers + that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option. + type: raw + network_interfaces: + version_added: "2.0" + description: + - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, + none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are + for creating a new network interface at launch.) + aliases: ['network_interface'] + type: list + elements: str + spot_launch_group: + version_added: "2.1" + description: + - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group). + type: str +author: + - "Tim Gerla (@tgerla)" + - "Lester Wade (@lwade)" + - "Seth Vidal (@skvidal)" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +# Basic provisioning example +- ec2: + key_name: mykey + instance_type: t2.micro + image: ami-123456 + wait: yes + group: webserver + count: 3 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Advanced example with tagging and CloudWatch +- ec2: + key_name: mykey + group: databases + instance_type: t2.micro + image: ami-123456 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with additional IOPS volume from snapshot and volume delete on termination +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_type: io1 + iops: 1000 + volume_size: 100 + delete_on_termination: true + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Single instance with SSD gp2 root volume +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/xvda + volume_type: gp2 + volume_size: 8 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + count_tag: + Name: dbserver + exact_count: 1 + +# Multiple groups example +- ec2: + key_name: mykey + group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + instance_tags: + db: postgres + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Multiple instances with additional volume from snapshot +- ec2: + key_name: mykey + group: webserver + instance_type: m1.large + image: ami-6e649707 + wait: yes + wait_timeout: 500 + count: 5 + volumes: + - device_name: /dev/sdb + snapshot: snap-abcdef12 + volume_size: 10 + monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# Dedicated tenancy example +- local_action: + module: ec2 + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + +# Spot instance example +- ec2: + spot_price: 0.24 + spot_wait_timeout: 600 + keypair: mykey + group_id: sg-1dc53f72 + instance_type: m1.small + image: ami-6e649707 + wait: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + spot_launch_group: report_generators + instance_initiated_shutdown_behavior: terminate + +# Examples using pre-existing network interfaces +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interface: eni-deadbeef + +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e'] + +# Launch instances, run some tasks +# and then terminate them + +- name: Create a sandbox instance + hosts: localhost + gather_facts: False + vars: + keypair: my_keypair + instance_type: m1.small + security_group: my_securitygroup + image: my_ami_id + region: us-east-1 + tasks: + - name: Launch instance + ec2: + key_name: "{{ keypair }}" + group: "{{ security_group }}" + instance_type: "{{ instance_type }}" + image: "{{ image }}" + wait: true + region: "{{ region }}" + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + register: ec2 + + - name: Add new instance to host group + add_host: + hostname: "{{ item.public_ip }}" + groupname: launched + loop: "{{ ec2.instances }}" + + - name: Wait for SSH to come up + 
delegate_to: "{{ item.public_dns_name }}" + wait_for_connection: + delay: 60 + timeout: 320 + loop: "{{ ec2.instances }}" + +- name: Configure instance(s) + hosts: launched + become: True + gather_facts: True + roles: + - my_awesome_role + - my_awesome_test + +- name: Terminate instances + hosts: localhost + tasks: + - name: Terminate instances that were previously launched + ec2: + state: 'absent' + instance_ids: '{{ ec2.instance_ids }}' + +# Start a few existing instances, run some tasks +# and stop the instances + +- name: Start sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Start the sandbox instances + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: running + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + roles: + - do_neat_stuff + - do_more_neat_stuff + +- name: Stop sandbox instances + hosts: localhost + gather_facts: false + vars: + instance_ids: + - 'i-xxxxxx' + - 'i-xxxxxx' + - 'i-xxxxxx' + region: us-east-1 + tasks: + - name: Stop the sandbox instances + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: stopped + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Start stopped instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: running + +# +# Restart instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: restarted + +# +# Enforce that 5 instances with a tag "foo" are running +# (Highly recommended!) +# + +- ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + foo: bar + exact_count: 5 + count_tag: foo + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running +# + +- ec2: + key_name: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + instance_tags: + Name: database + dbtype: postgres + exact_count: 5 + count_tag: + Name: database + dbtype: postgres + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + +# +# count_tag complex argument examples +# + + # instances with tag foo +- ec2: + count_tag: + foo: + + # instances with tag foo=bar +- ec2: + count_tag: + foo: bar + + # instances with tags foo=bar & baz +- ec2: + count_tag: + foo: bar + baz: + + # instances with tags foo & bar & baz=bang +- ec2: + count_tag: + - foo + - bar + - baz: bang + +''' + +import time +import datetime +import traceback +from ast import literal_eval +from distutils.version import LooseVersion + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect +from ansible.module_utils.six import get_function_code, string_types +from ansible.module_utils._text import to_bytes, to_text + +try: + import boto.ec2 + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping + from boto.exception import EC2ResponseError + from boto import connect_ec2_endpoint + from boto import connect_vpc + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None): + + # get reservations for instances that match tag(s) and are in the desired state + state = module.params.get('state') + if state not in ['running', 'stopped']: +
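# 'present', 'absent' and 'restarted' do not map to a single EC2 instance-state filter, + # so count every non-terminated instance instead +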
state = None + reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone) + + instances = [] + for res in reservations: + if hasattr(res, 'instances'): + for inst in res.instances: + if inst.state == 'terminated' or inst.state == 'shutting-down': + continue + instances.append(inst) + + return reservations, instances + + +def _set_none_to_blank(dictionary): + result = dictionary + for k in result: + if isinstance(result[k], dict): + result[k] = _set_none_to_blank(result[k]) + elif not result[k]: + result[k] = "" + return result + + +def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None): + # TODO: filters do not work with tags that have underscores + filters = dict() + + vpc_subnet_id = module.params.get('vpc_subnet_id') + vpc_id = None + if vpc_subnet_id: + filters.update({"subnet-id": vpc_subnet_id}) + if vpc: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + + if vpc_id: + filters.update({"vpc-id": vpc_id}) + + if tags is not None: + + if isinstance(tags, str): + try: + tags = literal_eval(tags) + except Exception: + pass + + # if not a string type, convert and make sure it's a text string + if isinstance(tags, int): + tags = to_text(tags) + + # if string, we only care that a tag of that name exists + if isinstance(tags, str): + filters.update({"tag-key": tags}) + + # if list, append each item to filters + if isinstance(tags, list): + for x in tags: + if isinstance(x, dict): + x = _set_none_to_blank(x) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items())) + else: + filters.update({"tag-key": x}) + + # if dict, add the key and value to the filter + if isinstance(tags, dict): + tags = _set_none_to_blank(tags) + filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items())) + + # let's check to see if the filters dict is empty; if so, then stop + if not filters: + module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags)) + + if state: + # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api + filters.update({'instance-state-name': state}) + + if zone: + filters.update({'availability-zone': zone}) + + if module.params.get('id'): + filters['client-token'] = module.params['id'] + + results = ec2.get_all_instances(filters=filters) + + return results + + +def get_instance_info(inst): + """ + Retrieves instance information from an instance + object and returns it as a dictionary + """ + instance_info = {'id': inst.id, + 'ami_launch_index': inst.ami_launch_index, + 'private_ip': inst.private_ip_address, + 'private_dns_name': inst.private_dns_name, + 'public_ip': inst.ip_address, + 'dns_name': inst.dns_name, + 'public_dns_name': inst.public_dns_name, + 'state_code': inst.state_code, + 'architecture': inst.architecture, + 'image_id': inst.image_id, + 'key_name': inst.key_name, + 'placement': inst.placement, + 'region': inst.placement[:-1], + 'kernel': inst.kernel, + 'ramdisk': inst.ramdisk, + 'launch_time': inst.launch_time, + 'instance_type': inst.instance_type, + 'root_device_type': inst.root_device_type, + 'root_device_name': inst.root_device_name, + 'state': inst.state, + 'hypervisor': inst.hypervisor, + 'tags': inst.tags, + 'groups': dict((group.id, group.name) for group in inst.groups), + } + try: + instance_info['virtualization_type'] = getattr(inst, 'virtualization_type') + except AttributeError: + instance_info['virtualization_type'] = None + + try: + instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized') + except AttributeError: + 
instance_info['ebs_optimized'] = False + + try: + bdm_dict = {} + bdm = getattr(inst, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + instance_info['block_device_mapping'] = bdm_dict + except AttributeError: + instance_info['block_device_mapping'] = False + + try: + instance_info['tenancy'] = getattr(inst, 'placement_tenancy') + except AttributeError: + instance_info['tenancy'] = 'default' + + return instance_info + + +def boto_supports_associate_public_ip_address(ec2): + """ + Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification + class. Added in Boto 2.13.0 + + ec2: authenticated ec2 connection object + + Returns: + True if Boto library accepts associate_public_ip_address argument, else False + """ + + try: + network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification() + getattr(network_interface, "associate_public_ip_address") + return True + except AttributeError: + return False + + +def boto_supports_profile_name_arg(ec2): + """ + Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0 + + ec2: authenticated ec2 connection object + + Returns: + True if Boto library accepts instance_profile_name argument, else False + """ + run_instances_method = getattr(ec2, 'run_instances') + return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames + + +def boto_supports_volume_encryption(): + """ + Check if Boto library supports encryption of EBS volumes (added in 2.29.0) + + Returns: + True if the boto library supports encryption of EBS volumes, else False + """ + return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') + + +def create_block_device(module, ec2, volume): + # Not aware of a way to determine this programmatically + # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ + MAX_IOPS_TO_SIZE_RATIO = 30 + + volume_type = volume.get('volume_type') + + if 'snapshot' not in volume and 'ephemeral' not in volume: + if 'volume_size' not in volume: + module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') + if 'snapshot' in volume: + if volume_type == 'io1' and 'iops' not in volume: + module.fail_json(msg='io1 volumes must have an iops value set') + if 'iops' in volume: + snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0] + size = volume.get('volume_size', snapshot.volume_size) + if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: + module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) + if 'ephemeral' in volume: + if 'snapshot' in volume: + module.fail_json(msg='Cannot set both ephemeral and snapshot') + if boto_supports_volume_encryption(): + return BlockDeviceType(snapshot_id=volume.get('snapshot'), + ephemeral_name=volume.get('ephemeral'), + size=volume.get('volume_size'), + volume_type=volume_type, + delete_on_termination=volume.get('delete_on_termination', False), + iops=volume.get('iops'), + encrypted=volume.get('encrypted', None)) + else: + return BlockDeviceType(snapshot_id=volume.get('snapshot'), + ephemeral_name=volume.get('ephemeral'), + size=volume.get('volume_size'), + volume_type=volume_type, + 
delete_on_termination=volume.get('delete_on_termination', False), + iops=volume.get('iops')) + + +def boto_supports_param_in_spot_request(ec2, param): + """ + Check if Boto library has the named param in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0. + + ec2: authenticated ec2 connection object + + Returns: + True if boto library has the named param as an argument on the request_spot_instances method, else False + """ + method = getattr(ec2, 'request_spot_instances') + return param in get_function_code(method).co_varnames + + +def await_spot_requests(module, ec2, spot_requests, count): + """ + Wait for a group of spot requests to be fulfilled, or fail. + + module: Ansible module object + ec2: authenticated ec2 connection object + spot_requests: list of boto.ec2.spotinstancerequest.SpotInstanceRequest objects returned by ec2.request_spot_instances + count: Total number of instances to be created by the spot requests + + Returns: + list of instance IDs created by the spot request(s) + """ + spot_wait_timeout = int(module.params.get('spot_wait_timeout')) + wait_complete = time.time() + spot_wait_timeout + + spot_req_inst_ids = dict() + while time.time() < wait_complete: + reqs = ec2.get_all_spot_instance_requests() + for sirb in spot_requests: + if sirb.id in spot_req_inst_ids: + continue + for sir in reqs: + if sir.id != sirb.id: + continue # this is not our spot instance + if sir.instance_id is not None: + spot_req_inst_ids[sirb.id] = sir.instance_id + elif sir.state == 'open': + continue # still waiting, nothing to do here + elif sir.state == 'active': + continue # Instance is created already, nothing to do here + elif sir.state == 'failed': + module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % ( + sir.id, sir.status.code, sir.fault.code, sir.fault.message)) + elif sir.state == 'cancelled': + module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id) + elif sir.state == 'closed': + # instance is terminating or marked for termination + # this may be intentional on the part of the operator, + # or it may have been terminated by AWS due to capacity, + # price, or group constraints. In this case, we'll fail + # the module if the reason for the state is anything + # other than termination by user. 
Codes are documented at + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html + if sir.status.code == 'instance-terminated-by-user': + # do nothing, since the user likely did this on purpose + pass + else: + spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s" + module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message)) + + if len(spot_req_inst_ids) < count: + time.sleep(5) + else: + return list(spot_req_inst_ids.values()) + module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime()) + + +def enforce_count(module, ec2, vpc): + + exact_count = module.params.get('exact_count') + count_tag = module.params.get('count_tag') + zone = module.params.get('zone') + + # fail here if the exact count was specified without filtering + # on a tag, as this may lead to an undesired removal of instances + if exact_count and count_tag is None: + module.fail_json(msg="you must use the 'count_tag' option with exact_count") + + reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone) + + changed = None + checkmode = False + instance_dict_array = [] + changed_instance_ids = None + + if len(instances) == exact_count: + changed = False + elif len(instances) < exact_count: + changed = True + to_create = exact_count - len(instances) + if not checkmode: + (instance_dict_array, changed_instance_ids, changed) \ + = create_instances(module, ec2, vpc, override_count=to_create) + + for inst in instance_dict_array: + instances.append(inst) + elif len(instances) > exact_count: + changed = True + to_remove = len(instances) - exact_count + if not checkmode: + all_instance_ids = sorted([x.id for x in instances]) + remove_ids = all_instance_ids[0:to_remove] + + instances = [x for x in instances if x.id not in remove_ids] + + (changed, instance_dict_array, changed_instance_ids) \ + = terminate_instances(module, ec2, remove_ids) + terminated_list = [] + for inst in instance_dict_array: + inst['state'] = "terminated" + terminated_list.append(inst) + instance_dict_array = terminated_list + + # ensure all instances are dictionaries + all_instances = [] + for inst in instances: + + if not isinstance(inst, dict): + warn_if_public_ip_assignment_changed(module, inst) + inst = get_instance_info(inst) + all_instances.append(inst) + + return (all_instances, instance_dict_array, changed_instance_ids, changed) + + +def create_instances(module, ec2, vpc, override_count=None): + """ + Creates new instances + + module : AnsibleModule object + ec2: authenticated ec2 connection object + + Returns: + A list of dictionaries with instance information + about the instances that were launched + """ + + key_name = module.params.get('key_name') + id = module.params.get('id') + group_name = module.params.get('group') + group_id = module.params.get('group_id') + zone = module.params.get('zone') + instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') + spot_price = module.params.get('spot_price') + spot_type = module.params.get('spot_type') + image = module.params.get('image') + if override_count: + count = override_count + else: + count = module.params.get('count') + monitoring = module.params.get('monitoring') + kernel = module.params.get('kernel') + ramdisk = module.params.get('ramdisk') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + spot_wait_timeout = int(module.params.get('spot_wait_timeout')) + placement_group = 
module.params.get('placement_group') + user_data = module.params.get('user_data') + instance_tags = module.params.get('instance_tags') + vpc_subnet_id = module.params.get('vpc_subnet_id') + assign_public_ip = module.boolean(module.params.get('assign_public_ip')) + private_ip = module.params.get('private_ip') + instance_profile_name = module.params.get('instance_profile_name') + volumes = module.params.get('volumes') + ebs_optimized = module.params.get('ebs_optimized') + exact_count = module.params.get('exact_count') + count_tag = module.params.get('count_tag') + source_dest_check = module.boolean(module.params.get('source_dest_check')) + termination_protection = module.boolean(module.params.get('termination_protection')) + network_interfaces = module.params.get('network_interfaces') + spot_launch_group = module.params.get('spot_launch_group') + instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior') + + vpc_id = None + if vpc_subnet_id: + if not vpc: + module.fail_json(msg="region must be specified") + else: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + else: + vpc_id = None + + try: + # Here we try to look up the group id from the security group name, if group is set. + if group_name: + if vpc_id: + grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id}) + else: + grp_details = ec2.get_all_security_groups() + if isinstance(group_name, string_types): + group_name = [group_name] + unmatched = set(group_name).difference(str(grp.name) for grp in grp_details) + if len(unmatched) > 0: + module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched)) + group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name] + # Now we look up the group, testing that the given group id exists. + elif group_id: + # wrap the group_id in a list if it's not one already + if isinstance(group_id, string_types): + group_id = [group_id] + grp_details = ec2.get_all_security_groups(group_ids=group_id) + group_name = [grp_item.name for grp_item in grp_details] + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + # Look up any instances that match our run id. + + running_instances = [] + count_remaining = int(count) + + if id is not None: + filter_dict = {'client-token': id, 'instance-state-name': 'running'} + previous_reservations = ec2.get_all_instances(None, filter_dict) + for res in previous_reservations: + for prev_instance in res.instances: + running_instances.append(prev_instance) + count_remaining = count_remaining - len(running_instances) + + # Both min_count and max_count equal the count parameter. This means the launch request is explicit (we want count instances, or fail) about how many instances we want. 
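+ # e.g. count=5 with 2 matching instances already running under this client token leaves + # count_remaining=3, so the request below asks EC2 for exactly min_count=3, max_count=3 more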
+ + if count_remaining == 0: + changed = False + else: + changed = True + try: + params = {'image_id': image, + 'key_name': key_name, + 'monitoring_enabled': monitoring, + 'placement': zone, + 'instance_type': instance_type, + 'kernel_id': kernel, + 'ramdisk_id': ramdisk} + if user_data is not None: + params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict') + + if ebs_optimized: + params['ebs_optimized'] = ebs_optimized + + # 'tenancy' always has a default value, but it is not a valid parameter for a spot instance request + if not spot_price: + params['tenancy'] = tenancy + + if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name + else: + if instance_profile_name is not None: + module.fail_json( + msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") + + if assign_public_ip is not None: + if not boto_supports_associate_public_ip_address(ec2): + module.fail_json( + msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.") + elif not vpc_subnet_id: + module.fail_json( + msg="assign_public_ip only available with vpc_subnet_id") + + else: + if private_ip: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + private_ip_address=private_ip, + groups=group_id, + associate_public_ip_address=assign_public_ip) + else: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=vpc_subnet_id, + groups=group_id, + associate_public_ip_address=assign_public_ip) + interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + params['network_interfaces'] = interfaces + else: + if network_interfaces: + if isinstance(network_interfaces, string_types): + network_interfaces = [network_interfaces] + interfaces = [] + for i, network_interface_id in enumerate(network_interfaces): + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + network_interface_id=network_interface_id, + device_index=i) + interfaces.append(interface) + params['network_interfaces'] = \ + boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) + else: + params['subnet_id'] = vpc_subnet_id + if vpc_subnet_id: + params['security_group_ids'] = group_id + else: + params['security_groups'] = group_name + + if volumes: + bdm = BlockDeviceMapping() + for volume in volumes: + if 'device_name' not in volume: + module.fail_json(msg='Device name must be set for volume') + # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0 + # as a signal not to create this volume + if 'volume_size' not in volume or int(volume['volume_size']) > 0: + bdm[volume['device_name']] = create_block_device(module, ec2, volume) + + params['block_device_map'] = bdm + + # check to see if we're using spot pricing first before starting instances + if not spot_price: + if assign_public_ip is not None and private_ip: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + ) + ) + else: + params.update( + dict( + min_count=count_remaining, + max_count=count_remaining, + client_token=id, + placement_group=placement_group, + private_ip_address=private_ip, + ) + ) + + # For ordinary (not spot) instances, we can select 'stop' + # (the default) or 'terminate' here. 
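+ # Note: instance-store-backed AMIs only support 'terminate'; the except clause + # below retries with 'terminate' when EC2 rejects 'stop' with InvalidParameterCombination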
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop' + + try: + res = ec2.run_instances(**params) + except boto.exception.EC2ResponseError as e: + if (params['instance_initiated_shutdown_behavior'] != 'terminate' and + "InvalidParameterCombination" == e.error_code): + params['instance_initiated_shutdown_behavior'] = 'terminate' + res = ec2.run_instances(**params) + else: + raise + + instids = [i.id for i in res.instances] + while True: + try: + ec2.get_all_instances(instids) + break + except boto.exception.EC2ResponseError as e: + if "InvalidInstanceID.NotFound" in str(e): + # there's a race between starting and getting an instance + continue + else: + module.fail_json(msg=str(e)) + + # The instances returned through ec2.run_instances above can be in + # terminated state due to idempotency. See commit 7f11c3d for a complete + # explanation. + terminated_instances = [ + str(instance.id) for instance in res.instances if instance.state == 'terminated' + ] + if terminated_instances: + module.fail_json(msg="Instances with id(s) %s " % terminated_instances + + "were created previously but have since been terminated - " + + "use a (possibly different) 'instanceid' parameter") + + else: + if private_ip: + module.fail_json( + msg='private_ip only available with on-demand (non-spot) instances') + if boto_supports_param_in_spot_request(ec2, 'placement_group'): + params['placement_group'] = placement_group + elif placement_group: + module.fail_json( + msg="placement_group parameter requires Boto version 2.3.0 or higher.") + + # You can't tell spot instances to 'stop'; they will always be + # 'terminate'd. For convenience, we'll ignore the latter value. + if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate': + module.fail_json( + msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.") + + if spot_launch_group and isinstance(spot_launch_group, string_types): + params['launch_group'] = spot_launch_group + + params.update(dict( + count=count_remaining, + type=spot_type, + )) + + # Set spot ValidUntil + # ValidUntil -> (timestamp). The end date of the request, in + # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + utc_valid_until = ( + datetime.datetime.utcnow() + + datetime.timedelta(seconds=spot_wait_timeout)) + params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z') + + res = ec2.request_spot_instances(spot_price, **params) + + # Now we have to do the intermediate waiting + if wait: + instids = await_spot_requests(module, ec2, res, count) + else: + instids = [] + except boto.exception.BotoServerError as e: + module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message)) + + # wait here until the instances are up + num_running = 0 + wait_timeout = time.time() + wait_timeout + res_list = () + while wait_timeout > time.time() and num_running < len(instids): + try: + res_list = ec2.get_all_instances(instids) + except boto.exception.BotoServerError as e: + if e.error_code == 'InvalidInstanceID.NotFound': + time.sleep(1) + continue + else: + raise + + num_running = 0 + for res in res_list: + num_running += len([i for i in res.instances if i.state == 'running']) + if len(res_list) <= 0: + # got a bad response of some sort, possibly due to + # stale/cached data. 
Wait a second and then try again
+            time.sleep(1)
+            continue
+        if wait and num_running < len(instids):
+            time.sleep(5)
+        else:
+            break
+
+    if wait and wait_timeout <= time.time():
+        # waiting took too long
+        module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+    # We do this after the loop ends so that we end up with one list
+    for res in res_list:
+        running_instances.extend(res.instances)
+
+    # Enabled by default by AWS; apply this to every started instance,
+    # not only those in the last reservation
+    if source_dest_check is False:
+        for inst in running_instances:
+            inst.modify_attribute('sourceDestCheck', False)
+
+    # Disabled by default by AWS
+    if termination_protection is True:
+        for inst in running_instances:
+            inst.modify_attribute('disableApiTermination', True)
+
+    # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
+    if instance_tags and instids:
+        try:
+            ec2.create_tags(instids, instance_tags)
+        except boto.exception.EC2ResponseError as e:
+            module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
+
+    instance_dict_array = []
+    created_instance_ids = []
+    for inst in running_instances:
+        inst.update()
+        d = get_instance_info(inst)
+        created_instance_ids.append(inst.id)
+        instance_dict_array.append(d)
+
+    return (instance_dict_array, created_instance_ids, changed)
+
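+
+# --- Editor's illustrative sketch (not part of the original module) ---
+# The wait loops in create_instances above and in terminate_instances /
+# startstop_instances below all follow the same poll-until-deadline shape.
+# Reduced to a minimal, self-contained helper (the name `_poll_until` is
+# hypothetical and nothing in this module calls it; it relies only on the
+# module's existing `import time`):
+def _poll_until(predicate, timeout, interval=5):
+    """Call predicate() every `interval` seconds until it returns a truthy
+    value or `timeout` seconds have elapsed; return the last result."""
+    deadline = time.time() + timeout
+    result = predicate()
+    while not result and time.time() < deadline:
+        time.sleep(interval)
+        result = predicate()
+    return result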
+
+
+def terminate_instances(module, ec2, instance_ids):
+    """
+    Terminates a list of instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: a list of instances to terminate in the form of
+      [ {id: <inst-id>}, ..]
+
+    Returns a tuple of (changed, array of instance dicts,
+    list of terminated instance ids).
+
+    If the instances are already terminated,
+    "changed" will remain False.
+
+    """
+
+    # Whether to wait for termination to complete before returning
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    terminated_instance_ids = []
+    for res in ec2.get_all_instances(instance_ids):
+        for inst in res.instances:
+            if inst.state == 'running' or inst.state == 'stopped':
+                terminated_instance_ids.append(inst.id)
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    ec2.terminate_instances([inst.id])
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
+
+    # wait here until the instances are 'terminated'
+    if wait:
+        num_terminated = 0
+        wait_timeout = time.time() + wait_timeout
+        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+            response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+                                             filters={'instance-state-name': 'terminated'})
+            try:
+                num_terminated = sum([len(res.instances) for res in response])
+            except Exception:
+                # got a bad response of some sort, possibly due to
+                # stale/cached data. Wait a second and then try again
+                time.sleep(1)
+                continue
+
+            if num_terminated < len(terminated_instance_ids):
+                time.sleep(5)
+
+        # waiting took too long
+        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+            module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+        # Let's get the current state of the instances after terminating - issue600
+        instance_dict_array = []
+        for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+            for inst in res.instances:
+                instance_dict_array.append(get_instance_info(inst))
+
+    return (changed, instance_dict_array, terminated_instance_ids)
+
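+
+# --- Editor's illustrative sketch (not part of the original module) ---
+# startstop_instances and restart_instances below both turn the
+# `instance_tags` dict into an EC2 API filter by prefixing each key with
+# 'tag:'. In isolation (the helper name is hypothetical and unused by the
+# module; only the stdlib is needed):
+def _tags_to_filters(instance_tags):
+    """Turn {'Name': 'web'} into {'tag:Name': 'web'}; an empty dict filters nothing."""
+    return dict(('tag:' + key, value) for key, value in (instance_tags or {}).items())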
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+    """
+    Starts or stops a list of existing instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: The list of instances to start or stop in the form of
+      [ {id: <inst-id>}, ..]
+    instance_tags: A dict of tag keys and values in the form of
+      {key: value, ... }
+    state: Intended state ("running" or "stopped")
+
+    Returns a tuple of (changed, array of instance dicts,
+    list of instance ids).
+
+    If the instance was not able to change state,
+    "changed" will be set to False.
+
+    Note that if instance_ids and instance_tags are both non-empty,
+    this method will process the intersection of the two.
+    """
+
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+    group_id = module.params.get('group_id')
+    group_name = module.params.get('group')
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        # Fail unless the user defined instance tags
+        if not instance_tags:
+            module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    # To make an EC2 tag filter, we need to prepend 'tag:' to each key
+    # (sketched in the editor's note above).
+    # An empty filter does no filtering, so it's safe to pass it to the
+    # get_all_instances method even if the user did not specify instance_tags
+    filters = {}
+    if instance_tags:
+        for key, value in instance_tags.items():
+            filters["tag:" + key] = value
+
+    if module.params.get('id'):
+        filters['client-token'] = module.params['id']
+
+    # Check (and eventually change) instances attributes and instances state
+    existing_instances_array = []
+    for res in ec2.get_all_instances(instance_ids, filters=filters):
+        for inst in res.instances:
+
+            warn_if_public_ip_assignment_changed(module, inst)
+
+            changed = (check_source_dest_attr(module, inst, ec2) or
+                       check_termination_protection(module, inst) or changed)
+
+            # Check security groups: only EC2-VPC security groups can be
+            # modified here; EC2-Classic security groups cannot be
+            if inst.vpc_id and group_name:
+                grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
+                if isinstance(group_name, string_types):
+                    group_name = [group_name]
+                unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
+                if unmatched:
+                    module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+                group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
+            elif inst.vpc_id and group_id:
+                if isinstance(group_id, string_types):
+                    group_id = [group_id]
+                grp_details = ec2.get_all_security_groups(group_ids=group_id)
+                group_ids = [grp_item.id for grp_item in grp_details]
+            if inst.vpc_id and (group_name or group_id):
+                if set(sg.id for sg in inst.groups) != set(group_ids):
+                    changed = inst.modify_attribute('groupSet', group_ids)
+
+            # Check instance state
+            if inst.state != state:
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    if state == 'running':
+                        inst.start()
+                    else:
+                        inst.stop()
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
+            existing_instances_array.append(inst.id)
+
+    instance_ids = list(set(existing_instances_array + (instance_ids or [])))
+    # Wait for all the instances to finish starting or stopping
+    wait_timeout = time.time() + wait_timeout
+    while wait and wait_timeout > time.time():
+        instance_dict_array = []
+        matched_instances = []
+        for res in ec2.get_all_instances(instance_ids):
+            for i in res.instances:
+                if i.state == state:
+                    instance_dict_array.append(get_instance_info(i))
+                    matched_instances.append(i)
+        if len(matched_instances) < len(instance_ids):
+            time.sleep(5)
+        else:
+            break
+
+    if wait and wait_timeout <= time.time():
+        # waiting took too long
+        module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+    return (changed, instance_dict_array, instance_ids)
+
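+
+# --- Editor's illustrative sketch (not part of the original module) ---
+# The security-group reconciliation in startstop_instances above boils down
+# to a set comparison: modify_attribute('groupSet', ...) is only called when
+# the instance's current groups differ from the requested ones. In essence
+# (hypothetical helper, unused by the module):
+def _groups_differ(current_group_ids, requested_group_ids):
+    """True when the instance's security groups need to be replaced."""
+    return set(current_group_ids) != set(requested_group_ids)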
+
+
+def restart_instances(module, ec2, instance_ids, state, instance_tags):
+    """
+    Restarts a list of existing instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: The list of instances to restart in the form of
+      [ {id: <inst-id>}, ..]
+    instance_tags: A dict of tag keys and values in the form of
+      {key: value, ... }
+    state: Intended state ("restarted")
+
+    Returns a tuple of (changed, array of instance dicts,
+    list of instance ids).
+
+    If the instance was not able to change state,
+    "changed" will be set to False.
+
+    Wait does not apply here, as rebooting is an OS-level operation.
+
+    Note that if instance_ids and instance_tags are both non-empty,
+    this method will process the intersection of the two.
+    """
+
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        # Fail unless the user defined instance tags
+        if not instance_tags:
+            module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+    # An empty filter does no filtering, so it's safe to pass it to the
+    # get_all_instances method even if the user did not specify instance_tags
+    filters = {}
+    if instance_tags:
+        for key, value in instance_tags.items():
+            filters["tag:" + key] = value
+    if module.params.get('id'):
+        filters['client-token'] = module.params['id']
+
+    # Check (and eventually change) instances attributes and instances state
+    for res in ec2.get_all_instances(instance_ids, filters=filters):
+        for inst in res.instances:
+
+            warn_if_public_ip_assignment_changed(module, inst)
+
+            changed = (check_source_dest_attr(module, inst, ec2) or
+                       check_termination_protection(module, inst) or changed)
+
+            # Check instance state
+            if inst.state != state:
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    inst.reboot()
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
+
+    return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+    """
+    Check the instance disableApiTermination attribute.
+
+    module: Ansible module object
+    inst: EC2 instance object
+
+    returns: True if state changed, None otherwise
+    """
+
+    termination_protection = module.params.get('termination_protection')
+
+    if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
+        inst.modify_attribute('disableApiTermination', termination_protection)
+        return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+    """
+    Check the instance sourceDestCheck attribute.
+
+    module: Ansible module object
+    inst: EC2 instance object
+    ec2: authenticated ec2 connection object
+
+    returns: True if state changed, None otherwise
+    """
+
+    source_dest_check = module.params.get('source_dest_check')
+
+    if source_dest_check is not None:
+        try:
+            if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+                inst.modify_attribute('sourceDestCheck', source_dest_check)
+                return True
+        except boto.exception.EC2ResponseError as exc:
+            # instances with more than one Elastic Network Interface will
+            # fail, because they have the sourceDestCheck attribute defined
+            # per-interface
+            if exc.code == 'InvalidInstanceID':
+                for interface in inst.interfaces:
+                    if interface.source_dest_check != source_dest_check:
+                        ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+                        return True
+            else:
+                module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+                                 exception=traceback.format_exc())
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+    # This is a non-modifiable attribute.
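+    # (Editor's note:) public-IP assignment is fixed at launch time, so when
+    # the requested value disagrees with the live instance the module can
+    # only warn (below) rather than change the attribute.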
+ assign_public_ip = module.params.get('assign_public_ip') + + # Check that public ip assignment is the same and warn if not + public_dns_name = getattr(instance, 'public_dns_name', None) + if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False): + module.warn("Unable to modify public ip assignment to {0} for instance {1}. " + "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + key_name=dict(aliases=['keypair']), + id=dict(), + group=dict(type='list', aliases=['groups']), + group_id=dict(type='list'), + zone=dict(aliases=['aws_zone', 'ec2_zone']), + instance_type=dict(aliases=['type']), + spot_price=dict(), + spot_type=dict(default='one-time', choices=["one-time", "persistent"]), + spot_launch_group=dict(), + image=dict(), + kernel=dict(), + count=dict(type='int', default='1'), + monitoring=dict(type='bool', default=False), + ramdisk=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + spot_wait_timeout=dict(type='int', default=600), + placement_group=dict(), + user_data=dict(), + instance_tags=dict(type='dict'), + vpc_subnet_id=dict(), + assign_public_ip=dict(type='bool'), + private_ip=dict(), + instance_profile_name=dict(), + instance_ids=dict(type='list', aliases=['instance_id']), + source_dest_check=dict(type='bool', default=None), + termination_protection=dict(type='bool', default=None), + state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']), + instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']), + exact_count=dict(type='int', default=None), + count_tag=dict(type='raw'), + volumes=dict(type='list'), + ebs_optimized=dict(type='bool', default=False), + tenancy=dict(default='default', choices=['default', 'dedicated']), + network_interfaces=dict(type='list', aliases=['network_interface']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + # Can be uncommented when we finish the deprecation cycle. + # ['group', 'group_id'], + ['exact_count', 'count'], + ['exact_count', 'state'], + ['exact_count', 'instance_ids'], + ['network_interfaces', 'assign_public_ip'], + ['network_interfaces', 'group'], + ['network_interfaces', 'group_id'], + ['network_interfaces', 'private_ip'], + ['network_interfaces', 'vpc_subnet_id'], + ], + ) + + if module.params.get('group') and module.params.get('group_id'): + module.deprecate( + msg='Support for passing both group and group_id has been deprecated. 
' + 'Currently group_id is ignored, in future passing both will result in an error', + version='2.14', collection_name='ansible.builtin') + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + if module.params.get('region') or not module.params.get('ec2_url'): + ec2 = ec2_connect(module) + elif module.params.get('ec2_url'): + ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs) + + if 'region' not in aws_connect_kwargs: + aws_connect_kwargs['region'] = ec2.region + + vpc = connect_vpc(**aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc()) + + tagged_instances = [] + + state = module.params['state'] + + if state == 'absent': + instance_ids = module.params['instance_ids'] + if not instance_ids: + module.fail_json(msg='instance_ids list is required for absent state') + + (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) + + elif state in ('running', 'stopped'): + instance_ids = module.params.get('instance_ids') + instance_tags = module.params.get('instance_tags') + if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): + module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) + + (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags) + + elif state in ('restarted'): + instance_ids = module.params.get('instance_ids') + instance_tags = module.params.get('instance_tags') + if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): + module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) + + (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags) + + elif state == 'present': + # Changed is always set to true when provisioning new instances + if not module.params.get('image'): + module.fail_json(msg='image parameter is required for new instance') + + if module.params.get('exact_count') is None: + (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc) + else: + (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc) + + # Always return instances in the same order + if new_instance_ids: + new_instance_ids.sort() + if instance_dict_array: + instance_dict_array.sort(key=lambda x: x['id']) + if tagged_instances: + tagged_instances.sort(key=lambda x: x['id']) + + module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py new file mode 100644 index 00000000..53c2374d --- /dev/null +++ b/test/support/integration/plugins/modules/ec2_ami_info.py @@ -0,0 +1,282 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: ec2_ami_info 
+version_added: '2.5' +short_description: Gather information about ec2 AMIs +description: + - Gather information about ec2 AMIs + - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change. +author: + - Prasad Katti (@prasadkatti) +requirements: [ boto3 ] +options: + image_ids: + description: One or more image IDs. + aliases: [image_id] + type: list + elements: str + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters. + - Filter names and values are case sensitive. + type: dict + owners: + description: + - Filter the images by the owner. Valid options are an AWS account ID, self, + or an AWS owner alias ( amazon | aws-marketplace | microsoft ). + aliases: [owner] + type: list + elements: str + executable_users: + description: + - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs). + aliases: [executable_user] + type: list + elements: str + describe_image_attributes: + description: + - Describe attributes (like launchPermission) of the images found. + default: no + type: bool + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: gather information about an AMI using ami-id + ec2_ami_info: + image_ids: ami-5b488823 + +- name: gather information about all AMIs with tag key Name and value webapp + ec2_ami_info: + filters: + "tag:Name": webapp + +- name: gather information about an AMI with 'AMI Name' equal to foobar + ec2_ami_info: + filters: + name: foobar + +- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477) + ec2_ami_info: + owners: 099720109477 + filters: + name: "ubuntu/images/ubuntu-zesty-17.04-*" +''' + +RETURN = ''' +images: + description: A list of images. + returned: always + type: list + elements: dict + contains: + architecture: + description: The architecture of the image. + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries. + returned: always + type: list + elements: dict + contains: + device_name: + description: The device name exposed to the instance. + returned: always + type: str + sample: /dev/sda1 + ebs: + description: EBS volumes + returned: always + type: complex + creation_date: + description: The date and time the image was created. + returned: always + type: str + sample: '2017-10-16T19:22:13.000Z' + description: + description: The description of the AMI. + returned: always + type: str + sample: '' + ena_support: + description: Whether enhanced networking with ENA is enabled. + returned: always + type: bool + sample: true + hypervisor: + description: The hypervisor type of the image. + returned: always + type: str + sample: xen + image_id: + description: The ID of the AMI. + returned: always + type: str + sample: ami-5b466623 + image_location: + description: The location of the AMI. + returned: always + type: str + sample: 408466080000/Webapp + image_type: + description: The type of image. + returned: always + type: str + sample: machine + launch_permissions: + description: A List of AWS accounts may launch the AMI. + returned: When image is owned by calling account and I(describe_image_attributes) is yes. 
+ type: list + elements: dict + contains: + group: + description: A value of 'all' means the AMI is public. + type: str + user_id: + description: An AWS account ID with permissions to launch the AMI. + type: str + sample: [{"group": "all"}, {"user_id": "408466080000"}] + name: + description: The name of the AMI that was provided during image creation. + returned: always + type: str + sample: Webapp + owner_id: + description: The AWS account ID of the image owner. + returned: always + type: str + sample: '408466080000' + public: + description: Whether the image has public launch permissions. + returned: always + type: bool + sample: true + root_device_name: + description: The device name of the root device. + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + sriov_net_support: + description: Whether enhanced networking is enabled. + returned: always + type: str + sample: simple + state: + description: The current state of the AMI. + returned: always + type: str + sample: available + tags: + description: Any tags assigned to the image. + returned: always + type: dict + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict + + +def list_ec2_images(ec2_client, module): + + image_ids = module.params.get("image_ids") + owners = module.params.get("owners") + executable_users = module.params.get("executable_users") + filters = module.params.get("filters") + owner_param = [] + + # describe_images is *very* slow if you pass the `Owners` + # param (unless it's self), for some reason. + # Converting the owners to filters and removing from the + # owners param greatly speeds things up. 
+ # Implementation based on aioue's suggestion in #24886 + for owner in owners: + if owner.isdigit(): + if 'owner-id' not in filters: + filters['owner-id'] = list() + filters['owner-id'].append(owner) + elif owner == 'self': + # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + owner_param.append(owner) + else: + if 'owner-alias' not in filters: + filters['owner-alias'] = list() + filters['owner-alias'].append(owner) + + filters = ansible_dict_to_boto3_filter_list(filters) + + try: + images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users) + images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="error describing images") + for image in images: + try: + image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + if module.params.get("describe_image_attributes"): + launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions'] + image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except (ClientError, BotoCoreError) as err: + # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures + pass + + images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist + module.exit_json(images=images) + + +def main(): + + argument_spec = dict( + image_ids=dict(default=[], type='list', aliases=['image_id']), + filters=dict(default={}, type='dict'), + owners=dict(default=[], type='list', aliases=['owner']), + executable_users=dict(default=[], type='list', aliases=['executable_user']), + describe_image_attributes=dict(default=False, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + if module._module._name == 'ec2_ami_facts': + module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", + version='2.13', collection_name='ansible.builtin') + + ec2_client = module.client('ec2') + + list_ec2_images(ec2_client, module) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py new file mode 100644 index 00000000..bc416f66 --- /dev/null +++ b/test/support/integration/plugins/modules/ec2_group.py @@ -0,0 +1,1345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: ec2_group +author: "Andrew de Quincey (@adq)" +version_added: "1.3" +requirements: [ boto3 ] +short_description: maintain an ec2 VPC security group. +description: + - Maintains ec2 security groups. This module has a dependency on python-boto >= 2.5. +options: + name: + description: + - Name of the security group. + - One of and only one of I(name) or I(group_id) is required. + - Required if I(state=present). + required: false + type: str + group_id: + description: + - Id of group to delete (works only with absent). 
+ - One of and only one of I(name) or I(group_id) is required. + required: false + version_added: "2.4" + type: str + description: + description: + - Description of the security group. Required when C(state) is C(present). + required: false + type: str + vpc_id: + description: + - ID of the VPC to create the group in. + required: false + type: str + rules: + description: + - List of firewall inbound rules to enforce in this group (see example). If none are supplied, + no inbound rules will be enabled. Rules list may include its own name in `group_name`. + This allows idempotent loopback additions (e.g. allow group to access itself). + Rule sources list support was added in version 2.4. This allows to define multiple sources per + source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed. + In version 2.5 support for rule descriptions was added. + required: false + type: list + elements: dict + suboptions: + cidr_ip: + type: str + description: + - The IPv4 CIDR range traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is coming from. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + from_port: + type: int + description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. + to_port: + type: int + description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. + rule_desc: + type: str + description: A description for the rule. + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, + a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. + Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions + was added. + required: false + version_added: "1.6" + type: list + elements: dict + suboptions: + cidr_ip: + type: str + description: + - The IPv4 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). 
+ cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is going to. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + from_port: + type: int + description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports. + to_port: + type: int + description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports. + rule_desc: + type: str + description: A description for the rule. + state: + version_added: "1.4" + description: + - Create or delete a security group. + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + type: str + purge_rules: + version_added: "1.8" + description: + - Purge existing rules on security group that are not found in rules. + required: false + default: 'true' + aliases: [] + type: bool + purge_rules_egress: + version_added: "1.8" + description: + - Purge existing rules_egress on security group that are not found in rules_egress. + required: false + default: 'true' + aliases: [] + type: bool + tags: + version_added: "2.4" + description: + - A dictionary of one or more tags to assign to the security group. + required: false + type: dict + aliases: ['resource_tags'] + purge_tags: + version_added: "2.4" + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + required: false + default: yes + type: bool + +extends_documentation_fragment: + - aws + - ec2 + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. + - Preview diff mode support is added in version 2.7. 
+''' + +EXAMPLES = ''' +- name: example using security group rule descriptions + ec2_group: + name: "{{ name }}" + description: sg with rule descriptions + vpc_id: vpc-xxxxxxxx + profile: "{{ aws_profile }}" + region: us-east-1 + rules: + - proto: tcp + ports: + - 80 + cidr_ip: 0.0.0.0/0 + rule_desc: allow all on port 80 + +- name: example ec2 group + ec2_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + # this should only be needed for EC2 Classic security group rules + # because in a VPC an ELB will use a user-account security group + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + - proto: all + # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6), + # traffic on all ports is allowed, regardless of any ports you specify + from_port: 10050 # this value is ignored + to_port: 10050 # this value is ignored + cidr_ip: 10.0.0.0/8 + + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + ec2_group: + name: example2 + description: an example2 EC2 group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port). + - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. 
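+      # (Editor's note:) such a rule fans out to every listed source and port,
+      # e.g. the two group_names x two ports below expand to four concrete rules.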
+ - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + diff: True + +- name: "Delete group by its id" + ec2_group: + region: eu-west-1 + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: str + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: str + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: str + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: str + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "1.1.1.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. + sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import json +import re +import itertools +from copy import deepcopy +from time import sleep +from collections import namedtuple +from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible.module_utils.aws.iam import get_aws_account_id +from ansible.module_utils.aws.waiters import get_waiter +from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags +from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet +from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network +from ansible.module_utils._text import to_text +from ansible.module_utils.six import string_types + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) +valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +current_account_id = None + + +def rule_cmp(a, b): + """Compare rules without descriptions""" + for prop in ['port_range', 'protocol', 'target', 'target_type']: + if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): + # equal protocols can interchange `(-1, -1)` and `(None, None)` + if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): + continue + elif getattr(a, prop) != getattr(b, prop): + return False + elif getattr(a, prop) != getattr(b, prop): + return False + return True + + +def rules_to_permissions(rules): + return [to_permission(rule) for rule in rules] + + +def to_permission(rule): + # take a Rule, output the serialized grant + 
perm = { + 'IpProtocol': rule.protocol, + } + perm['FromPort'], perm['ToPort'] = rule.port_range + if rule.target_type == 'ipv4': + perm['IpRanges'] = [{ + 'CidrIp': rule.target, + }] + if rule.description: + perm['IpRanges'][0]['Description'] = rule.description + elif rule.target_type == 'ipv6': + perm['Ipv6Ranges'] = [{ + 'CidrIpv6': rule.target, + }] + if rule.description: + perm['Ipv6Ranges'][0]['Description'] = rule.description + elif rule.target_type == 'group': + if isinstance(rule.target, tuple): + pair = {} + if rule.target[0]: + pair['UserId'] = rule.target[0] + # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific + if rule.target[1]: + pair['GroupId'] = rule.target[1] + elif rule.target[2]: + pair['GroupName'] = rule.target[2] + perm['UserIdGroupPairs'] = [pair] + else: + perm['UserIdGroupPairs'] = [{ + 'GroupId': rule.target + }] + if rule.description: + perm['UserIdGroupPairs'][0]['Description'] = rule.description + elif rule.target_type == 'ip_prefix': + perm['PrefixListIds'] = [{ + 'PrefixListId': rule.target, + }] + if rule.description: + perm['PrefixListIds'][0]['Description'] = rule.description + elif rule.target_type not in valid_targets: + raise ValueError('Invalid target type for rule {0}'.format(rule)) + return fix_port_and_protocol(perm) + + +def rule_from_group_permission(perm): + def ports_from_permission(p): + if 'FromPort' not in p and 'ToPort' not in p: + return (None, None) + return (int(perm['FromPort']), int(perm['ToPort'])) + + # outputs a rule tuple + for target_key, target_subkey, target_type in [ + ('IpRanges', 'CidrIp', 'ipv4'), + ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), + ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ]: + if target_key not in perm: + continue + for r in perm[target_key]: + # there may be several IP ranges here, which is ok + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + r[target_subkey], + target_type, + r.get('Description') + ) + if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: + for pair in perm['UserIdGroupPairs']: + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + pair.get('GroupName', None), + ) + if pair.get('UserId', '').startswith('amazon-'): + # amazon-elb and amazon-prefix rules don't need + # group-id specified, so remove it when querying + # from permission + target = ( + target[0], + None, + target[2], + ) + elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id: + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + pair.get('GroupName', None), + ) + + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + target, + 'group', + pair.get('Description') + ) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound']) +def get_security_groups_with_backoff(connection, **kwargs): + return connection.describe_security_groups(**kwargs) + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def sg_exists_with_backoff(connection, **kwargs): + try: + return connection.describe_security_groups(**kwargs) + except is_boto3_error_code('InvalidGroup.NotFound'): + return {'SecurityGroups': []} + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix', + 'group_id', 'group_name', 'group_desc', + 'proto', 
'from_port', 'to_port', 'rule_desc') + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule)) + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): + """ + Returns tuple of (target_type, target, group_created) after validating rule params. + + rule: Dict describing a rule. + name: Name of the security group being managed. + groups: Dict of all available security groups. + + AWS accepts an ip range or a security group as target of a rule. This + function validate the rule specification and return either a non-None + group_id or a non-None ip range. + """ + FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)' + group_id = None + group_name = None + target_group_created = False + + validate_rule(module, rule) + if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. Since you can't fetch it you must create an instance of it + owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() + group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) + groups[group_id] = group_instance + groups[group_name] = group_instance + # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific + if group_id and group_name: + group_name = None + return 'group', (owner_id, group_id, group_name), False + elif 'group_id' in rule: + return 'group', rule['group_id'], False + elif 'group_name' in rule: + group_name = rule['group_name'] + if group_name == name: + group_id = group['GroupId'] + groups[group_id] = group + groups[group_name] = group + elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): + # both are VPC groups, this is ok + group_id = groups[group_name]['GroupId'] + elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): + # both are EC2 classic, this is ok + group_id = groups[group_name]['GroupId'] + else: + auto_group = None + filters = {'group-name': group_name} + if vpc_id: + filters['vpc-id'] = vpc_id + # if we got here, either the target group does not exist, or there + # is a mix of EC2 classic + VPC groups. 
Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + if not rule.get('group_desc', '').strip(): + # retry describing the group once + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): + module.fail_json(msg="group %s will be automatically created by rule %s but " + "no description was provided" % (group_name, rule)) + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + elif not module.check_mode: + params = dict(GroupName=group_name, Description=rule['group_desc']) + if vpc_id: + params['VpcId'] = vpc_id + try: + auto_group = client.create_security_group(**params) + get_waiter( + client, 'security_group_exists', + ).wait( + GroupIds=[auto_group['GroupId']], + ) + except is_boto3_error_code('InvalidGroup.Duplicate'): + # The group exists, but didn't show up in any of our describe-security-groups calls + # Try searching on a filter for the name, and allow a retry window for AWS to update + # the model on their end. + try: + auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] + except IndexError as e: + module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) + except ClientError as e: + module.fail_json_aws( + e, + msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) + if auto_group is not None: + group_id = auto_group['GroupId'] + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True + return 'group', group_id, target_group_created + elif 'cidr_ip' in rule: + return 'ipv4', validate_ip(module, rule['cidr_ip']), False + elif 'cidr_ipv6' in rule: + return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False + elif 'ip_prefix' in rule: + return 'ip_prefix', rule['ip_prefix'], False + + module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) + + +def ports_expand(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for port in ports: + if not isinstance(port, string_types): + ports_expanded.append((port,) * 2) + elif '-' in port: + ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) + else: + ports_expanded.append((int(port.strip()),) * 2) + + return ports_expanded + + +def rule_expand_ports(rule): + # takes a rule dict and returns a list of expanded rule dicts + if 'ports' not in rule: + if isinstance(rule.get('from_port'), string_types): + rule['from_port'] = int(rule.get('from_port')) + if isinstance(rule.get('to_port'), string_types): + rule['to_port'] = int(rule.get('to_port')) + return [rule] + + ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + + rule_expanded = [] + for from_to in ports_expand(ports): + temp_rule = rule.copy() + del temp_rule['ports'] + temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rules_expand_ports(rules): + # takes a list of rules and expands it based on 'ports' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_ports(rule_complex)] + + +def rule_expand_source(rule, source_type): + # takes a rule dict and returns 
a list of expanded rule dicts for specified source_type + sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] + source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') + + rule_expanded = [] + for source in sources: + temp_rule = rule.copy() + for s in source_types_all: + temp_rule.pop(s, None) + temp_rule[source_type] = source + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rule_expand_sources(rule): + # takes a rule dict and returns a list of expanded rule discts + source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule) + + return [r for stype in source_types + for r in rule_expand_source(rule, stype)] + + +def rules_expand_sources(rules): + # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_sources(rule_complex)] + + +def update_rules_description(module, client, rule_type, group_id, ip_permissions): + if module.check_mode: + return + try: + if rule_type == "in": + client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "out": + client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) + + +def fix_port_and_protocol(permission): + for key in ('FromPort', 'ToPort'): + if key in permission: + if permission[key] is None: + del permission[key] + else: + permission[key] = int(permission[key]) + + permission['IpProtocol'] = to_text(permission['IpProtocol']) + + return permission + + +def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id): + if revoke_ingress: + revoke(client, module, revoke_ingress, group_id, 'in') + if revoke_egress: + revoke(client, module, revoke_egress, group_id, 'out') + return bool(revoke_ingress or revoke_egress) + + +def revoke(client, module, ip_permissions, group_id, rule_type): + if not module.check_mode: + try: + if rule_type == 'in': + client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == 'out': + client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions) + except (BotoCoreError, ClientError) as e: + rules = 'ingress rules' if rule_type == 'in' else 'egress rules' + module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) + + +def add_new_permissions(client, module, new_ingress, new_egress, group_id): + if new_ingress: + authorize(client, module, new_ingress, group_id, 'in') + if new_egress: + authorize(client, module, new_egress, group_id, 'out') + return bool(new_ingress or new_egress) + + +def authorize(client, module, ip_permissions, group_id, rule_type): + if not module.check_mode: + try: + if rule_type == 'in': + client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == 'out': + client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions) + except (BotoCoreError, ClientError) as e: + rules = 'ingress rules' if rule_type == 'in' else 'egress rules' + module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) + + +def validate_ip(module, cidr_ip): + split_addr = cidr_ip.split('/') + if len(split_addr) == 2: + # 
this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set + # Get the network bits if IPv4, and validate if IPv6. + try: + ip = to_subnet(split_addr[0], split_addr[1]) + if ip != cidr_ip: + module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format( + cidr_ip, ip)) + except ValueError: + # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here + try: + isinstance(ip_network(to_text(cidr_ip)), IPv6Network) + ip = cidr_ip + except ValueError: + # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError + # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits + ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] + if ip6 != cidr_ip: + module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6)) + return ip6 + return ip + return cidr_ip + + +def update_tags(client, module, group_id, current_tags, tags, purge_tags): + tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) + + if not module.check_mode: + if tags_to_delete: + try: + client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete]) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete)) + + # Add/update tags + if tags_need_modify: + try: + client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) + except (BotoCoreError, ClientError) as e: + module.fail_json(e, msg="Unable to add tags {0}".format(tags_need_modify)) + + return bool(tags_need_modify or tags_to_delete) + + +def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list): + changed = False + client = module.client('ec2') + ingress_needs_desc_update = [] + egress_needs_desc_update = [] + + for present_rule in present_egress: + needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + for r in needs_update: + named_tuple_egress_list.remove(r) + egress_needs_desc_update.extend(needs_update) + for present_rule in present_ingress: + needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + for r in needs_update: + named_tuple_ingress_list.remove(r) + ingress_needs_desc_update.extend(needs_update) + + if ingress_needs_desc_update: + update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update)) + changed |= True + if egress_needs_desc_update: + update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update)) + changed |= True + return changed + + +def create_security_group(client, module, name, description, vpc_id): + if not module.check_mode: + params = dict(GroupName=name, Description=description) + if vpc_id: + params['VpcId'] = vpc_id + try: + group = client.create_security_group(**params) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to create security group") + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object 
+
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+    tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+    if not module.check_mode:
+        if tags_to_delete:
+            try:
+                client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+        # Add/update tags
+        if tags_need_modify:
+            try:
+                client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+    return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+    changed = False
+    client = module.client('ec2')
+    ingress_needs_desc_update = []
+    egress_needs_desc_update = []
+
+    for present_rule in present_egress:
+        needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+        for r in needs_update:
+            named_tuple_egress_list.remove(r)
+        egress_needs_desc_update.extend(needs_update)
+    for present_rule in present_ingress:
+        needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+        for r in needs_update:
+            named_tuple_ingress_list.remove(r)
+        ingress_needs_desc_update.extend(needs_update)
+
+    if ingress_needs_desc_update:
+        update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+        changed |= True
+    if egress_needs_desc_update:
+        update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+        changed |= True
+    return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+    if not module.check_mode:
+        params = dict(GroupName=name, Description=description)
+        if vpc_id:
+            params['VpcId'] = vpc_id
+        try:
+            group = client.create_security_group(**params)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Unable to create security group")
+        # When a group is created, an egress_rule ALLOW ALL
+        # to 0.0.0.0/0 is added automatically, but it's not
+        # reflected in the object returned by the AWS API call,
+        # so we re-read the group to get an updated object.
+        # Amazon sometimes takes a couple of seconds to update the security group, so wait until it exists.
+        while True:
+            sleep(3)
+            group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+            if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+                pass
+            else:
+                break
+        return group
+    return None
+
+
+def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+    group_id = group['GroupId']
+    tries = 6
+
+    def await_rules(group, desired_rules, purge, rule_key):
+        for i in range(tries):
+            current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+            if purge and len(current_rules ^ set(desired_rules)) == 0:
+                return group
+            elif purge:
+                conflicts = current_rules ^ set(desired_rules)
+                # For cases where set comparison is equivalent, but invalid port/proto exist
+                for a, b in itertools.combinations(conflicts, 2):
+                    if rule_cmp(a, b):
+                        conflicts.discard(a)
+                        conflicts.discard(b)
+                if not len(conflicts):
+                    return group
+            elif current_rules.issuperset(desired_rules) and not purge:
+                return group
+            sleep(10)
+            group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+        module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+        return group
+
+    group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+    if 'VpcId' in group and module.params.get('rules_egress') is not None:
+        group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+    return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+    params = {'Filters': []}
+    if group_id:
+        params['GroupIds'] = [group_id]
+    if name:
+        # Add name to filters rather than params['GroupNames']
+        # because params['GroupNames'] only checks the default vpc if no vpc is provided
+        params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+    if vpc_id:
+        params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+    # Don't filter by description to maintain backwards compatibility
+
+    try:
+        security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+        all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+    if security_groups:
+        groups = dict((group['GroupId'], group) for group in all_groups)
+        groups.update(dict((group['GroupName'], group) for group in all_groups))
+        if vpc_id:
+            vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+            groups.update(vpc_wins)
+        # maintain backwards compatibility by using the last matching group
+        return security_groups[-1], groups
+    return None, {}
+
+
+def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
+    if not hasattr(client, "update_security_group_rule_descriptions_egress"):
+        all_rules = (rules or []) + (rules_egress or [])
+        if any('rule_desc' in rule for rule in all_rules):
+            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
+def get_diff_final_resource(client, module, security_group):
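+    # Shape sketch (comment only, values illustrative): the dict built below
+    # mirrors the module's diff output, e.g.
+    #   {'description': ..., 'group_id': 'sg-...', 'group_name': ...,
+    #    'ip_permissions': [...], 'ip_permissions_egress': [...],
+    #    'owner_id': ..., 'tags': {...}, 'vpc_id': ...}
+    # and is computed without mutating anything in AWS.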
+ def get_account_id(security_group, module): + try: + owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account']) + except (BotoCoreError, ClientError) as e: + owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + return owner_id + + def get_final_tags(security_group_tags, specified_tags, purge_tags): + if specified_tags is None: + return security_group_tags + tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags) + end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete) + end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete)) + end_result_tags.update(tags_need_modify) + return end_result_tags + + def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules): + if specified_rules is None: + return security_group_rules + if purge_rules: + final_rules = [] + else: + final_rules = list(security_group_rules) + specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) + for rule in specified_rules: + format_rule = { + 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), + 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + } + if rule.get('proto', 'tcp') in ('all', '-1', -1): + format_rule['ip_protocol'] = '-1' + format_rule.pop('from_port') + format_rule.pop('to_port') + elif rule.get('ports'): + if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): + rule['ports'] = [rule['ports']] + for port in rule.get('ports'): + if isinstance(port, string_types) and '-' in port: + format_rule['from_port'], format_rule['to_port'] = port.split('-') + else: + format_rule['from_port'] = format_rule['to_port'] = port + elif rule.get('from_port') or rule.get('to_port'): + format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) + format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) + for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + if rule.get(source_type): + rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) + if rule.get('rule_desc'): + format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + else: + if not isinstance(rule[source_type], list): + rule[source_type] = [rule[source_type]] + format_rule[rule_key] = [{source_type: target} for target in rule[source_type]] + if rule.get('group_id') or rule.get('group_name'): + rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0]) + format_rule['user_id_group_pairs'] = [{ + 'description': rule_sg.get('description', rule_sg.get('group_desc')), + 'group_id': rule_sg.get('group_id', rule.get('group_id')), + 'group_name': rule_sg.get('group_name', rule.get('group_name')), + 'peering_status': rule_sg.get('peering_status'), + 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), + 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), + 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') + }] + for k, v in list(format_rule['user_id_group_pairs'][0].items()): + if v is None: + format_rule['user_id_group_pairs'][0].pop(k) + final_rules.append(format_rule) + # Order final rules consistently + final_rules.sort(key=get_ip_permissions_sort_key) + return final_rules + 
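+    # Worked example (comment only, not executed): a specified rule such as
+    #   {'proto': 'tcp', 'ports': ['8080-8090'], 'cidr_ip': '10.0.0.0/8'}
+    # comes out of get_final_rules() above as
+    #   {'from_port': '8080', 'to_port': '8090', 'ip_protocol': 'tcp',
+    #    'ip_ranges': [{'cidr_ip': '10.0.0.0/8'}], 'ipv6_ranges': [],
+    #    'prefix_list_ids': [], 'user_id_group_pairs': []}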
security_group_ingress = security_group.get('ip_permissions', []) + specified_ingress = module.params['rules'] + purge_ingress = module.params['purge_rules'] + security_group_egress = security_group.get('ip_permissions_egress', []) + specified_egress = module.params['rules_egress'] + purge_egress = module.params['purge_rules_egress'] + return { + 'description': module.params['description'], + 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), + 'group_name': security_group.get('group_name', module.params['name']), + 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + 'owner_id': get_account_id(security_group, module), + 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), + 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + + +def flatten_nested_targets(module, rules): + def _flatten(targets): + for target in targets: + if isinstance(target, list): + for t in _flatten(target): + yield t + elif isinstance(target, string_types): + yield target + + if rules is not None: + for rule in rules: + target_list_type = None + if isinstance(rule.get('cidr_ip'), list): + target_list_type = 'cidr_ip' + elif isinstance(rule.get('cidr_ipv6'), list): + target_list_type = 'cidr_ipv6' + if target_list_type is not None: + rule[target_list_type] = list(_flatten(rule[target_list_type])) + return rules + + +def get_rule_sort_key(dicts): + if dicts.get('cidr_ip'): + return dicts.get('cidr_ip') + elif dicts.get('cidr_ipv6'): + return dicts.get('cidr_ipv6') + elif dicts.get('prefix_list_id'): + return dicts.get('prefix_list_id') + elif dicts.get('group_id'): + return dicts.get('group_id') + return None + + +def get_ip_permissions_sort_key(rule): + if rule.get('ip_ranges'): + rule.get('ip_ranges').sort(key=get_rule_sort_key) + return rule.get('ip_ranges')[0]['cidr_ip'] + elif rule.get('ipv6_ranges'): + rule.get('ipv6_ranges').sort(key=get_rule_sort_key) + return rule.get('ipv6_ranges')[0]['cidr_ipv6'] + elif rule.get('prefix_list_ids'): + rule.get('prefix_list_ids').sort(key=get_rule_sort_key) + return rule.get('prefix_list_ids')[0]['prefix_list_id'] + elif rule.get('user_id_group_pairs'): + rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) + return rule.get('user_id_group_pairs')[0]['group_id'] + return None + + +def main(): + argument_spec = dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list'), + rules_egress=dict(type='list'), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) + rules = 
deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) + rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + client = module.client('ec2') + + verify_rules_with_descriptions_permitted(client, module, rules, rules_egress) + group, groups = group_exists(client, module, vpc_id, group_id, name) + group_created_new = not bool(group) + + global current_account_id + current_account_id = get_aws_account_id(module) + + before = {} + after = {} + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + try: + if not module.check_mode: + client.delete_security_group(GroupId=group['GroupId']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.")
+        else:
+            # no match found, create it
+            group = create_security_group(client, module, name, description, vpc_id)
+            changed = True
+
+        if tags is not None and group is not None:
+            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+            changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+    if group:
+        named_tuple_ingress_list = []
+        named_tuple_egress_list = []
+        current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+        current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+        for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+                                                            (rules_egress, 'out', named_tuple_egress_list)]:
+            if new_rules is None:
+                continue
+            for rule in new_rules:
+                target_type, target, target_group_created = get_target_from_rule(
+                    module, client, rule, name, group, groups, vpc_id)
+                changed |= target_group_created
+
+                if rule.get('proto', 'tcp') in ('all', '-1', -1):
+                    rule['proto'] = '-1'
+                    rule['from_port'] = None
+                    rule['to_port'] = None
+                try:
+                    int(rule.get('proto', 'tcp'))
+                    rule['proto'] = to_text(rule.get('proto', 'tcp'))
+                    rule['from_port'] = None
+                    rule['to_port'] = None
+                except ValueError:
+                    # rule does not use numeric protocol spec
+                    pass
+
+                named_tuple_rule_list.append(
+                    Rule(
+                        port_range=(rule['from_port'], rule['to_port']),
+                        protocol=to_text(rule.get('proto', 'tcp')),
+                        target=target, target_type=target_type,
+                        description=rule.get('rule_desc'),
+                    )
+                )
+
+        # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+        new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
+        new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
+
+        if module.params.get('rules_egress') is None and 'VpcId' in group:
+            # when no egress rules are specified and we're in a VPC,
+            # we add in a default allow all out rule, which was the
+            # default behavior before egress rules were added
+            rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+            if rule in current_egress:
+                named_tuple_egress_list.append(rule)
+            if rule not in current_egress:
+                current_egress.append(rule)
+
+        # Union of current and desired rules, used below to determine purging
+        present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+        present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+        if purge_rules:
+            revoke_ingress = []
+            for p in present_ingress:
+                if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+                    revoke_ingress.append(to_permission(p))
+        else:
+            revoke_ingress = []
+        if purge_rules_egress and module.params.get('rules_egress') is not None:
+            if module.params.get('rules_egress') == []:
+                revoke_egress = [
+                    to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+                    if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+                ]
+            else:
+                revoke_egress = []
+                for p in present_egress:
+                    if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+                        revoke_egress.append(to_permission(p))
+        else:
+            revoke_egress = []
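+
+        # Illustrative note (comment only): Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+        # above is the implicit AWS default egress permission (all protocols, all
+        # ports, to 0.0.0.0/0); when rules_egress is an empty list, everything
+        # except that default rule lands in the revoke set.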
+
+        # named_tuple_ingress_list and named_tuple_egress_list get updated by
+        # update_rule_descriptions; deep copy these two lists to new variables
+        # to record the 'desired' ingress and egress sg permissions
+        desired_ingress = deepcopy(named_tuple_ingress_list)
+        desired_egress = deepcopy(named_tuple_egress_list)
+
+        changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+        # Revoke old rules
+        changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+        rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress)
+
+        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+        new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+        # Authorize new rules
+        changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+        if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+            # A new group with no rules provided is already being awaited.
+            # When it is created we wait for the default egress rule to be added by AWS
+            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+        elif changed and not module.check_mode:
+            # keep polling until the current security group rules match the desired ingress and egress rules
+            security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+        else:
+            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+        security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+    else:
+        security_group = {'group_id': None}
+
+    if module._diff:
+        if module.params['state'] == 'present':
+            after = get_diff_final_resource(client, module, security_group)
+            if before.get('ip_permissions'):
+                before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+        security_group['diff'] = [{'before': before, 'after': after}]
+
+    module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 00000000..30e4b1e9
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+short_description: Configure AWS virtual private clouds
+description:
+    - Create, modify, and terminate AWS virtual private clouds.
+version_added: "2.0"
+author:
+  - Jonathan Davila (@defionscode)
+  - Sloane Hertel (@s-hertel)
+options:
+  name:
+    description:
+      - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+    required: yes
+    type: str
+  cidr_block:
+    description:
+      - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
+        and is used in conjunction with the C(name) to ensure idempotence.
+    required: yes
+    type: list
+    elements: str
+  ipv6_cidr:
+    description:
+      - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses
+        or the size of the CIDR block.
+    default: False
+    type: bool
+    version_added: '2.10'
+  purge_cidrs:
+    description:
+      - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
+    default: no
+    type: bool
+    version_added: '2.5'
+  tenancy:
+    description:
+      - Whether the tenancy should be default or dedicated. This cannot be changed after the VPC has been created.
+    default: default
+    choices: [ 'default', 'dedicated' ]
+    type: str
+  dns_support:
+    description:
+      - Whether to enable AWS DNS support.
+    default: yes
+    type: bool
+  dns_hostnames:
+    description:
+      - Whether to enable AWS hostname support.
+    default: yes
+    type: bool
+  dhcp_opts_id:
+    description:
+      - The id of the DHCP options to use for this VPC.
+    type: str
+  tags:
+    description:
+      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key,
+        it will override the VPC's Name if they differ.
+    aliases: [ 'resource_tags' ]
+    type: dict
+  state:
+    description:
+      - The state of the VPC. Either absent or present.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  multi_ok:
+    description:
+      - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+        duplicate VPCs created.
+    type: bool
+    default: false
+requirements:
+    - boto3
+    - botocore
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+  ec2_vpc_net:
+    name: Module_dev2
+    cidr_block: 10.10.0.0/16
+    region: us-east-1
+    tags:
+      module: ec2_vpc_net
+      this: works
+    tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+  ec2_vpc_net:
+    name: Module_dev2
+    cidr_block: 10.10.0.0/16
+    ipv6_cidr: True
+    region: us-east-1
+    tenancy: dedicated
+'''
+
+RETURN = '''
+vpc:
+  description: info about the VPC that was created or deleted
+  returned: always
+  type: complex
+  contains:
+    cidr_block:
+      description: The CIDR of the VPC
+      returned: always
+      type: str
+      sample: 10.0.0.0/16
+    cidr_block_association_set:
+      description: IPv4 CIDR blocks associated with the VPC
+      returned: success
+      type: list
+      sample:
+        "cidr_block_association_set": [
+            {
+                "association_id": "vpc-cidr-assoc-97aeeefd",
+                "cidr_block": "20.0.0.0/24",
+                "cidr_block_state": {
+                    "state": "associated"
+                }
+            }
+        ]
+    classic_link_enabled:
+      description: indicates whether ClassicLink is enabled
+      returned: always
+      type: bool
+      sample: false
+    dhcp_options_id:
+      description: the id of the DHCP options associated with this VPC
+      returned: always
+      type: str
+      sample: dopt-0fb8bd6b
+    id:
+      description: VPC resource id
+      returned: always
+      type: str
+      sample: vpc-c2e00da5
+    instance_tenancy:
+      description: indicates whether VPC uses default or dedicated tenancy
+      returned: always
+      type: str
+      sample: default
+    ipv6_cidr_block_association_set:
+      description: IPv6 CIDR blocks associated with the VPC
+      returned: success
+      type: list
+      sample:
+        "ipv6_cidr_block_association_set": [
+            {
+                "association_id": "vpc-cidr-assoc-97aeeefd",
+                "ipv6_cidr_block": "2001:db8::/56",
+                "ipv6_cidr_block_state": {
+                    "state": "associated"
+                }
+            }
+        ]
+    is_default:
+      description: indicates whether this is the default VPC
+      returned: always
+      type: bool
+      sample: false
+    state:
+      description: state of the VPC
+      returned: always
+      type: str
+      sample: available
+    tags:
+      description: tags attached to the VPC, includes name
+      returned: always
+      type: complex
+      contains:
+        Name:
+          description: name tag for the VPC
+          returned: always
+          type: str
+          sample: pk_vpc4
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from time import sleep, time
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
+                                      ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.network.common.utils import to_subnet
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+    """Returns None or a vpc object depending on the existence of a VPC. When supplied
+    with a CIDR, it will check for matching tags to determine if it is a match;
+    otherwise, it will assume the VPC does not exist and thus return None.
+    """
+    try:
+        matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
+        # If an exact match using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block)
+        if not matching_vpcs:
+            matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+    if multi:
+        return None
+    elif len(matching_vpcs) == 1:
+        return matching_vpcs[0]['VpcId']
+    elif len(matching_vpcs) > 1:
+        module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+                             'CIDR block you specified. If you would like to create '
+                             'the VPC anyway, please pass True to the multi_ok param.'
                             % len(matching_vpcs))
+    return None
+
+
+@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
+def get_classic_link_with_backoff(connection, vpc_id):
+    try:
+        return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
+    except botocore.exceptions.ClientError as e:
+        if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
+            return False
+        else:
+            raise
+
+
+def get_vpc(module, connection, vpc_id):
+    # wait for vpc to be available
+    try:
+        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
+
+    try:
+        vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe VPCs")
+    try:
+        vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+    return vpc_obj
+
+
+def update_vpc_tags(connection, module, vpc_id, tags, name):
+    if tags is None:
+        tags = dict()
+
+    tags.update({'Name': name})
+    tags = dict((k, to_native(v)) for k, v in tags.items())
+    try:
+        current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
+        tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
+        if tags_to_update:
+            if not module.check_mode:
+                tags = ansible_dict_to_boto3_tag_list(tags_to_update)
+                vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
+
+                # Wait for tags to be updated
+                expected_tags = boto3_tag_list_to_ansible_dict(tags)
+                filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
+                connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
+
+            return True
+        else:
+            return False
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to update tags")
+
+
+def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+    if vpc_obj['DhcpOptionsId'] != dhcp_id:
+        if not module.check_mode:
+            try:
+                connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+            try:
+                # Wait for DhcpOptionsId to be updated
+                filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+                connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to wait for DhcpOptionsId to be updated")
+
+        return True
+    else:
+        return False
+
+
+def create_vpc(connection, module, cidr_block, tenancy):
+    try:
+        if not module.check_mode:
+            vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
+        else:
+            module.exit_json(changed=True)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to create the VPC")
+
+    # wait for vpc to exist
+    try:
+        connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
+
+    return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+    start_time = time()
+    updated = False
+    while time() < start_time + 300:
+        current_value = connection.describe_vpc_attribute(
+            Attribute=attribute,
+            VpcId=vpc_id
+        )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+        if current_value != expected_value:
+            sleep(3)
+        else:
+            updated = True
+            break
+    if not updated:
+        module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def get_cidr_network_bits(module, cidr_block):
+    fixed_cidrs = []
+    for cidr in cidr_block:
+        split_addr = cidr.split('/')
+        if len(split_addr) == 2:
+            # this_ip is an IPv4 CIDR that may or may not have host bits set
+            # Get the network bits.
+            valid_cidr = to_subnet(split_addr[0], split_addr[1])
+            if cidr != valid_cidr:
+                module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+                            "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+            fixed_cidrs.append(valid_cidr)
+        else:
+            # let AWS handle invalid CIDRs
+            fixed_cidrs.append(cidr)
+    return fixed_cidrs
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=True),
+        cidr_block=dict(type='list', required=True),
+        ipv6_cidr=dict(type='bool', default=False),
+        tenancy=dict(choices=['default', 'dedicated'], default='default'),
+        dns_support=dict(type='bool', default=True),
+        dns_hostnames=dict(type='bool', default=True),
+        dhcp_opts_id=dict(),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        state=dict(choices=['present', 'absent'], default='present'),
+        multi_ok=dict(type='bool', default=False),
+        purge_cidrs=dict(type='bool', default=False),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    name = module.params.get('name')
+    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+    ipv6_cidr = module.params.get('ipv6_cidr')
+    purge_cidrs = module.params.get('purge_cidrs')
+    tenancy = module.params.get('tenancy')
+    dns_support = module.params.get('dns_support')
+    dns_hostnames = module.params.get('dns_hostnames')
+    dhcp_id = module.params.get('dhcp_opts_id')
+    tags = module.params.get('tags')
+    state = module.params.get('state')
+    multi = module.params.get('multi_ok')
+
+    changed = False
+
+    connection = module.client(
+        'ec2',
+        retry_decorator=AWSRetry.jittered_backoff(
+            retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+        )
+    )
+
+    if dns_hostnames and not dns_support:
+        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+    if state == 'present':
+
+        # Check if VPC exists
+        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+        if vpc_id is None:
+            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
+            changed = True
+
+        vpc_obj = get_vpc(module, connection, vpc_id)
+
+        associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+                                if cidr['CidrBlockState']['State'] != 'disassociated')
+        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
+        to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
+        expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
+
+        if len(cidr_block) > 1:
+            for cidr in to_add:
+                changed = True
+                try:
+                    connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
+        if ipv6_cidr:
+            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+                module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+                    vpc_id,
+                    vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+            else:
+                try:
+                    connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
+                    changed = True
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+
+        if purge_cidrs:
+            for association_id in to_remove:
+                changed = True
+                try:
+                    connection.disassociate_vpc_cidr_block(AssociationId=association_id)
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+                                            "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+        if dhcp_id is not None:
+            try:
+                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+                    changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, "Failed to update DHCP options")
+
+        if tags is not None or name is not None:
+            try:
+                if update_vpc_tags(connection, module, vpc_id, tags, name):
+                    changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to update tags")
+
+        current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+        current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+        if current_dns_enabled != dns_support:
+            changed = True
+            if not module.check_mode:
+                try:
+                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+        if current_dns_hostnames != dns_hostnames:
+            changed = True
+            if not module.check_mode:
+                try:
+                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+
+        # wait for associated cidrs to match
+        if to_add or to_remove:
+            try:
+                connection.get_waiter('vpc_available').wait(
+                    VpcIds=[vpc_id],
+                    Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, "Failed to wait for CIDRs to update")
+
+        # try to wait for enableDnsSupport and enableDnsHostnames to match
+        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+        final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
+        final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
+        final_state['id'] = final_state.pop('vpc_id')
+
+        module.exit_json(changed=changed, vpc=final_state)
+
+    elif state == 'absent':
+
+        # Check if VPC exists
+        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+        if vpc_id is not None:
+            try:
+                if not module.check_mode:
+                    connection.delete_vpc(VpcId=vpc_id)
+                changed = True
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to delete VPC {0}. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+                                            "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
+
+        module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 00000000..5085e99b
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+    - Manage subnets in AWS virtual private clouds.
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Brad Davidson (@brandond)
+requirements: [ boto3 ]
+options:
+  az:
+    description:
+      - "The availability zone for the subnet."
+    type: str
+  cidr:
+    description:
+      - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
+    type: str
+    required: true
+  ipv6_cidr:
+    description:
+      - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
+      - "Required if I(assign_instances_ipv6=true)"
+    version_added: "2.5"
+    type: str
+  tags:
+    description:
+      - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+    aliases: [ 'resource_tags' ]
+    type: dict
+  state:
+    description:
+      - "Create or remove the subnet."
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  vpc_id:
+    description:
+      - "VPC ID of the VPC in which to create or delete the subnet."
+    required: true
+    type: str
+  map_public:
+    description:
+      - "Specify C(yes) to indicate that instances launched into the subnet should be assigned a public IP address by default."
+    type: bool
+    default: 'no'
+    version_added: "2.4"
+  assign_instances_ipv6:
+    description:
+      - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
+    type: bool
+    default: false
+    version_added: "2.5"
+  wait:
+    description:
+      - "When I(wait=true) and I(state=present), the module will wait for the subnet to be in the available state before continuing."
+    type: bool
+    default: true
+    version_added: "2.5"
+  wait_timeout:
+    description:
+      - "Number of seconds to wait for the subnet to become available when I(wait=true)."
+    default: 300
+    version_added: "2.5"
+    type: int
+  purge_tags:
+    description:
+      - Whether or not to remove tags that do not appear in the I(tags) list.
+ type: bool + default: true + version_added: "2.5" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create subnet for database servers + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + tags: + Name: Database Subnet + register: database_subnet + +- name: Remove subnet for database servers + ec2_vpc_subnet: + state: absent + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + +- name: Create subnet with IPv6 block assigned + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: 2001:db8:0:102::/64 + +- name: Remove IPv6 block assigned to subnet + ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: '' +''' + +RETURN = ''' +subnet: + description: Dictionary of subnet values + returned: I(state=present) + type: complex + contains: + id: + description: Subnet resource id + returned: I(state=present) + type: str + sample: subnet-b883b2c4 + cidr_block: + description: The IPv4 CIDR of the Subnet + returned: I(state=present) + type: str + sample: "10.0.0.0/16" + ipv6_cidr_block: + description: The IPv6 CIDR block actively associated with the Subnet + returned: I(state=present) + type: str + sample: "2001:db8:0:102::/64" + availability_zone: + description: Availability zone of the Subnet + returned: I(state=present) + type: str + sample: us-east-1a + state: + description: state of the Subnet + returned: I(state=present) + type: str + sample: available + tags: + description: tags attached to the Subnet, includes name + returned: I(state=present) + type: dict + sample: {"Name": "My Subnet", "env": "staging"} + map_public_ip_on_launch: + description: whether public IP is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + assign_ipv6_address_on_creation: + description: whether IPv6 address is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + vpc_id: + description: the id of the VPC where this Subnet exists + returned: I(state=present) + type: str + sample: vpc-67236184 + available_ip_address_count: + description: number of available IPv4 addresses + returned: I(state=present) + type: str + sample: 251 + default_for_az: + description: indicates whether this is the default Subnet for this Availability Zone + returned: I(state=present) + type: bool + sample: false + ipv6_association_id: + description: The IPv6 association ID for the currently associated CIDR + returned: I(state=present) + type: str + sample: subnet-cidr-assoc-b85c74d2 + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: I(state=present) + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. 
+          returned: always
+          type: str
+'''
+
+
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
+                                      camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
+
+
+def get_subnet_info(subnet):
+    if 'Subnets' in subnet:
+        return [get_subnet_info(s) for s in subnet['Subnets']]
+    elif 'Subnet' in subnet:
+        subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+    else:
+        subnet = camel_dict_to_snake_dict(subnet)
+
+    if 'tags' in subnet:
+        subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+    else:
+        subnet['tags'] = dict()
+
+    if 'subnet_id' in subnet:
+        subnet['id'] = subnet['subnet_id']
+        del subnet['subnet_id']
+
+    subnet['ipv6_cidr_block'] = ''
+    subnet['ipv6_association_id'] = ''
+    ipv6set = subnet.get('ipv6_cidr_block_association_set')
+    if ipv6set:
+        for item in ipv6set:
+            if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+                subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+                subnet['ipv6_association_id'] = item['association_id']
+
+    return subnet
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(client, **params):
+    return client.describe_subnets(**params)
+
+
+def waiter_params(module, params, start_time):
+    if not module.botocore_at_least("1.7.0"):
+        remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+        params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+    return params
+
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+    try:
+        get_waiter(conn, waiter_name).wait(
+            **waiter_params(module, params, start_time)
+        )
+    except botocore.exceptions.WaiterError as e:
+        module.fail_json_aws(e, "Failed to wait for updates to complete")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
+    wait = module.params['wait']
+    wait_timeout = module.params['wait_timeout']
+
+    params = dict(VpcId=vpc_id,
+                  CidrBlock=cidr)
+
+    if ipv6_cidr:
+        params['Ipv6CidrBlock'] = ipv6_cidr
+
+    if az:
+        params['AvailabilityZone'] = az
+
+    try:
+        subnet = get_subnet_info(conn.create_subnet(**params))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create subnet")
+
+    # Sometimes AWS takes its time to create a subnet, so using the new
+    # subnet's id to do things like create tags can result in an exception.
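+    # Illustrative note (comment only): handle_waiter() above wraps e.g.
+    #   get_waiter(conn, 'subnet_exists').wait(SubnetIds=[subnet['id']])
+    # and waiter_params() derives a WaiterConfig from the remaining
+    # wait_timeout budget on the code path shown above, so repeated waits
+    # share one overall deadline anchored at start_time.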
+ if wait and subnet.get('state') != 'available': + handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + try: + conn.get_waiter('subnet_available').wait( + **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time) + ) + subnet['state'] = 'available' + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available") + + return subnet + + +def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): + changed = False + + filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'}) + try: + cur_tags = conn.describe_tags(Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe tags") + + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags) + + if to_update: + try: + if not module.check_mode: + AWSRetry.exponential_backoff( + catch_extra_error_codes=['InvalidSubnetID.NotFound'] + )(conn.create_tags)( + Resources=[subnet['id']], + Tags=ansible_dict_to_boto3_tag_list(to_update) + ) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create tags") + + if to_delete: + try: + if not module.check_mode: + tags_list = [] + for key in to_delete: + tags_list.append({'Key': key}) + + AWSRetry.exponential_backoff( + catch_extra_error_codes=['InvalidSubnetID.NotFound'] + )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list) + + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete tags") + + if module.params['wait'] and not module.check_mode: + # Wait for tags to be updated + filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()] + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + return changed + + +def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def disassociate_ipv6_cidr(conn, module, subnet, start_time): + if subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time) + + try: + conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" + .format(subnet['ipv6_association_id'], subnet['id'])) + + # Wait for cidr block to be disassociated + if module.params['wait']: + filters = 
ansible_dict_to_boto3_filter_list(
+            {'ipv6-cidr-block-association.state': ['disassociated'],
+             'vpc-id': subnet['vpc_id']}
+        )
+        handle_waiter(conn, module, 'subnet_exists',
+                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+    wait = module.params['wait']
+    changed = False
+
+    if subnet['ipv6_association_id'] and not ipv6_cidr:
+        if not check_mode:
+            disassociate_ipv6_cidr(conn, module, subnet, start_time)
+        changed = True
+
+    if ipv6_cidr:
+        filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+                                                     'vpc-id': subnet['vpc_id']})
+
+        try:
+            check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+        if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+            module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+        if subnet['ipv6_association_id']:
+            if not check_mode:
+                disassociate_ipv6_cidr(conn, module, subnet, start_time)
+            changed = True
+
+        try:
+            if not check_mode:
+                associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+        else:
+            # associate_resp only exists outside of check mode, so all of the
+            # post-association bookkeeping must be guarded as well
+            if not check_mode:
+                if wait:
+                    filters = ansible_dict_to_boto3_filter_list(
+                        {'ipv6-cidr-block-association.state': ['associated'],
+                         'vpc-id': subnet['vpc_id']}
+                    )
+                    handle_waiter(conn, module, 'subnet_exists',
+                                  {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+                if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+                    subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+                    subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+                    if subnet['ipv6_cidr_block_association_set']:
+                        subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+                    else:
+                        subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+    return changed
+
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+    filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+    try:
+        subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+    if subnets:
+        return subnets[0]
+
+    return None
+
+
+def ensure_subnet_present(conn, module):
+    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+    changed = False
+
+    # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
+    start_time = time.time()
+
+    if subnet is None:
+        if not module.check_mode:
+            subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+                                   ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
+        changed = True
+        # Subnet will be None when check_mode is true
+        if subnet is None:
+            return {
+                'changed': changed,
+                'subnet': {}
+            }
+        if module.params['wait']:
+            handle_waiter(conn, module,
'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + changed = True + + if module.params['map_public'] != subnet['map_public_ip_on_launch']: + ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + changed = True + + if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + changed = True + + if module.params['tags'] != subnet['tags']: + stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) + if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): + changed = True + + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if not module.check_mode and module.params['wait']: + # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation + # so we only wait for those if necessary just before returning the subnet + subnet = ensure_final_subnet(conn, module, subnet, start_time) + + return { + 'changed': changed, + 'subnet': subnet + } + + +def ensure_final_subnet(conn, module, subnet, start_time): + for rewait in range(0, 30): + map_public_correct = False + assign_ipv6_correct = False + + if module.params['map_public'] == subnet['map_public_ip_on_launch']: + map_public_correct = True + else: + if module.params['map_public']: + handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + assign_ipv6_correct = True + else: + if module.params['assign_instances_ipv6']: + handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + + if map_public_correct and assign_ipv6_correct: + break + + time.sleep(5) + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + + return subnet + + +def ensure_subnet_absent(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if subnet is None: + return {'changed': False} + + try: + if not module.check_mode: + conn.delete_subnet(SubnetId=subnet['id']) + if module.params['wait']: + handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) + return {'changed': True} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete subnet") + + +def main(): + argument_spec = dict( + az=dict(default=None, required=False), + cidr=dict(required=True), + ipv6_cidr=dict(default='', required=False), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + vpc_id=dict(required=True), + map_public=dict(default=False, required=False, type='bool'), + assign_instances_ipv6=dict(default=False, required=False, type='bool'), + wait=dict(type='bool', default=True), + 
wait_timeout=dict(type='int', default=300, required=False), + purge_tags=dict(default=True, type='bool') + ) + + required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + + if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") + + if not module.botocore_at_least("1.7.0"): + module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times") + + connection = module.client('ec2') + + state = module.params.get('state') + + try: + if state == 'present': + result = ensure_subnet_present(connection, module) + elif state == 'absent': + result = ensure_subnet_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/flatpak_remote.py b/test/support/integration/plugins/modules/flatpak_remote.py new file mode 100644 index 00000000..db208f1b --- /dev/null +++ b/test/support/integration/plugins/modules/flatpak_remote.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright: (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +# ATTENTION CONTRIBUTORS! +# +# TL;DR: Run this module's integration tests manually before opening a pull request +# +# Long explanation: +# The integration tests for this module are currently NOT run on the Ansible project's continuous +# delivery pipeline. So please: When you make changes to this module, make sure that you run the +# included integration tests manually for both Python 2 and Python 3: +# +# Python 2: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote +# Python 3: +# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote +# +# Because of external dependencies, the current integration tests are somewhat too slow and brittle +# to be included right now. I have plans to rewrite the integration tests based on a local flatpak +# repository so that they can be included into the normal CI pipeline. +# //oolongbrothers + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: flatpak_remote +version_added: '2.6' +short_description: Manage flatpak repository remotes +description: +- Allows users to add or remove flatpak remotes. +- The flatpak remotes concept is comparable to what is called repositories in other packaging + formats. +- Currently, remote addition is only supported via I(flatpakrepo) file URLs. +- Existing remotes will not be updated. +- See the M(flatpak) module for managing flatpaks. +author: +- John Kwiatkoski (@JayKayy) +- Alexander Bethke (@oolongbrothers) +requirements: +- flatpak +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. 
+ default: flatpak + flatpakrepo_url: + description: + - The URL to the I(flatpakrepo) file representing the repository remote to add. + - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) + is added using the specified installation C(method). + - When used with I(state=absent), this is not required. + - Required when I(state=present). + method: + description: + - The installation method to use. + - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) + or only for the current C(user). + choices: [ system, user ] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with I(state=present), the remote will be added to the managed host under + the specified I(name). + - When used with I(state=absent) the remote with that name will be removed. + required: true + state: + description: + - Indicates the desired package state. + choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Add the Gnome flatpak remote to the system installation + flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + flatpak_remote: + name: flathub + state: absent +''' + +RETURN = r''' +command: + description: The exact flatpak command that was executed + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +msg: + description: Module error message + returned: failure + type: str + sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
+rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" +''' + +import subprocess +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result + command = "{0} remote-add --{1} {2} {3}".format( + binary, method, name, flatpakrepo_url) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result + command = "{0} remote-delete --{1} --force {2} ".format( + binary, method, name) + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = "{0} remote-list -d --{1}".format(binary, method) + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def _flatpak_command(module, noop, command): + global result + if noop: + result['rc'] = 0 + result['command'] = command + return "" + + process = subprocess.Popen( + command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout_data, stderr_data = process.communicate() + result['rc'] = process.returncode + result['command'] = command + result['stdout'] = stdout_data + result['stderr'] = stderr_data + if result['rc'] != 0: + module.fail_json(msg="Failed to execute flatpak command", **result) + return to_native(stdout_data) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + + remote_already_exists = remote_exists(module, binary, to_bytes(name), method) + + if state == 'present' and not remote_already_exists: + add_remote(module, binary, name, flatpakrepo_url, method) + elif state == 'absent' and remote_already_exists: + remove_remote(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/htpasswd.py b/test/support/integration/plugins/modules/htpasswd.py new file mode 100644 index 00000000..ad12b0c0 --- /dev/null +++ b/test/support/integration/plugins/modules/htpasswd.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +module: htpasswd +version_added: "1.3" +short_description: manage user files for basic authentication +description: + - Add and remove username/password entries in a password file using htpasswd. + - This is used by web servers such as Apache and Nginx for basic authentication. +options: + path: + required: true + aliases: [ dest, destfile ] + description: + - Path to the file that contains the usernames and passwords + name: + required: true + aliases: [ username ] + description: + - User name to add or remove + password: + required: false + description: + - Password associated with user. + - Must be specified if user does not exist yet. + crypt_scheme: + required: false + choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + default: "apr_md5_crypt" + description: + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the user entry should be present or not + create: + required: false + type: bool + default: "yes" + description: + - Used with C(state=present). If specified, the file will be created + if it does not already exist. If set to "no", will fail if the + file does not exist +notes: + - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." + - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." + - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." 
+requirements: [ passlib>=1.6 ] +author: "Ansible Core Team" +extends_documentation_fragment: files +""" + +EXAMPLES = """ +# Add a user to a password file and ensure permissions are set +- htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + +# Remove a user from a password file +- htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt +""" + + +import os +import tempfile +import traceback +from distutils.version import LooseVersion +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +PASSLIB_IMP_ERR = None +try: + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + import passlib +except ImportError: + PASSLIB_IMP_ERR = traceback.format_exc() + passlib_installed = False +else: + passlib_installed = True + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, crypt_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + + found = None + if getattr(ht, 'check_password', None): + found = ht.check_password(username, password) + else: + found = ht.verify(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False) + else: + ht = HtpasswdFile(dest) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms 
or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(required=True, aliases=["dest", "destfile"]), + name=dict(required=True, aliases=["username"]), + password=dict(required=False, default=None, no_log=True), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), + state=dict(required=False, default="present"), + create=dict(type='bool', default='yes'), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = module.params['password'] + crypt_scheme = module.params['crypt_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + if not passlib_installed: + module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. + try: + f = open(path, "r") + except IOError: + # No preexisting file to remove blank lines from + f = None + else: + try: + lines = f.readlines() + finally: + f.close() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path, "w") + try: + [f.write(line) for line in lines if line.strip()] + finally: + f.close() + + try: + if state == 'present': + (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) + elif state == 'absent': + if not os.path.exists(path): + module.exit_json(msg="%s not present" % username, + warnings="%s does not exist" % path, changed=False) + (msg, changed) = absent(path, username, check_mode) + else: + module.fail_json(msg="Invalid state: %s" % state) + + check_file_attrs(module, changed, msg) + module.exit_json(msg=msg, changed=changed) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/locale_gen.py b/test/support/integration/plugins/modules/locale_gen.py new file mode 100644 index 00000000..4968b834 --- /dev/null +++ b/test/support/integration/plugins/modules/locale_gen.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: locale_gen +short_description: Creates or removes locales +description: + - Manages locales by editing /etc/locale.gen and invoking locale-gen. +version_added: "1.6" +author: +- Augustus Kling (@AugustusKling) +options: + name: + description: + - Name and encoding of the locale, such as "en_GB.UTF-8". + required: true + state: + description: + - Whether the locale shall be present. 
+ choices: [ absent, present ] + default: present +''' + +EXAMPLES = ''' +- name: Ensure a locale exists + locale_gen: + name: de_CH.UTF-8 + state: present +''' + +import os +import re +from subprocess import Popen, PIPE, call + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +LOCALE_NORMALIZATION = { + ".utf8": ".UTF-8", + ".eucjp": ".EUC-JP", + ".iso885915": ".ISO-8859-15", + ".cp1251": ".CP1251", + ".koi8r": ".KOI8-R", + ".armscii8": ".ARMSCII-8", + ".euckr": ".EUC-KR", + ".gbk": ".GBK", + ".gb18030": ".GB18030", + ".euctw": ".EUC-TW", +} + + +# =========================================== +# locale module specific support methods. +# + +def is_available(name, ubuntuMode): + """Check if the given locale is available on the system. This is done by + checking either: + * if the locale is present in /etc/locale.gen + * or if the locale is present in /usr/share/i18n/SUPPORTED""" + if ubuntuMode: + __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$' + __locales_available = '/usr/share/i18n/SUPPORTED' + else: + __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$' + __locales_available = '/etc/locale.gen' + + re_compiled = re.compile(__regexp) + fd = open(__locales_available, 'r') + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + fd.close() + return False + + +def is_present(name): + """Checks if the given locale is currently installed.""" + output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] + output = to_native(output) + return any(fix_case(name) == fix_case(line) for line in output.splitlines()) + + +def fix_case(name): + """locale -a might return the encoding in either lower or upper case. + Passing through this function makes them uniform for comparisons.""" + for s, r in LOCALE_NORMALIZATION.items(): + name = name.replace(s, r) + return name + + +def replace_line(existing_line, new_line): + """Replaces lines in /etc/locale.gen""" + try: + f = open("/etc/locale.gen", "r") + lines = [line.replace(existing_line, new_line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def set_locale(name, enabled=True): + """ Sets the state of the locale. Defaults to enabled. """ + search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name + if enabled: + new_string = r'%s \g<charset>' % (name) + else: + new_string = r'# %s \g<charset>' % (name) + try: + f = open("/etc/locale.gen", "r") + lines = [re.sub(search_string, new_string, line) for line in f] + finally: + f.close() + try: + f = open("/etc/locale.gen", "w") + f.write("".join(lines)) + finally: + f.close() + + +def apply_change(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale. + set_locale(name, enabled=True) + else: + # Delete locale. + set_locale(name, enabled=False) + + localeGenExitValue = call("locale-gen") + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def apply_change_ubuntu(targetState, name): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + """ + if targetState == "present": + # Create locale.
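+ # For reference (illustrative, inferred from the parsing in the removal branch below): + # /var/lib/locales/supported.d/local holds one "<locale> <charset>" pair per line, e.g. + # de_CH.UTF-8 UTF-8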
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local + localeGenExitValue = call(["locale-gen", name]) + else: + # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. + try: + f = open("/var/lib/locales/supported.d/local", "r") + content = f.readlines() + finally: + f.close() + try: + f = open("/var/lib/locales/supported.d/local", "w") + for line in content: + locale, charset = line.split(' ') + if locale != name: + f.write(line) + finally: + f.close() + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + localeGenExitValue = call(["locale-gen", "--purge"]) + + if localeGenExitValue != 0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + + if not os.path.exists("/etc/locale.gen"): + if os.path.exists("/var/lib/locales/supported.d/"): + # Ubuntu created its own system to manage locales. + ubuntuMode = True + else: + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") + else: + # We found the common way to manage locales. + ubuntuMode = False + + if not is_available(name, ubuntuMode): + module.fail_json(msg="The locale you've entered is not available " + "on your system.") + + if is_present(name): + prev_state = "present" + else: + prev_state = "absent" + changed = (prev_state != state) + + if module.check_mode: + module.exit_json(changed=changed) + else: + if changed: + try: + if ubuntuMode is False: + apply_change(state, name) + else: + apply_change_ubuntu(state, name) + except EnvironmentError as e: + module.fail_json(msg=to_native(e), exitValue=e.errno) + + module.exit_json(name=name, changed=changed, msg="OK") + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/lvg.py b/test/support/integration/plugins/modules/lvg.py new file mode 100644 index 00000000..e2035f68 --- /dev/null +++ b/test/support/integration/plugins/modules/lvg.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2013, Alexander Bulimov +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +author: +- Alexander Bulimov (@abulimov) +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. +version_added: "1.1" +options: + vg: + description: + - The name of the volume group. + type: str + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module will take care of running pvcreate if needed. + type: list + pesize: + description: + - "The size of the physical extent. 
I(pesize) must be a power of 2 of at least 1 sector + (where the sector size is the largest sector size of the PVs currently used in the VG), + or at least 128KiB." + - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + type: str + default: "4" + pv_options: + description: + - Additional options to pass to C(pvcreate) when creating the volume group. + type: str + version_added: "2.4" + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + type: str + version_added: "1.6" + state: + description: + - Control if the volume group exists. + type: str + choices: [ absent, present ] + default: present + force: + description: + - If C(yes), allows to remove volume group with logical volumes. + type: bool + default: no +seealso: +- module: filesystem +- module: lvol +- module: parted +notes: + - This module does not modify PE size for already present volume group. +''' + +EXAMPLES = r''' +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. + lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 + +- name: Remove a volume group with name vg.services + lvg: + vg: vg.services + state: absent +''' + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif state == 'present': + 
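+ # Note added for clarity: an empty device list is acceptable for state=absent, but creating or resizing a volume group needs at least one physical volume, hence the hard failure below.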
module.fail_json(msg="No physical volumes given.") + + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if state == 'present': + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." % test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) + else: + pvs_filter = '' + rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + vgs_cmd = module.get_bin_path('vgs', True) + rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) + + if rc != 0: + module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) + + changed = False + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + if this_vg is None: + if state == 'present': + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg)) + + # resize VG + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % 
current_dev, rc=rc, err=err) + # add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) + + # remove some PV from our VG + if devs_to_remove: + devs_to_remove_string = ' '.join(devs_to_remove) + vgreduce_cmd = module.get_bin_path('vgreduce', True) + rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/mongodb_parameter.py b/test/support/integration/plugins/modules/mongodb_parameter.py new file mode 100644 index 00000000..05de42b2 --- /dev/null +++ b/test/support/integration/plugins/modules/mongodb_parameter.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: mongodb_parameter +short_description: Change an administrative parameter on a MongoDB server +description: + - Change an administrative parameter on a MongoDB server. +version_added: "2.1" +options: + login_user: + description: + - The MongoDB username used to authenticate with. + type: str + login_password: + description: + - The login user's password used to authenticate with. + type: str + login_host: + description: + - The host running the database. + type: str + default: localhost + login_port: + description: + - The MongoDB port to connect to. + default: 27017 + type: int + login_database: + description: + - The database where login credentials are stored. + type: str + replica_set: + description: + - Replica set to connect to (automatically connects to primary for writes). + type: str + ssl: + description: + - Whether to use an SSL connection when connecting to the database. + type: bool + default: no + param: + description: + - MongoDB administrative parameter to modify. + type: str + required: true + value: + description: + - MongoDB administrative parameter value to set. + type: str + required: true + param_type: + description: + - Define the type of parameter value. + default: str + type: str + choices: [int, str] + +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. + - This can be installed using pip or the OS package manager. 
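+# Comment (not rendered in the docs): as a fallback when no login options are given, load_mongocnf() below reads credentials from ~/.mongodb.cnf, an INI-style file with a [client] section holding "user" and "pass" entries.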
+ - See also U(http://api.mongodb.org/python/current/installation.html) +requirements: [ "pymongo" ] +author: "Loic Blot (@nerzhul)" +''' + +EXAMPLES = r''' +- name: Set MongoDB syncdelay to 60 (this is an int) + mongodb_parameter: + param: syncdelay + value: 60 + param_type: int +''' + +RETURN = r''' +before: + description: value before modification + returned: success + type: str +after: + description: value after modification + returned: success + type: str +''' + +import os +import traceback + +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_native + + +# ========================================= +# MongoDB module specific support methods. +# + +def load_mongocnf(): + config = configparser.RawConfigParser() + mongocnf = os.path.expanduser('~/.mongodb.cnf') + + try: + config.readfp(open(mongocnf)) + creds = dict( + user=config.get('client', 'user'), + password=config.get('client', 'pass') + ) + except (configparser.NoOptionError, IOError): + return False + + return creds + + +# ========================================= +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None), + login_password=dict(default=None, no_log=True), + login_host=dict(default='localhost'), + login_port=dict(default=27017, type='int'), + login_database=dict(default=None), + replica_set=dict(default=None), + param=dict(required=True), + value=dict(required=True), + param_type=dict(default="str", choices=['str', 'int']), + ssl=dict(default=False, type='bool'), + ) + ) + + if not pymongo_found: + module.fail_json(msg=missing_required_lib('pymongo')) + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_database = module.params['login_database'] + + replica_set = module.params['replica_set'] + ssl = module.params['ssl'] + + param = module.params['param'] + param_type = module.params['param_type'] + value = module.params['value'] + + # Verify parameter is coherent with specified type + try: + if param_type == 'int': + value = int(value) + except ValueError: + module.fail_json(msg="value '%s' is not %s" % (value, param_type)) + + try: + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) + else: + client = MongoClient(login_host, int(login_port), ssl=ssl) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None or login_user is None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password, source=login_database) + + except ConnectionFailure as e: + module.fail_json(msg='unable to connect to database: %s' % to_native(e), 
exception=traceback.format_exc()) + + db = client.admin + + try: + after_value = db.command("setParameter", **{param: value}) + except OperationFailure as e: + module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc()) + + if "was" not in after_value: + module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.") + else: + module.exit_json(changed=(value != after_value["was"]), before=after_value["was"], + after=value) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/mongodb_user.py b/test/support/integration/plugins/modules/mongodb_user.py new file mode 100644 index 00000000..362b3aa4 --- /dev/null +++ b/test/support/integration/plugins/modules/mongodb_user.py @@ -0,0 +1,474 @@ +#!/usr/bin/python + +# (c) 2012, Elliott Foster +# Sponsored by Four Kitchens http://fourkitchens.com. +# (c) 2014, Epic Games, Inc. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: mongodb_user +short_description: Adds or removes a user from a MongoDB database +description: + - Adds or removes a user from a MongoDB database. +version_added: "1.1" +options: + login_user: + description: + - The MongoDB username used to authenticate with. + type: str + login_password: + description: + - The login user's password used to authenticate with. + type: str + login_host: + description: + - The host running the database. + default: localhost + type: str + login_port: + description: + - The MongoDB port to connect to. + default: '27017' + type: str + login_database: + version_added: "2.0" + description: + - The database where login credentials are stored. + type: str + replica_set: + version_added: "1.6" + description: + - Replica set to connect to (automatically connects to primary for writes). + type: str + database: + description: + - The name of the database to add/remove the user from. + required: true + type: str + aliases: [db] + name: + description: + - The name of the user to add or remove. + required: true + aliases: [user] + type: str + password: + description: + - The password to use for the user. + type: str + aliases: [pass] + ssl: + version_added: "1.8" + description: + - Whether to use an SSL connection when connecting to the database. + type: bool + ssl_cert_reqs: + version_added: "2.2" + description: + - Specifies whether a certificate is required from the other side of the connection, + and whether it will be validated if provided. + default: CERT_REQUIRED + choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED] + type: str + roles: + version_added: "1.3" + type: list + elements: raw + description: + - > + The database user roles. Valid values can be one or more of the following strings: + 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', + 'dbAdminAnyDatabase' + - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'." + - "This param requires pymongo 2.5+. If it is a string, MongoDB 2.4+ is also required. If it is a dictionary, MongoDB 2.6+ is required." + state: + description: + - The database user state.
+ default: present + choices: [absent, present] + type: str + update_password: + default: always + choices: [always, on_create] + version_added: "2.1" + description: + - C(always) will update passwords if they differ. + - C(on_create) will only set the password for newly created users. + type: str + +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. This + can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html +requirements: [ "pymongo" ] +author: + - "Elliott Foster (@elliotttf)" + - "Julien Thebault (@Lujeni)" +''' + +EXAMPLES = ''' +- name: Create 'burgers' database user with name 'bob' and password '12345'. + mongodb_user: + database: burgers + name: bob + password: 12345 + state: present + +- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly) + mongodb_user: + database: burgers + name: bob + password: 12345 + state: present + ssl: True + +- name: Delete 'burgers' database user with name 'bob'. + mongodb_user: + database: burgers + name: bob + state: absent + +- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via the pre-MongoDB 2.2 style) + mongodb_user: + database: burgers + name: ben + password: 12345 + roles: read + state: present + +- name: Define roles + mongodb_user: + database: burgers + name: jim + password: 12345 + roles: readWrite,dbAdmin,userAdmin + state: present + +- name: Define roles + mongodb_user: + database: burgers + name: joe + password: 12345 + roles: readWriteAnyDatabase + state: present + +- name: Add a user to a database in a replica set; the primary server is automatically discovered and written to + mongodb_user: + database: burgers + name: bob + replica_set: belcher + password: 12345 + roles: readWriteAnyDatabase + state: present + +# Add a user 'oplog_reader' with read-only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL). +# Note that the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials. +# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin" +# This syntax requires MongoDB 2.6+ and pymongo 2.5+ +- name: Roles as a dictionary + mongodb_user: + login_user: root + login_password: root_password + database: admin + user: oplog_reader + password: oplog_reader_password + state: present + replica_set: belcher + roles: + - db: local + role: read + +''' + +RETURN = ''' +user: + description: The name of the user to add or remove.
+ returned: success + type: str +''' + +import os +import ssl as ssl_lib +import traceback +from distutils.version import LooseVersion +from operator import itemgetter + +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import binary_type, text_type +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_native + + +# ========================================= +# MongoDB module specific support methods. +# + +def check_compatibility(module, client): + """Check the compatibility between the driver and the database. + + See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility + + Args: + module: Ansible module. + client (cursor): Mongodb cursor on admin database. + """ + loose_srv_version = LooseVersion(client.server_info()['version']) + loose_driver_version = LooseVersion(PyMongoVersion) + + if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'): + module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)') + + elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'): + module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)') + + elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'): + module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)') + + elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): + module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)') + + +def user_find(client, user, db_name): + """Check if the user exists. + + Args: + client (cursor): Mongodb cursor on admin database. + user (str): User to check. + db_name (str): User's database. + + Returns: + dict: when user exists, False otherwise. + """ + for mongo_user in client["admin"].system.users.find(): + if mongo_user['user'] == user: + # NOTE: there is no 'db' field in mongo 2.4. 
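+ # Illustrative entry shape on newer servers (an assumption, for clarity): + # {"_id": "burgers.bob", "user": "bob", "db": "burgers", "roles": [{"role": "readWrite", "db": "burgers"}]}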
+ if 'db' not in mongo_user: + return mongo_user + + if mongo_user["db"] == db_name: + return mongo_user + return False + + +def user_add(module, client, db_name, user, password, roles): + # pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated + # without reproducing a lot of the logic in database.py of pymongo + db = client[db_name] + + if roles is None: + db.add_user(user, password, False) + else: + db.add_user(user, password, None, roles=roles) + + +def user_remove(module, client, db_name, user): + exists = user_find(client, user, db_name) + if exists: + if module.check_mode: + module.exit_json(changed=True, user=user) + db = client[db_name] + db.remove_user(user) + else: + module.exit_json(changed=False, user=user) + + +def load_mongocnf(): + config = configparser.RawConfigParser() + mongocnf = os.path.expanduser('~/.mongodb.cnf') + + try: + config.readfp(open(mongocnf)) + creds = dict( + user=config.get('client', 'user'), + password=config.get('client', 'pass') + ) + except (configparser.NoOptionError, IOError): + return False + + return creds + + +def check_if_roles_changed(uinfo, roles, db_name): + # We must be aware of users which can read the oplog on a replicaset + # Such users must have access to the local DB, but since this DB does not store users credentials + # and is not synchronized among replica sets, the user must be stored on the admin db + # Therefore their structure is the following : + # { + # "_id" : "admin.oplog_reader", + # "user" : "oplog_reader", + # "db" : "admin", # <-- admin DB + # "roles" : [ + # { + # "role" : "read", + # "db" : "local" # <-- local DB + # } + # ] + # } + + def make_sure_roles_are_a_list_of_dict(roles, db_name): + output = list() + for role in roles: + if isinstance(role, (binary_type, text_type)): + new_role = {"role": role, "db": db_name} + output.append(new_role) + else: + output.append(role) + return output + + roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name) + uinfo_roles = uinfo.get('roles', []) + + if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')): + return False + return True + + +# ========================================= +# Module execution. 
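+# Aside (illustrative): check_if_roles_changed() above normalizes the string shorthand 'read' to {'role': 'read', 'db': db_name}, so for a user whose stored roles are [{'role': 'read', 'db': 'local'}], passing roles=['read'] with db_name='admin' counts as a change.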
+# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None), + login_password=dict(default=None, no_log=True), + login_host=dict(default='localhost'), + login_port=dict(default='27017'), + login_database=dict(default=None), + replica_set=dict(default=None), + database=dict(required=True, aliases=['db']), + name=dict(required=True, aliases=['user']), + password=dict(aliases=['pass'], no_log=True), + ssl=dict(default=False, type='bool'), + roles=dict(default=None, type='list', elements='raw'), + state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default="always", choices=["always", "on_create"]), + ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']), + ), + supports_check_mode=True + ) + + if not pymongo_found: + module.fail_json(msg=missing_required_lib('pymongo')) + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_database = module.params['login_database'] + + replica_set = module.params['replica_set'] + db_name = module.params['database'] + user = module.params['name'] + password = module.params['password'] + ssl = module.params['ssl'] + roles = module.params['roles'] or [] + state = module.params['state'] + update_password = module.params['update_password'] + + try: + connection_params = { + "host": login_host, + "port": int(login_port), + } + + if replica_set: + connection_params["replicaset"] = replica_set + + if ssl: + connection_params["ssl"] = ssl + connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs']) + + client = MongoClient(**connection_params) + + # NOTE: this check must be done ASAP. 
+ # We don't need to be authenticated (this ability was lost in PyMongo 3.6) + if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'): + check_compatibility(module, client) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None or login_user is None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password, source=login_database) + elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): + if db_name != "admin": + module.fail_json(msg='The localhost login exception only allows the first admin account to be created') + # else: this has to be the first admin user added + + except Exception as e: + module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc()) + + if state == 'present': + if password is None and update_password == 'always': + module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') + + try: + if update_password != 'always': + uinfo = user_find(client, user, db_name) + if uinfo: + password = None + if not check_if_roles_changed(uinfo, roles, db_name): + module.exit_json(changed=False, user=user) + + if module.check_mode: + module.exit_json(changed=True, user=user) + + user_add(module, client, db_name, user, password, roles) + except Exception as e: + module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc()) + finally: + try: + client.close() + except Exception: + pass + # Here we could check for a password change if mongo provided a query for that: https://jira.mongodb.org/browse/SERVER-22848 + # newuinfo = user_find(client, user, db_name) + # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere: + # module.exit_json(changed=False, user=user) + + elif state == 'absent': + try: + user_remove(module, client, db_name, user) + except Exception as e: + module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc()) + finally: + try: + client.close() + except Exception: + pass + module.exit_json(changed=True, user=user) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/pids.py b/test/support/integration/plugins/modules/pids.py new file mode 100644 index 00000000..4cbf45a9 --- /dev/null +++ b/test/support/integration/plugins/modules/pids.py @@ -0,0 +1,89 @@ +#!/usr/bin/python +# Copyright: (c) 2019, Saranya Sridharan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: pids +version_added: 2.8 +description: "Retrieves a list of PIDs of a given process name in Ansible controller/controlled machines. Returns an empty list if no process with that name exists." +short_description: "Retrieves a list of process IDs if the process is running, otherwise returns an empty list" +author: + - Saranya Sridharan (@saranyasridharan) +requirements: + - psutil (Python module) +options: + name: + description: The name of the process you want to get the PID for.
+ required: true + type: str +''' + +EXAMPLES = ''' +# Pass the process name +- name: Getting process IDs of the process + pids: + name: python + register: pids_of_python + +- name: Printing the process IDs obtained + debug: + msg: "PIDs of python:{{pids_of_python.pids|join(',')}}" +''' + +RETURN = ''' +pids: + description: Process IDs of the given process + returned: list of none, one, or more process IDs + type: list + sample: [100,200] +''' + +from ansible.module_utils.basic import AnsibleModule +try: + import psutil + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + + +def compare_lower(a, b): + if a is None or b is None: + # this could just be "return False" but would lead to surprising behavior if both a and b are None + return a == b + + return a.lower() == b.lower() + + +def get_pid(name): + pids = [] + + for proc in psutil.process_iter(attrs=['name', 'cmdline']): + if compare_lower(proc.info['name'], name) or \ + proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): + pids.append(proc.pid) + + return pids + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type="str"), + ), + supports_check_mode=True, + ) + if not HAS_PSUTIL: + module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil") + name = module.params["name"] + response = dict(pids=get_pid(name)) + module.exit_json(**response) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/pkgng.py b/test/support/integration/plugins/modules/pkgng.py new file mode 100644 index 00000000..11363479 --- /dev/null +++ b/test/support/integration/plugins/modules/pkgng.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, bleader +# Written by bleader +# Based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: pkgng +short_description: Package manager for FreeBSD >= 9.0 +description: + - Manage binary packages for FreeBSD using 'pkgng', which is available in versions after 9.0. +version_added: "1.2" +options: + name: + description: + - Name or list of names of packages to install/remove. + required: true + state: + description: + - State of the package. + - 'Note: "latest" added in 2.7' + choices: [ 'present', 'latest', 'absent' ] + required: false + default: present + cached: + description: + - Use local package base instead of fetching an updated one. + type: bool + required: false + default: no + annotation: + description: + - A comma-separated list of key-value pairs of the form + C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a + C(-) denotes removing an annotation, and C(:) denotes modifying an + annotation. + If setting or modifying annotations, a value must be provided. + required: false + version_added: "1.6" + pkgsite: + description: + - For pkgng versions before 1.1.4, specify packagesite to use + for downloading packages. If not specified, use settings from + C(/usr/local/etc/pkg.conf). + - For newer pkgng versions, specify the name of a repository + configured in C(/usr/local/etc/pkg/repos).
+ required: false + rootdir: + description: + - For pkgng versions 1.5 and later, pkg will install all packages + within the specified root directory. + - Cannot be used together with I(chroot) or I(jail) options. + required: false + chroot: + version_added: "2.1" + description: + - Pkg will chroot in the specified environment. + - Cannot be used together with I(rootdir) or I(jail) options. + required: false + jail: + version_added: "2.4" + description: + - Pkg will execute in the given jail name or id. + - Cannot be used together with I(chroot) or I(rootdir) options. + autoremove: + version_added: "2.2" + description: + - Remove automatically installed packages which are no longer needed. + required: false + type: bool + default: no +author: "bleader (@bleader)" +notes: + - When using pkgsite, be aware that packages already in the cache won't be downloaded again. + - When used with a `loop:`, each package will be processed individually; + it is much more efficient to pass the list directly to the `name` option. +''' + +EXAMPLES = ''' +- name: Install package foo + pkgng: + name: foo + state: present + +- name: Annotate package foo and bar + pkgng: + name: foo,bar + annotation: '+test1=baz,-test2,:test3=foobar' + +- name: Remove packages foo and bar + pkgng: + name: foo,bar + state: absent + +# "latest" support added in 2.7 +- name: Upgrade package baz + pkgng: + name: baz + state: latest +''' + + +import re +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, pkgng_path, name, dir_arg): + + rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name)) + + if rc == 0: + return True + + return False + + +def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite): + + # Check to see if a package upgrade is available.
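+ # Note added for clarity: both invocations below use pkg's dry-run flag (-n), so nothing is installed or upgraded here; only the resulting exit status is inspected.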
+ # rc = 0, no updates available or package not installed + # rc = 1, updates available + if old_pkgng: + rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name)) + else: + rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name)) + + if rc == 1: + return True + + return False + + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command("%s -v" % pkgng_path) + version = [int(x) for x in re.split(r'[\._]', out)] + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def remove_packages(module, pkgng_path, packages, dir_arg): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgng_path, package, dir_arg): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package)) + + if not module.check_mode and query_package(module, pkgng_path, package, dir_arg): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state): + + install_c = 0 + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + if pkgsite != "": + if old_pkgng: + pkgsite = "PACKAGESITE=%s" % (pkgsite) + else: + pkgsite = "-r %s" % (pkgsite) + + # This environment variable skips mid-install prompts, + # setting them to their default values. 
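+ # (pkg(8) honors BATCH=yes by assuming the default answer to any question
+ # it would otherwise ask, so an unattended run cannot hang on a prompt.)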
+ batch_var = 'env BATCH=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err))
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c))
+
+ return (False, "package(s) already %s" % (state))
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
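+ # The assembled command is shaped like (illustrative):
+ #   pkg annotate -y -A <package> <tag> "<value>"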
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation from %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # No such tag on the package
+ module.fail_json(msg="could not change annotation on %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation on %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+ # Build the list eagerly; a lazy map() iterator would be exhausted after
+ # the first package on Python 3.
+ annotations = list(map(lambda _annotation:
+ re.match(r'(?P<operation>[-+:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation)))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations."
% annotate_c) + return (False, "changed no annotations") + + +def autoremove_packages(module, pkgng_path, dir_arg): + rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg)) + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return False, "no package(s) to autoremove" + + if not module.check_mode: + rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg)) + + return True, "autoremoved %d package(s)" % (autoremove_c) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "latest", "absent"], required=False), + name=dict(aliases=["pkg"], required=True, type='list'), + cached=dict(default=False, type='bool'), + annotation=dict(default="", required=False), + pkgsite=dict(default="", required=False), + rootdir=dict(default="", required=False, type='path'), + chroot=dict(default="", required=False, type='path'), + jail=dict(default="", required=False, type='str'), + autoremove=dict(default=False, type='bool')), + supports_check_mode=True, + mutually_exclusive=[["rootdir", "chroot", "jail"]]) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"] + + changed = False + msgs = [] + dir_arg = "" + + if p["rootdir"] != "": + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0]) + if old_pkgng: + module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") + else: + dir_arg = "--rootdir %s" % (p["rootdir"]) + + if p["chroot"] != "": + dir_arg = '--chroot %s' % (p["chroot"]) + + if p["jail"] != "": + dir_arg = '--jail %s' % (p["jail"]) + + if p["state"] in ("present", "latest"): + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"]) + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent": + _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_db.py b/test/support/integration/plugins/modules/postgresql_db.py new file mode 100644 index 00000000..40858d99 --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_db.py @@ -0,0 +1,657 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_db +short_description: Add or remove PostgreSQL databases from a remote host. +description: + - Add or remove PostgreSQL databases from a remote host. 
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect to (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...). Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ version_added: '2.8'
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that some versions of pg_dump, the embedded PostgreSQL utility used by the module,
+ return rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.),
+ so the module returns changed=True but the dump has not actually been done. Please be sure that your version of
+ pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ version_added: '2.4'
+ target_opts:
+ description:
+ - Further arguments for pg_dump or pg_restore.
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ version_added: '2.4'
+ maintenance_db:
+ description:
+ - The value specifies the initial database (which is also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ version_added: '2.5'
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ version_added: '2.8'
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to C(pg_default).
+ type: path
+ version_added: '2.9'
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like ``--format=d``.
+ type: str
+ version_added: '2.10'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: postgresql_tablespace
+- module: postgresql_info
+- module: postgresql_ping
+notes:
+- The states C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale settings
+ postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to bar. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands the module tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '2.10'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import SQLParseError, pg_quote_identifier
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
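+# Note: SQL identifiers (database and tablespace names) cannot be passed as
+# bind parameters, so the helpers below escape them with pg_quote_identifier()
+# and interpolate them into the statement text, while ordinary values go
+# through psycopg2's %(name)s parameter binding.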
+# + + +def set_owner(cursor, db, owner): + query = 'ALTER DATABASE %s OWNER TO "%s"' % ( + pg_quote_identifier(db, 'database'), + owner) + executed_commands.append(query) + cursor.execute(query) + return True + + +def set_conn_limit(cursor, db, conn_limit): + query = "ALTER DATABASE %s CONNECTION LIMIT %s" % ( + pg_quote_identifier(db, 'database'), + conn_limit) + executed_commands.append(query) + cursor.execute(query) + return True + + +def get_encoding_id(cursor, encoding): + query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" + cursor.execute(query, {'encoding': encoding}) + return cursor.fetchone()['encoding_id'] + + +def get_db_info(cursor, db): + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit, + spcname AS tablespace + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ + cursor.execute(query, {'db': db}) + return cursor.fetchone() + + +def db_exists(cursor, db): + query = "SELECT * FROM pg_database WHERE datname=%(db)s" + cursor.execute(query, {'db': db}) + return cursor.rowcount == 1 + + +def db_delete(cursor, db): + if db_exists(cursor, db): + query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') + executed_commands.append(query) + cursor.execute(query) + return True + else: + return False + + +def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace) + if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] + if owner: + query_fragments.append('OWNER "%s"' % owner) + if template: + query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) + if encoding: + query_fragments.append('ENCODING %(enc)s') + if lc_collate: + query_fragments.append('LC_COLLATE %(collate)s') + if lc_ctype: + query_fragments.append('LC_CTYPE %(ctype)s') + if tablespace: + query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace')) + if conn_limit: + query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query = ' '.join(query_fragments) + executed_commands.append(cursor.mogrify(query, params)) + cursor.execute(query, params) + return True + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + raise NotSupportedError( + 'Changing database encoding is not supported. ' + 'Current encoding: %s' % db_info['encoding'] + ) + elif lc_collate and lc_collate != db_info['lc_collate']: + raise NotSupportedError( + 'Changing LC_COLLATE is not supported. ' + 'Current LC_COLLATE: %s' % db_info['lc_collate'] + ) + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + raise NotSupportedError( + 'Changing LC_CTYPE is not supported.' 
+ ' Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump.
+ # A shell pipe has no portable way to return the exit code of the
+ # first command.
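+ # The resulting shell line is shaped like (illustrative):
+ #   gzip < pg_fifo > target.gz & pg_dump ... > pg_fifo
+ # so run_command still sees pg_dump's exit status.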
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Extend a copy of the environment rather than replacing it, so PATH
+ # and locale settings survive alongside PGPASSWORD.
+ env = os.environ.copy()
+ if password:
+ env["PGPASSWORD"] = password
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+ return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = "ALTER DATABASE %s SET TABLESPACE %s" % (
+ pg_quote_identifier(db, 'database'),
+ pg_quote_identifier(tablespace, 'tablespace'))
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+# + + +def main(): + argument_spec = pgutils.postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', required=True, aliases=['name']), + owner=dict(type='str', default=''), + template=dict(type='str', default=''), + encoding=dict(type='str', default=''), + lc_collate=dict(type='str', default=''), + lc_ctype=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']), + target=dict(type='path', default=''), + target_opts=dict(type='str', default=''), + maintenance_db=dict(type='str', default="postgres"), + session_role=dict(type='str'), + conn_limit=dict(type='str', default=''), + tablespace=dict(type='path', default=''), + dump_extra_args=dict(type='str', default=None), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + db = module.params["db"] + owner = module.params["owner"] + template = module.params["template"] + encoding = module.params["encoding"] + lc_collate = module.params["lc_collate"] + lc_ctype = module.params["lc_ctype"] + target = module.params["target"] + target_opts = module.params["target_opts"] + state = module.params["state"] + changed = False + maintenance_db = module.params['maintenance_db'] + session_role = module.params["session_role"] + conn_limit = module.params['conn_limit'] + tablespace = module.params['tablespace'] + dump_extra_args = module.params['dump_extra_args'] + + raw_connection = state in ("dump", "restore") + + if not raw_connection: + pgutils.ensure_required_libs(module) + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "login_host": "host", + "login_user": "user", + "login_password": "password", + "port": "port", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + kw = dict((params_map[k], v) for (k, v) in iteritems(module.params) + if k in params_map and v != '' and v is not None) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + + if target == "": + target = "{0}/{1}.sql".format(os.getcwd(), db) + target = os.path.expanduser(target) + + if not raw_connection: + try: + db_connection = psycopg2.connect(database=maintenance_db, **kw) + + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. 
Exception: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + if session_role: + try: + cursor.execute('SET ROLE "%s"' % session_role) + except Exception as e: + module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc()) + + try: + if module.check_mode: + if state == "absent": + changed = db_exists(cursor, db) + elif state == "present": + changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + if state == "absent": + try: + changed = db_delete(cursor, db) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state in ("dump", "restore"): + method = state == "dump" and db_dump or db_restore + try: + if state == 'dump': + rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw) + else: + rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw) + + if rc != 0: + module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd) + else: + module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd, + executed_commands=executed_commands) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_privs.py b/test/support/integration/plugins/modules/postgresql_privs.py new file mode 100644 index 00000000..ba8324dd --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_privs.py @@ -0,0 +1,1097 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: postgresql_privs +version_added: '1.2' +short_description: Grant or revoke privileges on PostgreSQL database objects +description: +- Grant or revoke privileges on PostgreSQL database objects. +- This module is basically a wrapper around most of the functionality of + PostgreSQL's GRANT and REVOKE statements with detection of changes + (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)). +options: + database: + description: + - Name of database to connect to. 
+ required: yes + type: str + aliases: + - db + - login_db + state: + description: + - If C(present), the specified privileges are granted, if C(absent) they are revoked. + type: str + default: present + choices: [ absent, present ] + privs: + description: + - Comma separated list of privileges to grant/revoke. + type: str + aliases: + - priv + type: + description: + - Type of database object to set privileges on. + - The C(default_privs) choice is available starting at version 2.7. + - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'. + - The C(type) choice is available from Ansible version '2.10'. + type: str + default: table + choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function, + group, language, table, tablespace, schema, sequence, type ] + objs: + description: + - Comma separated list of database objects to set privileges on. + - If I(type) is C(table), C(partition table), C(sequence) or C(function), + the special valueC(ALL_IN_SCHEMA) can be provided instead to specify all + database objects of type I(type) in the schema specified via I(schema). + (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available + for C(function) and C(partition table) from version 2.8) + - If I(type) is C(database), this parameter can be omitted, in which case + privileges are set for the database specified via I(database). + - 'If I(type) is I(function), colons (":") in object names will be + replaced with commas (needed to specify function signatures, see examples)' + type: str + aliases: + - obj + schema: + description: + - Schema that contains the database objects specified via I(objs). + - May only be provided if I(type) is C(table), C(sequence), C(function), C(type), + or C(default_privs). Defaults to C(public) in these cases. + - Pay attention, for embedded types when I(type=type) + I(schema) can be C(pg_catalog) or C(information_schema) respectively. + type: str + roles: + description: + - Comma separated list of role (user/group) names to set permissions for. + - The special value C(PUBLIC) can be provided instead to set permissions + for the implicitly defined PUBLIC group. + type: str + required: yes + aliases: + - role + fail_on_role: + version_added: '2.8' + description: + - If C(yes), fail when target role (for whom privs need to be granted) does not exist. + Otherwise just warn and continue. + default: yes + type: bool + session_role: + version_added: '2.8' + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. + type: str + target_roles: + description: + - A list of existing role (user/group) names to set as the + default permissions for database objects subsequently created by them. + - Parameter I(target_roles) is only available with C(type=default_privs). + type: str + version_added: '2.8' + grant_option: + description: + - Whether C(role) may grant/revoke the specified privileges/group memberships to others. + - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. + - I(grant_option) only has an effect if I(state) is C(present). + type: bool + aliases: + - admin_option + host: + description: + - Database host address. If unspecified, connect via Unix socket. 
+ type: str + aliases: + - login_host + port: + description: + - Database port to connect to. + type: int + default: 5432 + aliases: + - login_port + unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + aliases: + - login_unix_socket + login: + description: + - The username to authenticate with. + type: str + default: postgres + aliases: + - login_user + password: + description: + - The password to authenticate with. + type: str + aliases: + - login_password + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + version_added: '2.3' + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + version_added: '2.3' + type: str + aliases: + - ssl_rootcert + +notes: +- Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) + have singular alias names (I(priv), I(obj), I(role)). +- To revoke only C(GRANT OPTION) for a specific object, set I(state) to + C(present) and I(grant_option) to C(no) (see examples). +- Note that when revoking privileges from a role R, this role may still have + access via privileges granted to any role R is a member of including C(PUBLIC). +- Note that when revoking privileges from a role R, you do so as the user + specified via I(login). If R has been granted the same privileges by + another user also, R can still access database objects via these privileges. +- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). + +seealso: +- module: postgresql_user +- module: postgresql_owner +- module: postgresql_membership +- name: PostgreSQL privileges + description: General information about PostgreSQL privileges. + link: https://www.postgresql.org/docs/current/ddl-priv.html +- name: PostgreSQL GRANT command reference + description: Complete reference of the PostgreSQL GRANT command documentation. + link: https://www.postgresql.org/docs/current/sql-grant.html +- name: PostgreSQL REVOKE command reference + description: Complete reference of the PostgreSQL REVOKE command documentation. + link: https://www.postgresql.org/docs/current/sql-revoke.html + +extends_documentation_fragment: +- postgres + +author: +- Bernhard Weitzhofer (@b6d) +- Tobias Birkefeld (@tcraxs) +''' + +EXAMPLES = r''' +# On database "library": +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# TO librarian, reader WITH GRANT OPTION +- name: Grant privs to librarian and reader on database library + postgresql_privs: + database: library + state: present + privs: SELECT,INSERT,UPDATE + type: table + objs: books,authors + schema: public + roles: librarian,reader + grant_option: yes + +- name: Same as above leveraging default values + postgresql_privs: + db: library + privs: SELECT,INSERT,UPDATE + objs: books,authors + roles: librarian,reader + grant_option: yes + +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this +# isn't already the case (since state: present). 
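+# (To revoke the INSERT privilege itself rather than only its grant option,
+# a separate task with state: absent would be needed.)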
+- name: Revoke privs from reader + postgresql_privs: + db: library + state: present + priv: INSERT + obj: books + role: reader + grant_option: no + +# "public" is the default schema. This also works for PostgreSQL 8.x. +- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader + postgresql_privs: + db: library + state: absent + privs: INSERT,UPDATE + objs: ALL_IN_SCHEMA + role: reader + +- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian + postgresql_privs: + db: library + privs: ALL + type: schema + objs: public,math + role: librarian + +# Note the separation of arguments with colons. +- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader + postgresql_privs: + db: library + privs: ALL + type: function + obj: add(int:int) + schema: math + roles: librarian,reader + +# Note that group role memberships apply cluster-wide and therefore are not +# restricted to database "library" here. +- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION + postgresql_privs: + db: library + type: group + objs: librarian,reader + roles: alice,bob + admin_option: yes + +# Note that here "db: postgres" specifies the database to connect to, not the +# database to grant privileges on (which is specified via the "objs" param) +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: postgres + privs: ALL + type: database + obj: library + role: librarian + +# If objs is omitted for type "database", it defaults to the database +# to which the connection is established +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: library + privs: ALL + type: database + role: librarian + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian + postgresql_privs: + db: library + objs: ALL_DEFAULT + privs: ALL + type: default_privs + role: librarian + grant_option: yes + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1 + postgresql_privs: + db: library + objs: TABLES,SEQUENCES + privs: SELECT + type: default_privs + role: reader + +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2 + postgresql_privs: + db: library + objs: TYPES + privs: USAGE + type: default_privs + role: reader + +# Available since version 2.8 +- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader + postgresql_privs: + db: test + objs: fdw + privs: ALL + type: foreign_data_wrapper + role: reader + +# Available since version 2.10 +- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader + postgresql_privs: + db: test + objs: customtype + privs: ALL + type: type + role: reader + +# Available since version 2.8 +- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader + postgresql_privs: + db: test + objs: fdw_server + privs: ALL + type: foreign_server + role: reader + +# Available since version 2.8 +# Grant 'execute' permissions on all functions in schema 'common' to role 'caller' +- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller + postgresql_privs: + type: function + state: present + privs: EXECUTE + roles: caller + objs: ALL_IN_SCHEMA + schema: common + +# Available since version 2.8 +# ALTER DEFAULT PRIVILEGES FOR ROLE librarian 
IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# For specific
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges for new TABLES objects created by librarian as
+# default from the role reader.
+# For specific
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.10
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+ version_added: '2.8'
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+ 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check whether a role (user/group) exists."""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+ return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
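+ # (libpq treats a "host" value that starts with '/' as the directory of
+ # a Unix-domain socket, which is why the socket path can be passed as
+ # the host.)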
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and params.unix_socket != "": + kw["host"] = params.unix_socket + + sslrootcert = params.ca_cert + if psycopg2.__version__ < '2.4.3' and sslrootcert is not None: + raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter') + + self.connection = psycopg2.connect(**kw) + self.cursor = self.connection.cursor() + + def commit(self): + self.connection.commit() + + def rollback(self): + self.connection.rollback() + + @property + def encoding(self): + """Connection encoding in Python-compatible form""" + return psycopg2.extensions.encodings[self.connection.encoding] + + # Methods for querying database objects + + # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like + # phrases in GRANT or REVOKE statements, therefore alternative methods are + # provided here. + + def schema_exists(self, schema): + query = """SELECT count(*) + FROM pg_catalog.pg_namespace WHERE nspname = %s""" + self.cursor.execute(query, (schema,)) + return self.cursor.fetchone()[0] > 0 + + def get_all_tables_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + query = """SELECT relname + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_all_sequences_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + query = """SELECT relname + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S'""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_all_functions_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + query = """SELECT p.proname, oidvectortypes(p.proargtypes) + FROM pg_catalog.pg_proc p + JOIN pg_namespace n ON n.oid = p.pronamespace + WHERE nspname = %s""" + self.cursor.execute(query, (schema,)) + return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()] + + # Methods for getting access control lists and group membership info + + # To determine whether anything has changed after granting/revoking + # privileges, we compare the access control lists of the specified database + # objects before and afterwards. Python's list/string comparison should + # suffice for change detection, we should not actually have to parse ACLs. + # The same should apply to group membership information. 
+ + def get_table_acls(self, schema, tables): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, tables)) + return [t[0] for t in self.cursor.fetchall()] + + def get_sequence_acls(self, schema, sequences): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, sequences)) + return [t[0] for t in self.cursor.fetchall()] + + def get_function_acls(self, schema, function_signatures): + funcnames = [f.split('(', 1)[0] for f in function_signatures] + query = """SELECT proacl + FROM pg_catalog.pg_proc p + JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace + WHERE nspname = %s AND proname = ANY (%s) + ORDER BY proname, proargtypes""" + self.cursor.execute(query, (schema, funcnames)) + return [t[0] for t in self.cursor.fetchall()] + + def get_schema_acls(self, schemas): + query = """SELECT nspacl FROM pg_catalog.pg_namespace + WHERE nspname = ANY (%s) ORDER BY nspname""" + self.cursor.execute(query, (schemas,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_language_acls(self, languages): + query = """SELECT lanacl FROM pg_catalog.pg_language + WHERE lanname = ANY (%s) ORDER BY lanname""" + self.cursor.execute(query, (languages,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_tablespace_acls(self, tablespaces): + query = """SELECT spcacl FROM pg_catalog.pg_tablespace + WHERE spcname = ANY (%s) ORDER BY spcname""" + self.cursor.execute(query, (tablespaces,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_database_acls(self, databases): + query = """SELECT datacl FROM pg_catalog.pg_database + WHERE datname = ANY (%s) ORDER BY datname""" + self.cursor.execute(query, (databases,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_group_memberships(self, groups): + query = """SELECT roleid, grantor, member, admin_option + FROM pg_catalog.pg_auth_members am + JOIN pg_catalog.pg_roles r ON r.oid = am.roleid + WHERE r.rolname = ANY(%s) + ORDER BY roleid, grantor, member""" + self.cursor.execute(query, (groups,)) + return self.cursor.fetchall() + + def get_default_privs(self, schema, *args): + query = """SELECT defaclacl + FROM pg_default_acl a + JOIN pg_namespace b ON a.defaclnamespace=b.oid + WHERE b.nspname = %s;""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_data_wrapper_acls(self, fdws): + query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (%s) ORDER BY fdwname""" + self.cursor.execute(query, (fdws,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_server_acls(self, fs): + query = """SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (%s) ORDER BY srvname""" + self.cursor.execute(query, (fs,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_type_acls(self, schema, types): + query = """SELECT t.typacl FROM pg_catalog.pg_type t + JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname""" + self.cursor.execute(query, (schema, types)) + return [t[0] for t in self.cursor.fetchall()] + + # Manipulating privileges + + def manipulate_privs(self, obj_type, privs, 
objs, roles, target_roles, + state, grant_option, schema_qualifier=None, fail_on_role=True): + """Manipulate database object privileges. + + :param obj_type: Type of database object to grant/revoke + privileges for. + :param privs: Either a list of privileges to grant/revoke + or None if type is "group". + :param objs: List of database objects to grant/revoke + privileges for. + :param roles: Either a list of role names or "PUBLIC" + for the implicitly defined "PUBLIC" group + :param target_roles: List of role names to grant/revoke + default privileges as. + :param state: "present" to grant privileges, "absent" to revoke. + :param grant_option: Only for state "present": If True, set + grant/admin option. If False, revoke it. + If None, don't change grant option. + :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", + "FUNCTION") must be qualified by schema. + Ignored for other Types. + """ + # get_status: function to get current status + if obj_type == 'table': + get_status = partial(self.get_table_acls, schema_qualifier) + elif obj_type == 'sequence': + get_status = partial(self.get_sequence_acls, schema_qualifier) + elif obj_type == 'function': + get_status = partial(self.get_function_acls, schema_qualifier) + elif obj_type == 'schema': + get_status = self.get_schema_acls + elif obj_type == 'language': + get_status = self.get_language_acls + elif obj_type == 'tablespace': + get_status = self.get_tablespace_acls + elif obj_type == 'database': + get_status = self.get_database_acls + elif obj_type == 'group': + get_status = self.get_group_memberships + elif obj_type == 'default_privs': + get_status = partial(self.get_default_privs, schema_qualifier) + elif obj_type == 'foreign_data_wrapper': + get_status = self.get_foreign_data_wrapper_acls + elif obj_type == 'foreign_server': + get_status = self.get_foreign_server_acls + elif obj_type == 'type': + get_status = partial(self.get_type_acls, schema_qualifier) + else: + raise Error('Unsupported database object type "%s".' % obj_type) + + # Return False (nothing has changed) if there are no objs to work on. + if not objs: + return False + + # obj_ids: quoted db object identifiers (sometimes schema-qualified) + if obj_type == 'function': + obj_ids = [] + for obj in objs: + try: + f, args = obj.split('(', 1) + except Exception: + raise Error('Illegal function signature: "%s".' 
% obj) + obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) + elif obj_type in ['table', 'sequence', 'type']: + obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] + else: + obj_ids = ['"%s"' % o for o in objs] + + # set_what: SQL-fragment specifying what to set for the target roles: + # Either group membership or privileges on objects of a certain type + if obj_type == 'group': + set_what = ','.join('"%s"' % i for i in obj_ids) + elif obj_type == 'default_privs': + # We don't want privs to be quoted here + set_what = ','.join(privs) + else: + # function types are already quoted above + if obj_type != 'function': + obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] + # Note: obj_type has been checked against a set of string literals + # and privs was escaped when it was parsed + # Note: Underscores are replaced with spaces to support multi-word obj_type + set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '), + ','.join(obj_ids)) + + # for_whom: SQL-fragment specifying for whom to set the above + if roles == 'PUBLIC': + for_whom = 'PUBLIC' + else: + for_whom = [] + for r in roles: + if not role_exists(self.module, self.cursor, r): + if fail_on_role: + self.module.fail_json(msg="Role '%s' does not exist" % r.strip()) + + else: + self.module.warn("Role '%s' does not exist, pass it" % r.strip()) + else: + for_whom.append('"%s"' % r) + + if not for_whom: + return False + + for_whom = ','.join(for_whom) + + # as_who: + as_who = None + if target_roles: + as_who = ','.join('"%s"' % r for r in target_roles) + + status_before = get_status(objs) + + query = QueryBuilder(state) \ + .for_objtype(obj_type) \ + .with_grant_option(grant_option) \ + .for_whom(for_whom) \ + .as_who(as_who) \ + .for_schema(schema_qualifier) \ + .set_what(set_what) \ + .for_objs(objs) \ + .build() + + executed_queries.append(query) + self.cursor.execute(query) + status_after = get_status(objs) + + def nonesorted(e): + # For python 3+ that can fail trying + # to compare NoneType elements by sort method. 
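+ # (e.g. sorted([None, 'a']) raises TypeError on Python 3, so None
+ # is mapped to the empty string before sorting.)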
+ if e is None: + return '' + return e + + status_before.sort(key=nonesorted) + status_after.sort(key=nonesorted) + return status_before != status_after + + +class QueryBuilder(object): + def __init__(self, state): + self._grant_option = None + self._for_whom = None + self._as_who = None + self._set_what = None + self._obj_type = None + self._state = state + self._schema = None + self._objs = None + self.query = [] + + def for_objs(self, objs): + self._objs = objs + return self + + def for_schema(self, schema): + self._schema = schema + return self + + def with_grant_option(self, option): + self._grant_option = option + return self + + def for_whom(self, who): + self._for_whom = who + return self + + def as_who(self, target_roles): + self._as_who = target_roles + return self + + def set_what(self, what): + self._set_what = what + return self + + def for_objtype(self, objtype): + self._obj_type = objtype + return self + + def build(self): + if self._state == 'present': + self.build_present() + elif self._state == 'absent': + self.build_absent() + else: + self.build_absent() + return '\n'.join(self.query) + + def add_default_revoke(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + + def add_grant_option(self): + if self._grant_option: + if self._obj_type == 'group': + self.query[-1] += ' WITH ADMIN OPTION;' + else: + self.query[-1] += ' WITH GRANT OPTION;' + else: + self.query[-1] += ';' + if self._obj_type == 'group': + self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + elif not self._obj_type == 'default_privs': + self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + + def add_default_priv(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who, + self._schema, + self._set_what, + obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema, + self._set_what, + obj, + self._for_whom)) + self.add_grant_option() + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who, + self._schema, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom)) + self.add_grant_option() + + def build_present(self): + if self._obj_type == 'default_privs': + self.add_default_revoke() + self.add_default_priv() + else: + self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom)) + self.add_grant_option() + + def build_absent(self): + if self._obj_type == 'default_privs': + self.query = [] + for obj in ['TABLES', 'SEQUENCES', 'TYPES']: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + else: + self.query.append('REVOKE {0} FROM 
{1};'.format(self._set_what, self._for_whom)) + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + database=dict(required=True, aliases=['db', 'login_db']), + state=dict(default='present', choices=['present', 'absent']), + privs=dict(required=False, aliases=['priv']), + type=dict(default='table', + choices=['table', + 'sequence', + 'function', + 'database', + 'schema', + 'language', + 'tablespace', + 'group', + 'default_privs', + 'foreign_data_wrapper', + 'foreign_server', + 'type', ]), + objs=dict(required=False, aliases=['obj']), + schema=dict(required=False), + roles=dict(required=True, aliases=['role']), + session_role=dict(required=False), + target_roles=dict(required=False), + grant_option=dict(required=False, type='bool', + aliases=['admin_option']), + host=dict(default='', aliases=['login_host']), + unix_socket=dict(default='', aliases=['login_unix_socket']), + login=dict(default='postgres', aliases=['login_user']), + password=dict(default='', aliases=['login_password'], no_log=True), + fail_on_role=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fail_on_role = module.params['fail_on_role'] + + # Create type object as namespace for module params + p = type('Params', (), module.params) + # param "schema": default, allowed depends on param "type" + if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']: + p.schema = p.schema or 'public' + elif p.schema: + module.fail_json(msg='Argument "schema" is not allowed ' + 'for type "%s".' % p.type) + + # param "objs": default, required depends on param "type" + if p.type == 'database': + p.objs = p.objs or p.database + elif not p.objs: + module.fail_json(msg='Argument "objs" is required ' + 'for type "%s".' % p.type) + + # param "privs": allowed, required depends on param "type" + if p.type == 'group': + if p.privs: + module.fail_json(msg='Argument "privs" is not allowed ' + 'for type "group".') + elif not p.privs: + module.fail_json(msg='Argument "privs" is required ' + 'for type "%s".' 
% p.type) + + # Connect to Database + if not psycopg2: + module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR) + try: + conn = Connection(p, module) + except psycopg2.Error as e: + module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + except ValueError as e: + # We raise this when the psycopg library is too old + module.fail_json(msg=to_native(e)) + + if p.session_role: + try: + conn.cursor.execute('SET ROLE "%s"' % p.session_role) + except Exception as e: + module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc()) + + try: + # privs + if p.privs: + privs = frozenset(pr.upper() for pr in p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) + else: + privs = None + # objs: + if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_tables_in_schema(p.schema) + elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_sequences_in_schema(p.schema) + elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_functions_in_schema(p.schema) + elif p.type == 'default_privs': + if p.objs == 'ALL_DEFAULT': + objs = frozenset(VALID_DEFAULT_OBJS.keys()) + else: + objs = frozenset(obj.upper() for obj in p.objs.split(',')) + if not objs.issubset(VALID_DEFAULT_OBJS): + module.fail_json( + msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys())) + # Again, do we have valid privs specified for object type: + valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj])) + if not valid_objects_for_priv == objs: + module.fail_json( + msg='Invalid priv specified. Valid object for priv: {0}. 
Objects: {1}'.format( + valid_objects_for_priv, objs)) + else: + objs = p.objs.split(',') + + # function signatures are encoded using ':' to separate args + if p.type == 'function': + objs = [obj.replace(':', ',') for obj in objs] + + # roles + if p.roles == 'PUBLIC': + roles = 'PUBLIC' + else: + roles = p.roles.split(',') + + if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]): + module.exit_json(changed=False) + + if fail_on_role: + module.fail_json(msg="Role '%s' does not exist" % roles[0].strip()) + + else: + module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip()) + + # check if target_roles is set with type: default_privs + if p.target_roles and not p.type == 'default_privs': + module.warn('"target_roles" will be ignored ' + 'Argument "type: default_privs" is required for usage of "target_roles".') + + # target roles + if p.target_roles: + target_roles = p.target_roles.split(',') + else: + target_roles = None + + changed = conn.manipulate_privs( + obj_type=p.type, + privs=privs, + objs=objs, + roles=roles, + target_roles=target_roles, + state=p.state, + grant_option=p.grant_option, + schema_qualifier=p.schema, + fail_on_role=fail_on_role, + ) + + except Error as e: + conn.rollback() + module.fail_json(msg=e.message, exception=traceback.format_exc()) + + except psycopg2.Error as e: + conn.rollback() + module.fail_json(msg=to_native(e.message)) + + if module.check_mode: + conn.rollback() + else: + conn.commit() + module.exit_json(changed=changed, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_query.py b/test/support/integration/plugins/modules/postgresql_query.py new file mode 100644 index 00000000..18d63e33 --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_query.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Felix Archambault +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'supported_by': 'community', + 'status': ['preview'] +} + +DOCUMENTATION = r''' +--- +module: postgresql_query +short_description: Run PostgreSQL queries +description: +- Runs arbitrary PostgreSQL queries. +- Can run queries from SQL script files. +- Does not run against backup files. Use M(postgresql_db) with I(state=restore) + to run queries on files made by pg_dump/pg_dumpall utilities. +version_added: '2.8' +options: + query: + description: + - SQL query to run. Variables can be escaped with psycopg2 syntax + U(http://initd.org/psycopg/docs/usage.html). + type: str + positional_args: + description: + - List of values to be passed as positional arguments to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(named_args). + type: list + elements: raw + named_args: + description: + - Dictionary of key-value arguments to pass to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(positional_args). + type: dict + path_to_script: + description: + - Path to SQL script on the remote host. + - Returns result of the last query in the script. + - Mutually exclusive with I(query). + type: path + session_role: + description: + - Switch to session_role after connecting. 
The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - login_db + autocommit: + description: + - Execute in autocommit mode when the query can't be run inside a transaction block + (e.g., VACUUM). + - Mutually exclusive with I(check_mode). + type: bool + default: no + version_added: '2.9' + encoding: + description: + - Set the client encoding for the current session (e.g. C(UTF-8)). + - The default is the encoding defined by the database. + type: str + version_added: '2.10' +seealso: +- module: postgresql_db +author: +- Felix Archambault (@archf) +- Andrew Klychkov (@Andersson007) +- Will Rouesnel (@wrouesnel) +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Simple select query to acme db + postgresql_query: + db: acme + query: SELECT version() + +- name: Select query to db acme with positional arguments and non-default credentials + postgresql_query: + db: acme + login_user: django + login_password: mysecretpass + query: SELECT * FROM acme WHERE id = %s AND story = %s + positional_args: + - 1 + - test + +- name: Select query to test_db with named_args + postgresql_query: + db: test_db + query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: test + +- name: Insert query to test_table in db test_db + postgresql_query: + db: test_db + query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story') + +- name: Run queries from SQL script using UTF-8 client encoding for session + postgresql_query: + db: test_db + path_to_script: /var/lib/pgsql/test.sql + positional_args: + - 1 + encoding: UTF-8 + +- name: Example of using autocommit parameter + postgresql_query: + db: test_db + query: VACUUM + autocommit: yes + +- name: > + Insert data to the column of array type using positional_args. + Note that we use quotes here, the same as for passing JSON, etc. + postgresql_query: + query: INSERT INTO test_table (array_column) VALUES (%s) + positional_args: + - '{1,2,3}' + +# Pass list and string vars as positional_args +- name: Set vars + set_fact: + my_list: + - 1 + - 2 + - 3 + my_arr: '{1, 2, 3}' + +- name: Select from test table by passing positional_args as arrays + postgresql_query: + query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s + positional_args: + - '{{ my_list }}' + - '{{ my_arr|string }}' +''' + +RETURN = r''' +query: + description: Query that was tried to be executed. + returned: always + type: str + sample: 'SELECT * FROM bar' +statusmessage: + description: Attribute containing the message returned by the command. + returned: always + type: str + sample: 'INSERT 0 1' +query_result: + description: + - List of dictionaries in column:value form representing returned rows. + returned: changed + type: list + sample: [{"Column": "Value1"},{"Column": "Value2"}] +rowcount: + description: Number of affected rows. 
+ returned: changed + type: int + sample: 5 +''' + +try: + from psycopg2 import ProgrammingError as Psycopg2ProgrammingError + from psycopg2.extras import DictCursor +except ImportError: + # it is needed for checking 'no result to fetch' in main(), + # psycopg2 availability will be checked by connect_to_db() into + # ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native +from ansible.module_utils.six import iteritems + + +# =========================================== +# Module execution. +# + +def list_to_pg_array(elem): + """Convert the passed list to PostgreSQL array + represented as a string. + + Args: + elem (list): List that needs to be converted. + + Returns: + elem (str): String representation of PostgreSQL array. + """ + elem = str(elem).strip('[]') + elem = '{' + elem + '}' + return elem + + +def convert_elements_to_pg_arrays(obj): + """Convert list elements of the passed object + to PostgreSQL arrays represented as strings. + + Args: + obj (dict or list): Object whose elements need to be converted. + + Returns: + obj (dict or list): Object with converted elements. + """ + if isinstance(obj, dict): + for (key, elem) in iteritems(obj): + if isinstance(elem, list): + obj[key] = list_to_pg_array(elem) + + elif isinstance(obj, list): + for i, elem in enumerate(obj): + if isinstance(elem, list): + obj[i] = list_to_pg_array(elem) + + return obj + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + query=dict(type='str'), + db=dict(type='str', aliases=['login_db']), + positional_args=dict(type='list', elements='raw'), + named_args=dict(type='dict'), + session_role=dict(type='str'), + path_to_script=dict(type='path'), + autocommit=dict(type='bool', default=False), + encoding=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=(('positional_args', 'named_args'),), + supports_check_mode=True, + ) + + query = module.params["query"] + positional_args = module.params["positional_args"] + named_args = module.params["named_args"] + path_to_script = module.params["path_to_script"] + autocommit = module.params["autocommit"] + encoding = module.params["encoding"] + + if autocommit and module.check_mode: + module.fail_json(msg="Using autocommit is mutually exclusive with check_mode") + + if path_to_script and query: + module.fail_json(msg="path_to_script is mutually exclusive with query") + + if positional_args: + positional_args = convert_elements_to_pg_arrays(positional_args) + + elif named_args: + named_args = convert_elements_to_pg_arrays(named_args) + + if path_to_script: + try: + with open(path_to_script, 'rb') as f: + query = to_native(f.read()) + except Exception as e: + module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e))) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + if encoding is not None: + db_connection.set_client_encoding(encoding) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Prepare args: + if module.params.get("positional_args"): + arguments = module.params["positional_args"] + elif module.params.get("named_args"): + arguments = module.params["named_args"] + else: + arguments = None + + # Set defaults: + changed = False + + # Execute query: + try: + 
cursor.execute(query, arguments) + except Exception as e: + if not autocommit: + db_connection.rollback() + + cursor.close() + db_connection.close() + module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e))) + + statusmessage = cursor.statusmessage + rowcount = cursor.rowcount + + try: + query_result = [dict(row) for row in cursor.fetchall()] + except Psycopg2ProgrammingError as e: + if to_native(e) == 'no results to fetch': + query_result = {} + + except Exception as e: + module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + + if 'SELECT' not in statusmessage: + if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage: + s = statusmessage.split() + if len(s) == 3: + if statusmessage.split()[2] != '0': + changed = True + + elif len(s) == 2: + if statusmessage.split()[1] != '0': + changed = True + + else: + changed = True + + else: + changed = True + + if module.check_mode: + db_connection.rollback() + else: + if not autocommit: + db_connection.commit() + + kw = dict( + changed=changed, + query=cursor.query, + statusmessage=statusmessage, + query_result=query_result, + rowcount=rowcount if rowcount >= 0 else 0, + ) + + cursor.close() + db_connection.close() + + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_set.py b/test/support/integration/plugins/modules/postgresql_set.py new file mode 100644 index 00000000..cfbdae64 --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_set.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_set +short_description: Change a PostgreSQL server configuration parameter +description: + - Allows to change a PostgreSQL server configuration parameter. + - The module uses ALTER SYSTEM command and applies changes by reload server configuration. + - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster. + - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file. + - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file, + which is read in addition to postgresql.conf. + - The module allows to reset parameter to boot_val (cluster initial value) by I(reset=yes) or remove parameter + string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required). + - After change you can see in the ansible output the previous and + the new parameter value and other information using returned values and M(debug) module. +version_added: '2.8' +options: + name: + description: + - Name of PostgreSQL server parameter. + type: str + required: true + value: + description: + - Parameter value to set. + - To remove parameter string from postgresql.auto.conf and + reload the server configuration you must pass I(value=default). + With I(value=default) the playbook always returns changed is true. + type: str + reset: + description: + - Restore parameter to initial state (boot_val). Mutually exclusive with I(value). 
+    type: bool
+    default: false
+  session_role:
+    description:
+    - Switch to session_role after connecting. The specified session_role must
+      be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though
+      the session_role were the one that had logged in originally.
+    type: str
+  db:
+    description:
+    - Name of the database to connect to.
+    type: str
+    aliases:
+    - login_db
+notes:
+- PostgreSQL version 9.4 and later is supported.
+- Be aware that changing a setting with 'postmaster' context can report changed=true
+  even when nothing actually changes, because the same value may be expressed in
+  several different forms, for example 1024MB or 1GB, while in the pg_settings
+  system view it is stored as 131072 (a number of 8kB pages).
+  The final check of the parameter value cannot detect this because the server was
+  not restarted and the value in pg_settings is not updated yet.
+- Some parameters require a restart of the PostgreSQL server.
+  See the official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: postgresql_info
+- name: PostgreSQL server configuration
+  description: General information about PostgreSQL server configuration.
+  link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+  description: Complete reference of the pg_settings view documentation.
+  link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+  description: Complete reference of the ALTER SYSTEM command documentation.
+  link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+  postgresql_set:
+    name: wal_keep_segments
+    reset: yes
+
+# Set the work_mem parameter to 32MB and show what has been changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+  postgresql_set:
+    name: work_mem
+    value: 32mb
+  register: set
+
+- debug:
+    msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+  when: set.changed
+# Note that some parameters require a restart of the PostgreSQL server.
+# In that situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value that differs from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+  postgresql_set:
+    name: log_min_duration_statement
+    value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+  postgresql_set:
+    name: wal_log_hints
+    value: default
+'''
+
+RETURN = r'''
+name:
+  description: Name of PostgreSQL server parameter.
+  returned: always
+  type: str
+  sample: 'shared_buffers'
+restart_required:
+  description: Whether a server restart is required for the change to take effect.
+  returned: always
+  type: bool
+  sample: true
+prev_val_pretty:
+  description: Previous (pretty-printed) value of the parameter.
+  returned: always
+  type: str
+  sample: '4MB'
+value_pretty:
+  description: Current (pretty-printed) value of the parameter.
+  returned: always
+  type: str
+  sample: '64MB'
+value:
+  description:
+  - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Pay attention that for real change some parameters restart of PostgreSQL server is required. + - Returns the current value in the check mode. + returned: always + type: dict + sample: { "value": 67108864, "unit": "b" } +context: + description: + - PostgreSQL setting context. + returned: always + type: str + sample: user +''' + +try: + from psycopg2.extras import DictCursor +except Exception: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native + +PG_REQ_VER = 90400 + +# To allow to set value like 1mb instead of 1MB, etc: +POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb") + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def param_get(cursor, module, name): + query = ("SELECT name, setting, unit, context, boot_val " + "FROM pg_settings WHERE name = %(name)s") + try: + cursor.execute(query, {'name': name}) + info = cursor.fetchall() + cursor.execute("SHOW %s" % name) + val = cursor.fetchone() + + except Exception as e: + module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e))) + + raw_val = info[0][1] + unit = info[0][2] + context = info[0][3] + boot_val = info[0][4] + + if val[0] == 'True': + val[0] = 'on' + elif val[0] == 'False': + val[0] = 'off' + + if unit == 'kB': + if int(raw_val) > 0: + raw_val = int(raw_val) * 1024 + if int(boot_val) > 0: + boot_val = int(boot_val) * 1024 + + unit = 'b' + + elif unit == 'MB': + if int(raw_val) > 0: + raw_val = int(raw_val) * 1024 * 1024 + if int(boot_val) > 0: + boot_val = int(boot_val) * 1024 * 1024 + + unit = 'b' + + return (val[0], raw_val, unit, boot_val, context) + + +def pretty_to_bytes(pretty_val): + # The function returns a value in bytes + # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'. + # Otherwise it returns the passed argument. + + val_in_bytes = None + + if 'kB' in pretty_val: + num_part = int(''.join(d for d in pretty_val if d.isdigit())) + val_in_bytes = num_part * 1024 + + elif 'MB' in pretty_val.upper(): + num_part = int(''.join(d for d in pretty_val if d.isdigit())) + val_in_bytes = num_part * 1024 * 1024 + + elif 'GB' in pretty_val.upper(): + num_part = int(''.join(d for d in pretty_val if d.isdigit())) + val_in_bytes = num_part * 1024 * 1024 * 1024 + + elif 'TB' in pretty_val.upper(): + num_part = int(''.join(d for d in pretty_val if d.isdigit())) + val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024 + + elif 'B' in pretty_val.upper(): + num_part = int(''.join(d for d in pretty_val if d.isdigit())) + val_in_bytes = num_part + + else: + return pretty_val + + return val_in_bytes + + +def param_set(cursor, module, name, value, context): + try: + if str(value).lower() == 'default': + query = "ALTER SYSTEM SET %s = DEFAULT" % name + else: + query = "ALTER SYSTEM SET %s = '%s'" % (name, value) + cursor.execute(query) + + if context != 'postmaster': + cursor.execute("SELECT pg_reload_conf()") + + except Exception as e: + module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e))) + + return True + + +# =========================================== +# Module execution. 
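Before reading main() below, it may help to see the bare SQL round-trip the module performs; a minimal psycopg2 sketch under assumed superuser credentials (the DSN values are placeholders, not part of the patch):

import psycopg2

# ALTER SYSTEM cannot run inside a transaction block, hence autocommit,
# mirroring how this module connects with autocommit=True.
conn = psycopg2.connect(dbname='postgres', user='postgres')
conn.autocommit = True
cur = conn.cursor()
cur.execute("ALTER SYSTEM SET work_mem = '32MB'")
cur.execute("SELECT pg_reload_conf()")  # enough for non-postmaster settings
cur.execute("SELECT setting, unit, context FROM pg_settings WHERE name = 'work_mem'")
print(cur.fetchone())
cur.close()
conn.close()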
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', aliases=['login_db']), + value=dict(type='str'), + reset=dict(type='bool'), + session_role=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params["name"] + value = module.params["value"] + reset = module.params["reset"] + + # Allow to pass values like 1mb instead of 1MB, etc: + if value: + for unit in POSSIBLE_SIZE_UNITS: + if value[:-2].isdigit() and unit in value[-2:]: + value = value.upper() + + if value and reset: + module.fail_json(msg="%s: value and reset params are mutually exclusive" % name) + + if not value and not reset: + module.fail_json(msg="%s: at least one of value or reset param must be specified" % name) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + kw = {} + # Check server version (needs 9.4 or later): + ver = db_connection.server_version + if ver < PG_REQ_VER: + module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER)) + kw = dict( + changed=False, + restart_required=False, + value_pretty="", + prev_val_pretty="", + value={"value": "", "unit": ""}, + ) + kw['name'] = name + db_connection.close() + module.exit_json(**kw) + + # Set default returned values: + restart_required = False + changed = False + kw['name'] = name + kw['restart_required'] = False + + # Get info about param state: + res = param_get(cursor, module, name) + current_value = res[0] + raw_val = res[1] + unit = res[2] + boot_val = res[3] + context = res[4] + + if value == 'True': + value = 'on' + elif value == 'False': + value = 'off' + + kw['prev_val_pretty'] = current_value + kw['value_pretty'] = deepcopy(kw['prev_val_pretty']) + kw['context'] = context + + # Do job + if context == "internal": + module.fail_json(msg="%s: cannot be changed (internal context). 
See " + "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name) + + if context == "postmaster": + restart_required = True + + # If check_mode, just compare and exit: + if module.check_mode: + if pretty_to_bytes(value) == pretty_to_bytes(current_value): + kw['changed'] = False + + else: + kw['value_pretty'] = value + kw['changed'] = True + + # Anyway returns current raw value in the check_mode: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + kw['restart_required'] = restart_required + module.exit_json(**kw) + + # Set param: + if value and value != current_value: + changed = param_set(cursor, module, name, value, context) + + kw['value_pretty'] = value + + # Reset param: + elif reset: + if raw_val == boot_val: + # nothing to change, exit: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + module.exit_json(**kw) + + changed = param_set(cursor, module, name, boot_val, context) + + if restart_required: + module.warn("Restart of PostgreSQL is required for setting %s" % name) + + cursor.close() + db_connection.close() + + # Reconnect and recheck current value: + if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'): + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + res = param_get(cursor, module, name) + # f_ means 'final' + f_value = res[0] + f_raw_val = res[1] + + if raw_val == f_raw_val: + changed = False + + else: + changed = True + + kw['value_pretty'] = f_value + kw['value'] = dict( + value=f_raw_val, + unit=unit, + ) + + cursor.close() + db_connection.close() + + kw['changed'] = changed + kw['restart_required'] = restart_required + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/postgresql_table.py b/test/support/integration/plugins/modules/postgresql_table.py new file mode 100644 index 00000000..3bef03b0 --- /dev/null +++ b/test/support/integration/plugins/modules/postgresql_table.py @@ -0,0 +1,601 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +--- +module: postgresql_table +short_description: Create, drop, or modify a PostgreSQL table +description: +- Allows to create, drop, rename, truncate a table, or change some table attributes. +version_added: '2.8' +options: + table: + description: + - Table name. + required: true + aliases: + - name + type: str + state: + description: + - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged), + I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename). + type: str + default: present + choices: [ absent, present ] + tablespace: + description: + - Set a tablespace for the table. + required: false + type: str + owner: + description: + - Set a table owner. + type: str + unlogged: + description: + - Create an unlogged table. + type: bool + default: no + like: + description: + - Create a table like another table (with similar DDL). + Mutually exclusive with I(columns), I(rename), and I(truncate). 
+    type: str
+  including:
+    description:
+    - Keywords used with the like parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+      Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+    type: str
+  columns:
+    description:
+    - Columns that are needed, passed as a list of column definitions.
+    type: list
+    elements: str
+  rename:
+    description:
+    - New table name. Mutually exclusive with I(tablespace), I(owner),
+      I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+    type: str
+  truncate:
+    description:
+    - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+      I(like), I(including), I(columns), I(rename), and I(storage_params).
+    type: bool
+    default: no
+  storage_params:
+    description:
+    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+      Mutually exclusive with I(rename) and I(truncate).
+    type: list
+    elements: str
+  db:
+    description:
+    - Name of the database to connect to, where the table will be created.
+    type: str
+    aliases:
+    - login_db
+  session_role:
+    description:
+    - Switch to session_role after connecting.
+      The specified session_role must be a role that the current login_user is a member of.
+    - Permissions checking for SQL commands is carried out as though
+      the session_role were the one that had logged in originally.
+    type: str
+  cascade:
+    description:
+    - Automatically drop objects that depend on the table (such as views).
+      Used with I(state=absent) only.
+    type: bool
+    default: no
+    version_added: '2.9'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+  named postgres.
+- PostgreSQL allows creating tables without columns, so the columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: postgresql_sequence
+- module: postgresql_idx
+- module: postgresql_info
+- module: postgresql_tablespace
+- module: postgresql_owner
+- module: postgresql_privs
+- module: postgresql_copy
+- name: CREATE TABLE reference
+  description: Complete reference of the CREATE TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+  description: Complete reference of the ALTER TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+  description: Complete reference of the DROP TABLE command documentation.
+  link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+  description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html +author: +- Andrei Klychkov (@Andersson007) +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner + postgresql_table: + db: acme + name: tbl2 + like: tbl1 + owner: testuser + +- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes + postgresql_table: + db: acme + table: tbl2 + like: tbl1 + including: comments, indexes + tablespace: ssd + +- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1 + postgresql_table: + name: test_table + columns: + - id bigserial primary key + - num bigint + - stories text + tablespace: ssd + storage_params: + - fillfactor=10 + - autovacuum_analyze_threshold=1 + +- name: Create an unlogged table in schema acme + postgresql_table: + name: acme.useless_data + columns: waste_id int + unlogged: true + +- name: Rename table foo to bar + postgresql_table: + table: foo + rename: bar + +- name: Rename table foo from schema acme to bar + postgresql_table: + name: acme.foo + rename: bar + +- name: Set owner to someuser + postgresql_table: + name: foo + owner: someuser + +- name: Change tablespace of foo table to new_tablespace and set owner to new_user + postgresql_table: + name: foo + tablespace: new_tablespace + owner: new_user + +- name: Truncate table foo + postgresql_table: + name: foo + truncate: yes + +- name: Drop table foo from schema acme + postgresql_table: + name: acme.foo + state: absent + +- name: Drop table bar cascade + postgresql_table: + name: bar + state: absent + cascade: yes +''' + +RETURN = r''' +table: + description: Name of a table. + returned: always + type: str + sample: 'foo' +state: + description: Table state. + returned: always + type: str + sample: 'present' +owner: + description: Table owner. + returned: always + type: str + sample: 'postgres' +tablespace: + description: Tablespace. + returned: always + type: str + sample: 'ssd_tablespace' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'CREATE TABLE "test_table" (id bigint)' ] +storage_params: + description: Storage parameters. + returned: always + type: list + sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.database import pg_quote_identifier +from ansible.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Table(object): + def __init__(self, name, module, cursor): + self.name = name + self.module = module + self.cursor = cursor + self.info = { + 'owner': '', + 'tblspace': '', + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_queries = [] + + def get_info(self): + """Getter to refresh and get table info""" + self.__exists_in_db() + + def __exists_in_db(self): + """Check table exists and refresh info""" + if "." 
in self.name: + schema = self.name.split('.')[-2] + tblname = self.name.split('.')[-1] + else: + schema = 'public' + tblname = self.name + + query = ("SELECT t.tableowner, t.tablespace, c.reloptions " + "FROM pg_tables AS t " + "INNER JOIN pg_class AS c ON c.relname = t.tablename " + "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid " + "WHERE t.tablename = %(tblname)s " + "AND n.nspname = %(schema)s") + res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema}, + add_to_executed=False) + if res: + self.exists = True + self.info = dict( + owner=res[0][0], + tblspace=res[0][1] if res[0][1] else '', + storage_params=res[0][2] if res[0][2] else [], + ) + + return True + else: + self.exists = False + return False + + def create(self, columns='', params='', tblspace='', + unlogged=False, owner=''): + """ + Create table. + If table exists, check passed args (params, tblspace, owner) and, + if they're different from current, change them. + Arguments: + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. + columns - column string (comma separated). + """ + name = pg_quote_identifier(self.name, 'table') + + changed = False + + if self.exists: + if tblspace == 'pg_default' and self.info['tblspace'] is None: + pass # Because they have the same meaning + elif tblspace and self.info['tblspace'] != tblspace: + self.set_tblspace(tblspace) + changed = True + + if owner and self.info['owner'] != owner: + self.set_owner(owner) + changed = True + + if params: + param_list = [p.strip(' ') for p in params.split(',')] + + new_param = False + for p in param_list: + if p not in self.info['storage_params']: + new_param = True + + if new_param: + self.set_stor_params(params) + changed = True + + if changed: + return True + return False + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + if columns: + query += " (%s)" % columns + else: + query += " ()" + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def create_like(self, src_table, including='', tblspace='', + unlogged=False, params='', owner=''): + """ + Create table like another table (with similar DDL). + Arguments: + src_table - source table. + including - corresponds to optional INCLUDING expression + in CREATE TABLE ... LIKE statement. + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. 
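+        Example of the DDL this method assembles (illustrative, hand-written):
+            CREATE TABLE "tbl2" (LIKE "tbl1" INCLUDING COMMENTS INCLUDING INDEXES) TABLESPACE "ssd"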
+ """ + changed = False + + name = pg_quote_identifier(self.name, 'table') + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + query += " (LIKE %s" % pg_quote_identifier(src_table, 'table') + + if including: + including = including.split(',') + for i in including: + query += " INCLUDING %s" % i + + query += ')' + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database') + + if exec_sql(self, query, ddl=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def truncate(self): + query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table') + return exec_sql(self, query, ddl=True) + + def rename(self, newname): + query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(newname, 'table')) + return exec_sql(self, query, ddl=True) + + def set_owner(self, username): + query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(username, 'role')) + return exec_sql(self, query, ddl=True) + + def drop(self, cascade=False): + if not self.exists: + return False + + query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table') + if cascade: + query += " CASCADE" + return exec_sql(self, query, ddl=True) + + def set_tblspace(self, tblspace): + query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(tblspace, 'database')) + return exec_sql(self, query, ddl=True) + + def set_stor_params(self, params): + query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params) + return exec_sql(self, query, ddl=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + table=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default="present", choices=["absent", "present"]), + db=dict(type='str', default='', aliases=['login_db']), + tablespace=dict(type='str'), + owner=dict(type='str'), + unlogged=dict(type='bool', default=False), + like=dict(type='str'), + including=dict(type='str'), + rename=dict(type='str'), + truncate=dict(type='bool', default=False), + columns=dict(type='list', elements='str'), + storage_params=dict(type='list', elements='str'), + session_role=dict(type='str'), + cascade=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + table = module.params["table"] + state = module.params["state"] + tablespace = module.params["tablespace"] + owner = module.params["owner"] + unlogged = module.params["unlogged"] + like = module.params["like"] + including = module.params["including"] + newname = module.params["rename"] + storage_params = module.params["storage_params"] + truncate = module.params["truncate"] + columns = module.params["columns"] + cascade = module.params["cascade"] + + if state == 'present' and cascade: + module.warn("cascade=true is ignored when state=present") + + # Check mutual exclusive parameters: + if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including): + module.fail_json(msg="%s: state=absent is mutually exclusive with: " + "truncate, rename, columns, tablespace, " + "including, like, storage_params, unlogged, owner" % table) + + if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: truncate is mutually exclusive with: " + "rename, columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if newname and (columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: rename is mutually exclusive with: " + "columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if like and columns: + module.fail_json(msg="%s: like and columns params are mutually exclusive" % table) + if including and not like: + module.fail_json(msg="%s: including param needs like param specified" % table) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + if storage_params: + storage_params = ','.join(storage_params) + + if columns: + columns = ','.join(columns) + + ############## + # Do main job: + table_obj = Table(table, module, cursor) + + # Set default returned values: + changed = False + kw = {} + kw['table'] = table + kw['state'] = '' + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + + if state == 'absent': + changed = table_obj.drop(cascade=cascade) + + elif truncate: + changed = table_obj.truncate() + + elif newname: + changed = table_obj.rename(newname) + q = table_obj.executed_queries + table_obj = Table(newname, module, cursor) + table_obj.executed_queries = q + + elif state == 'present' and not like: + changed = table_obj.create(columns, storage_params, + tablespace, unlogged, owner) + + elif 
state == 'present' and like:
+        changed = table_obj.create_like(like, including, tablespace,
+                                        unlogged, storage_params)
+
+    if changed:
+        if module.check_mode:
+            db_connection.rollback()
+        else:
+            db_connection.commit()
+
+        # Refresh table info for RETURN.
+        # Note, if table has been renamed, it gets info by newname:
+        table_obj.get_info()
+        db_connection.commit()
+        if table_obj.exists:
+            kw = dict(
+                table=table,
+                state='present',
+                owner=table_obj.info['owner'],
+                tablespace=table_obj.info['tblspace'],
+                storage_params=table_obj.info['storage_params'],
+            )
+        else:
+            # We just change the table state here
+            # to keep other information about the dropped table:
+            kw['state'] = 'absent'
+
+    kw['queries'] = table_obj.executed_queries
+    kw['changed'] = changed
+    db_connection.close()
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/postgresql_user.py b/test/support/integration/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..10afd0a0
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_user.py
@@ -0,0 +1,927 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['stableinterface'],
+    'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Add or remove a user (role) from a PostgreSQL server instance
+description:
+- Adds or removes a user (role) from a PostgreSQL server instance
+  ("cluster" in PostgreSQL terminology) and, optionally,
+  grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- The fundamental function of the module is to create, or delete, users from
+  a PostgreSQL instance. Privilege assignment, or removal, is an optional
+  step, which works on one database at a time. This allows the module to
+  be called several times in the same playbook to modify the permissions on
+  different databases, or to grant permissions to already existing users.
+- A user cannot be removed until all the privileges have been stripped from
+  the user. In such a situation, if the module tries to remove the user it
+  will fail. To avoid this, the fail_on_user option signals
+  the module to try to remove the user, but if not possible keep going; the
+  module will report if changes happened and separately if the user was
+  removed or not.
+version_added: '0.6'
+options:
+  name:
+    description:
+    - Name of the user (role) to add or remove.
+    type: str
+    required: true
+    aliases:
+    - user
+  password:
+    description:
+    - Set the user's password; before Ansible 1.4 this was required.
+    - Password can be passed unhashed or hashed (MD5-hashed).
+    - An unhashed password will automatically be hashed when saved into the
+      database if the C(encrypted) parameter is set, otherwise it will be saved in
+      plain text format.
+    - When passing a hashed password it must be generated with the format
+      C('str["md5"] + md5[ password + username ]'), resulting in a total of
+      35 characters. An easy way to do this is C(echo "md5$(echo -n
+      'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
+    - Note that if the provided password string is already in MD5-hashed
+      format, then it is used as-is, regardless of the C(encrypted) parameter.
+ type: str + db: + description: + - Name of database to connect to and where user's permissions will be granted. + type: str + aliases: + - login_db + fail_on_user: + description: + - If C(yes), fail when user (role) can't be removed. Otherwise just log and continue. + default: 'yes' + type: bool + aliases: + - fail_on_role + priv: + description: + - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where + privileges can be defined for database ( allowed options - 'CREATE', + 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or + for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE', + 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example + C(table:SELECT) ). Mixed example of this string: + C(CONNECT/CREATE/table1:SELECT/table2:INSERT)." + type: str + role_attr_flags: + description: + - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER." + - Note that '[NO]CREATEUSER' is deprecated. + - To create a simple role for using it like a group, use C(NOLOGIN) flag. + type: str + choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB', + '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ] + session_role: + version_added: '2.8' + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. + type: str + state: + description: + - The user (role) state. + type: str + default: present + choices: [ absent, present ] + encrypted: + description: + - Whether the password is stored hashed in the database. + - Passwords can be passed already hashed or unhashed, and postgresql + ensures the stored password is hashed when C(encrypted) is set. + - "Note: Postgresql 10 and newer doesn't support unhashed passwords." + - Previous to Ansible 2.6, this was C(no) by default. + default: 'yes' + type: bool + version_added: '1.4' + expires: + description: + - The date at which the user's password is to expire. + - If set to C('infinity'), user's password never expire. + - Note that this value should be a valid SQL date and time type. + type: str + version_added: '1.4' + no_password_changes: + description: + - If C(yes), don't inspect database for password changes. Effective when + C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make + password changes as necessary. + default: 'no' + type: bool + version_added: '2.0' + conn_limit: + description: + - Specifies the user (role) connection limit. + type: int + version_added: '2.4' + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + version_added: '2.3' + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + version_added: '2.3' + groups: + description: + - The list of groups (roles) that need to be granted to the user. 
+ type: list + elements: str + version_added: '2.9' + comment: + description: + - Add a comment on the user (equal to the COMMENT ON ROLE statement result). + type: str + version_added: '2.10' +notes: +- The module creates a user (role) with login privilege by default. + Use NOLOGIN role_attr_flags to change this behaviour. +- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles). + You may not specify password or role_attr_flags when the PUBLIC user is specified. +seealso: +- module: postgresql_privs +- module: postgresql_membership +- module: postgresql_owner +- name: PostgreSQL database roles + description: Complete reference of the PostgreSQL database roles documentation. + link: https://www.postgresql.org/docs/current/user-manag.html +author: +- Ansible Core Team +extends_documentation_fragment: postgres +''' + +EXAMPLES = r''' +- name: Connect to acme database, create django user, and grant access to database and products table + postgresql_user: + db: acme + name: django + password: ceec4eif7ya + priv: "CONNECT/products:ALL" + expires: "Jan 31 2020" + +- name: Add a comment on django user + postgresql_user: + db: acme + name: django + comment: This is a test user + +# Connect to default database, create rails user, set its password (MD5-hashed), +# and grant privilege to create other databases and demote rails from super user status if user exists +- name: Create rails user, set MD5-hashed password, grant privs + postgresql_user: + name: rails + password: md59543f1d82624df2b31672ec0f7050460 + role_attr_flags: CREATEDB,NOSUPERUSER + +- name: Connect to acme database and remove test user privileges from there + postgresql_user: + db: acme + name: test + priv: "ALL/products:ALL" + state: absent + fail_on_user: no + +- name: Connect to test database, remove test user from cluster + postgresql_user: + db: test + name: test + priv: ALL + state: absent + +- name: Connect to acme database and set user's password with no expire date + postgresql_user: + db: acme + name: django + password: mysupersecretword + priv: "CONNECT/products:ALL" + expires: infinity + +# Example privileges string format +# INSERT,UPDATE/table:SELECT/anothertable:ALL + +- name: Connect to test database and remove an existing user's password + postgresql_user: + db: test + user: test + password: "" + +- name: Create user test and grant group user_ro and user_rw to it + postgresql_user: + name: test + groups: + - user_ro + - user_rw +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: list + sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"'] + version_added: '2.8' +''' + +import itertools +import re +import traceback +from hashlib import md5 + +try: + import psycopg2 + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.database import pg_quote_identifier, SQLParseError +from ansible.module_utils.postgres import ( + connect_to_db, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six import iteritems + + +FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +FLAGS_BY_VERSION = {'BYPASSRLS': 90500} + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), + database=frozenset( + ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), + ) + +# map to cope with idiosyncracies of SUPERUSER and LOGIN +PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole', + CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin', + REPLICATION='rolreplication', BYPASSRLS='rolbypassrls') + +executed_queries = [] + + +class InvalidFlagsError(Exception): + pass + + +class InvalidPrivsError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def user_exists(cursor, user): + # The PUBLIC user is a special case that is always there + if user == 'PUBLIC': + return True + query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" + cursor.execute(query, {'user': user}) + return cursor.rowcount > 0 + + +def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit): + """Create a new database user (role).""" + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER "%(user)s"' % + {"user": user}] + if password is not None and password != '': + query.append("WITH %(crypt)s" % {"crypt": encrypted}) + query.append("PASSWORD %(password)s") + if expires is not None: + query.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query.append(role_attr_flags) + query = ' '.join(query) + executed_queries.append(query) + cursor.execute(query, query_password_data) + return True + + +def user_should_we_change_password(current_role_attrs, user, password, encrypted): + """Check if we should change the user's password. + + Compare the proposed password with the existing one, comparing + hashes if encrypted. If we can't access it assume yes. + """ + + if current_role_attrs is None: + # on some databases, E.g. AWS RDS instances, there is no access to + # the pg_authid relation to check the pre-existing password, so we + # just assume password is different + return True + + # Do we actually need to do anything? + pwchanging = False + if password is not None: + # Empty password means that the role shouldn't have a password, which + # means we need to check if the current password is None. 
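The md5-prefix branch just below compares against PostgreSQL's historical md5(password || username) scheme described in the DOCUMENTATION above; a standalone sketch of producing such a hash (the credentials are made-up examples):

from hashlib import md5

user, password = 'joe', 'verysecretpassword'
hashed = 'md5' + md5((password + user).encode('utf-8')).hexdigest()
# 'md5' prefix plus 32 hex digits == 35 characters, the length checked below.
assert len(hashed) == 35 and hashed.startswith('md5')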
+        if password == '':
+            if current_role_attrs['rolpassword'] is not None:
+                pwchanging = True
+        # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+        #  3: The size of the 'md5' prefix
+        # When the provided password looks like a MD5-hash, value of
+        # 'encrypted' is ignored.
+        elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+            if password != current_role_attrs['rolpassword']:
+                pwchanging = True
+        elif encrypted == 'ENCRYPTED':
+            hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+            if hashed_password != current_role_attrs['rolpassword']:
+                pwchanging = True
+
+    return pwchanging
+
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+    """Change user password and/or attributes. Return True if changed, False otherwise."""
+    changed = False
+
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+    # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+    # literal
+    if user == 'PUBLIC':
+        if password is not None:
+            module.fail_json(msg="cannot change the password for PUBLIC user")
+        elif role_attr_flags != '':
+            module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+        else:
+            return False
+
+    # Handle passwords.
+    if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+        # Select password and all flag-like columns in order to verify changes.
+        try:
+            select = "SELECT * FROM pg_authid where rolname=%(user)s"
+            cursor.execute(select, {"user": user})
+            # Grab current role attributes.
+            current_role_attrs = cursor.fetchone()
+        except psycopg2.ProgrammingError:
+            current_role_attrs = None
+            db_connection.rollback()
+
+        pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+        if current_role_attrs is None:
+            try:
+                # AWS RDS instances do not allow the user to access pg_authid,
+                # so try to get current_role_attrs from the pg_roles table instead
+                select = "SELECT * FROM pg_roles where rolname=%(user)s"
+                cursor.execute(select, {"user": user})
+                # Grab current role attributes from pg_roles
+                current_role_attrs = cursor.fetchone()
+            except psycopg2.ProgrammingError as e:
+                db_connection.rollback()
+                module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+        role_attr_flags_changing = False
+        if role_attr_flags:
+            role_attr_flags_dict = {}
+            for r in role_attr_flags.split(' '):
+                if r.startswith('NO'):
+                    role_attr_flags_dict[r.replace('NO', '', 1)] = False
+                else:
+                    role_attr_flags_dict[r] = True
+
+            for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+                if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+                    role_attr_flags_changing = True
+
+        if expires is not None:
+            cursor.execute("SELECT %s::timestamptz;", (expires,))
+            expires_with_tz = cursor.fetchone()[0]
+            expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+        else:
+            expires_changing = False
+
+        conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+        if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+            return False
+
+        alter = ['ALTER USER "%(user)s"' % {"user": user}]
+        if pwchanging:
+            if password != '':
+                alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+                alter.append("PASSWORD %(password)s")
+            else:
+                alter.append("WITH PASSWORD
NULL") + alter.append(role_attr_flags) + elif role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + if expires is not None: + alter.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + + query_password_data = dict(password=password, expires=expires) + try: + cursor.execute(' '.join(alter), query_password_data) + changed = True + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + except psycopg2.NotSupportedError as e: + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + + elif no_password_changes and role_attr_flags != '': + # Grab role information from pg_roles instead of pg_authid + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. + current_role_attrs = cursor.fetchone() + + role_attr_flags_changing = False + + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if not role_attr_flags_changing: + return False + + alter = ['ALTER USER "%(user)s"' % + {"user": user}] + if role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + + try: + cursor.execute(' '.join(alter)) + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + + # Grab new role attributes. + cursor.execute(select, {"user": user}) + new_role_attrs = cursor.fetchone() + + # Detect any differences between current_ and new_role_attrs. + changed = current_role_attrs != new_role_attrs + + return changed + + +def user_delete(cursor, user): + """Try to remove a user. Returns True if successful otherwise False""" + cursor.execute("SAVEPOINT ansible_pgsql_user_delete") + try: + query = 'DROP USER "%s"' % user + executed_queries.append(query) + cursor.execute(query) + except Exception: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return False + + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return True + + +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def get_table_privileges(cursor, user, table): + if '.' in table: + schema, table = table.split('.', 1) + else: + schema = 'public' + query = ("SELECT privilege_type FROM information_schema.role_table_grants " + "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s") + cursor.execute(query, {'user': user, 'table': table, 'schema': schema}) + return frozenset([x[0] for x in cursor.fetchall()]) + + +def grant_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'GRANT %s ON TABLE %s TO "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def revoke_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'REVOKE %s ON TABLE %s FROM "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def get_database_privileges(cursor, user, db): + priv_map = { + 'C': 'CREATE', + 'T': 'TEMPORARY', + 'c': 'CONNECT', + } + query = 'SELECT datacl FROM pg_database WHERE datname = %s' + cursor.execute(query, (db,)) + datacl = cursor.fetchone()[0] + if datacl is None: + return set() + r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl) + if r is None: + return set() + o = set() + for v in r.group(1): + o.add(priv_map[v]) + return normalize_privileges(o, 'database') + + +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def grant_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'GRANT %s ON DATABASE %s TO "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'REVOKE %s ON DATABASE %s FROM "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_privileges(cursor, user, privs): + if privs is None: + return False + + revoke_funcs = dict(table=revoke_table_privileges, + database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested to be removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def grant_privileges(cursor, user, privs): + if privs is None: + return False + + grant_funcs = dict(table=grant_table_privileges, + database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def parse_role_attrs(cursor, role_attr_flags): + """ + Parse role attributes string for user creation. + Format: + + attributes[,attributes,...] + + Where: + + attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... + [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION", + "[NO]BYPASSRLS" ] + + Note: "[NO]BYPASSRLS" role attribute introduced in 9.5 + Note: "[NO]CREATEUSER" role attribute is deprecated. 
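+
+    Example (illustrative, not in the original docstring): 'CREATEDB,nologin'
+    is normalized to the flag string 'CREATEDB NOLOGIN' (order is not
+    guaranteed, since the flags are collected in a frozenset).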
+ + """ + flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role) + + valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor))) + valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags))) + + if not flags.issubset(valid_flags): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flags.difference(valid_flags))) + + return ' '.join(flags) + + +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in new_privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in new_privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + + +def parse_privs(privs, db): + """ + Parse privilege string to determine permissions for database db. + Format: + + privileges[/privileges/...] + + Where: + + privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | + TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] + """ + if privs is None: + return privs + + o_privs = { + 'database': {}, + 'table': {} + } + for token in privs.split('/'): + if ':' not in token: + type_ = 'database' + name = db + priv_set = frozenset(x.strip().upper() + for x in token.split(',') if x.strip()) + else: + type_ = 'table' + name, privileges = token.split(':', 1) + priv_set = frozenset(x.strip().upper() + for x in privileges.split(',') if x.strip()) + + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) + o_privs[type_][name] = priv_set + + return o_privs + + +def get_valid_flags_by_version(cursor): + """ + Some role attributes were introduced after certain versions. We want to + compile a list of valid flags against the current Postgres version. + """ + current_version = cursor.connection.server_version + + return [ + flag + for flag, version_introduced in FLAGS_BY_VERSION.items() + if current_version >= version_introduced + ] + + +def get_comment(cursor, user): + """Get user's comment.""" + query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(user)s") + cursor.execute(query, {'user': user}) + return cursor.fetchone()[0] + + +def add_comment(cursor, user, comment): + """Add comment on user.""" + if comment != get_comment(cursor, user): + query = 'COMMENT ON ROLE "%s" IS ' % user + cursor.execute(query + '%(comment)s', {'comment': comment}) + executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + return True + else: + return False + + +# =========================================== +# Module execution. 
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + user=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', default=None, no_log=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + priv=dict(type='str', default=None), + db=dict(type='str', default='', aliases=['login_db']), + fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']), + role_attr_flags=dict(type='str', default=''), + encrypted=dict(type='bool', default='yes'), + no_password_changes=dict(type='bool', default='no'), + expires=dict(type='str', default=None), + conn_limit=dict(type='int', default=None), + session_role=dict(type='str'), + groups=dict(type='list', elements='str'), + comment=dict(type='str', default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + fail_on_user = module.params["fail_on_user"] + if module.params['db'] == '' and module.params["priv"] is not None: + module.fail_json(msg="privileges require a database to be specified") + privs = parse_privs(module.params["priv"], module.params["db"]) + no_password_changes = module.params["no_password_changes"] + if module.params["encrypted"]: + encrypted = "ENCRYPTED" + else: + encrypted = "UNENCRYPTED" + expires = module.params["expires"] + conn_limit = module.params["conn_limit"] + role_attr_flags = module.params["role_attr_flags"] + groups = module.params["groups"] + if groups: + groups = [e.strip() for e in groups] + comment = module.params["comment"] + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + role_attr_flags = parse_role_attrs(cursor, role_attr_flags) + except InvalidFlagsError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + kw = dict(user=user) + changed = False + user_removed = False + + if state == "present": + if user_exists(cursor, user): + try: + changed = user_alter(db_connection, module, user, password, + role_attr_flags, encrypted, expires, no_password_changes, conn_limit) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + changed = user_add(cursor, user, password, + role_attr_flags, encrypted, expires, conn_limit) + except psycopg2.ProgrammingError as e: + module.fail_json(msg="Unable to add user with given requirement " + "due to : %s" % to_native(e), + exception=traceback.format_exc()) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if groups: + target_roles = [] + target_roles.append(user) + pg_membership = PgMembership(module, cursor, groups, target_roles) + changed = pg_membership.grant() or changed + executed_queries.extend(pg_membership.executed_queries) + + if comment is not None: + try: + changed = add_comment(cursor, user, comment) or changed + except Exception as e: + module.fail_json(msg='Unable to add comment on role: %s' % to_native(e), + exception=traceback.format_exc()) + + else: + if user_exists(cursor, user): + if module.check_mode: + changed = True + kw['user_removed'] = True + else: + try: + 
changed = revoke_privileges(cursor, user, privs)
+                    user_removed = user_delete(cursor, user)
+                except SQLParseError as e:
+                    module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+                changed = changed or user_removed
+                if fail_on_user and not user_removed:
+                    msg = "Unable to remove user"
+                    module.fail_json(msg=msg)
+                kw['user_removed'] = user_removed
+
+    if changed:
+        if module.check_mode:
+            db_connection.rollback()
+        else:
+            db_connection.commit()
+
+    kw['changed'] = changed
+    kw['queries'] = executed_queries
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_plugin.py b/test/support/integration/plugins/modules/rabbitmq_plugin.py
new file mode 100644
index 00000000..301bbfe2
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_plugin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Chatham Financial
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_plugin
+short_description: Manage RabbitMQ plugins
+description:
+  - This module can be used to enable or disable RabbitMQ plugins.
+version_added: "1.1"
+author:
+  - Chris Hoffman (@chrishoffman)
+options:
+  names:
+    description:
+      - Comma-separated list of plugin names. Also accepts a single plugin name.
+    required: true
+    aliases: [name]
+  new_only:
+    description:
+      - Only enable missing plugins.
+      - Does not disable plugins that are not in the names list.
+    type: bool
+    default: "no"
+  state:
+    description:
+      - Specify if plugins are to be enabled or disabled.
+    default: enabled
+    choices: [enabled, disabled]
+  prefix:
+    description:
+      - Specify a custom install prefix for RabbitMQ.
+    version_added: "1.3"
+'''
+
+EXAMPLES = '''
+- name: Enable the rabbitmq_management plugin
+  rabbitmq_plugin:
+    names: rabbitmq_management
+    state: enabled
+
+- name: Enable multiple rabbitmq plugins
+  rabbitmq_plugin:
+    names: rabbitmq_management,rabbitmq_management_visualiser
+    state: enabled
+
+- name: Disable plugin
+  rabbitmq_plugin:
+    names: rabbitmq_management
+    state: disabled
+
+- name: Enable all plugins in the list without disabling existing plugins
+  rabbitmq_plugin:
+    names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management
+    state: enabled
+    new_only: 'yes'
+'''
+
+RETURN = '''
+enabled:
+  description: list of plugins enabled during task run
+  returned: always
+  type: list
+  sample: ["rabbitmq_management"]
+disabled:
+  description: list of plugins disabled during task run
+  returned: always
+  type: list
+  sample: ["rabbitmq_management"]
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RabbitMqPlugins(object):
+
+    def __init__(self, module):
+        self.module = module
+        bin_path = ''
+        if module.params['prefix']:
+            if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
+                bin_path = os.path.join(module.params['prefix'], 'bin')
+            elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
+                bin_path = os.path.join(module.params['prefix'], 'sbin')
+            else:
+                # No such path exists.
+                module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix'])
+
+            self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins")
+        else:
+            self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
+
+    def _exec(self, args, run_in_check_mode=False):
+        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+            cmd = [self._rabbitmq_plugins]
+            rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+            return out.splitlines()
+        return list()
+
+    def get_all(self):
+        list_output = self._exec(['list', '-E', '-m'], True)
+        plugins = []
+        for plugin in list_output:
+            if not plugin:
+                break
+            plugins.append(plugin)
+
+        return plugins
+
+    def enable(self, name):
+        self._exec(['enable', name])
+
+    def disable(self, name):
+        self._exec(['disable', name])
+
+
+def main():
+    arg_spec = dict(
+        names=dict(required=True, aliases=['name']),
+        new_only=dict(default='no', type='bool'),
+        state=dict(default='enabled', choices=['enabled', 'disabled']),
+        prefix=dict(required=False, default=None)
+    )
+    module = AnsibleModule(
+        argument_spec=arg_spec,
+        supports_check_mode=True
+    )
+
+    result = dict()
+    names = module.params['names'].split(',')
+    new_only = module.params['new_only']
+    state = module.params['state']
+
+    rabbitmq_plugins = RabbitMqPlugins(module)
+    enabled_plugins = rabbitmq_plugins.get_all()
+
+    enabled = []
+    disabled = []
+    if state == 'enabled':
+        if not new_only:
+            for plugin in enabled_plugins:
+                if " " in plugin:
+                    continue
+                if plugin not in names:
+                    rabbitmq_plugins.disable(plugin)
+                    disabled.append(plugin)
+
+        for name in names:
+            if name not in enabled_plugins:
+                rabbitmq_plugins.enable(name)
+                enabled.append(name)
+    else:
+        for plugin in enabled_plugins:
+            if plugin in names:
+                rabbitmq_plugins.disable(plugin)
+                disabled.append(plugin)
+
+    result['changed'] = len(enabled) > 0 or len(disabled) > 0
+    result['enabled'] = enabled
+    result['disabled'] = disabled
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_queue.py b/test/support/integration/plugins/modules/rabbitmq_queue.py
new file mode 100644
index 00000000..567ec813
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_queue.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Manuel Sousa
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_queue
+author: Manuel Sousa (@manuel-sousa)
+version_added: "2.0"
+
+short_description: Manage RabbitMQ queues
+description:
+  - This module uses the RabbitMQ REST API to create/delete queues
+requirements: [ "requests >= 1.0.0" ]
+options:
+    name:
+        description:
+            - Name of the queue
+        required: true
+    state:
+        description:
+            - Whether the queue should be present or absent
+        choices: [ "present", "absent" ]
+        default: present
+    durable:
+        description:
+            - Whether the queue is durable or not
+        type: bool
+        default: 'yes'
+    auto_delete:
+        description:
+            - Whether the queue should delete itself after all consumers have unsubscribed from it
+        type: bool
+        default: 'no'
+    message_ttl:
+        description:
+            - How long a message can live in the queue before it is discarded (milliseconds)
+        default: forever
+    auto_expires:
+        description:
+            - 
How long a queue can be unused before it is automatically deleted (milliseconds) + default: forever + max_length: + description: + - How many messages can the queue contain before it starts rejecting + default: no limit + dead_letter_exchange: + description: + - Optional name of an exchange to which messages will be republished if they + - are rejected or expire + dead_letter_routing_key: + description: + - Optional replacement routing key to use when a message is dead-lettered. + - Original routing key will be used if unset + max_priority: + description: + - Maximum number of priority levels for the queue to support. + - If not set, the queue will not support message priorities. + - Larger numbers indicate higher priority. + version_added: "2.4" + arguments: + description: + - extra arguments for queue. If defined this argument is a key/value dictionary + default: {} +extends_documentation_fragment: + - rabbitmq +''' + +EXAMPLES = ''' +# Create a queue +- rabbitmq_queue: + name: myQueue + +# Create a queue on remote host +- rabbitmq_queue: + name: myRemoteQueue + login_user: user + login_password: secret + login_host: remote.example.org +''' + +import json +import traceback + +REQUESTS_IMP_ERR = None +try: + import requests + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib import parse as urllib_parse +from ansible.module_utils.rabbitmq import rabbitmq_argument_spec + + +def main(): + + argument_spec = rabbitmq_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + name=dict(required=True, type='str'), + durable=dict(default=True, type='bool'), + auto_delete=dict(default=False, type='bool'), + message_ttl=dict(default=None, type='int'), + auto_expires=dict(default=None, type='int'), + max_length=dict(default=None, type='int'), + dead_letter_exchange=dict(default=None, type='str'), + dead_letter_routing_key=dict(default=None, type='str'), + arguments=dict(default=dict(), type='dict'), + max_priority=dict(default=None, type='int') + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + url = "%s://%s:%s/api/queues/%s/%s" % ( + module.params['login_protocol'], + module.params['login_host'], + module.params['login_port'], + urllib_parse.quote(module.params['vhost'], ''), + module.params['name'] + ) + + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR) + + result = dict(changed=False, name=module.params['name']) + + # Check if queue already exists + r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + if r.status_code == 200: + queue_exists = True + response = r.json() + elif r.status_code == 404: + queue_exists = False + response = r.text + else: + module.fail_json( + msg="Invalid response from RESTAPI when trying to check if queue exists", + details=r.text + ) + + if module.params['state'] == 'present': + change_required = not queue_exists + else: + change_required = queue_exists + + # Check if attributes change on existing queue + if not change_required and r.status_code == 200 and module.params['state'] == 'present': + if not ( + response['durable'] == module.params['durable'] and + response['auto_delete'] == 
module.params['auto_delete'] and + ( + ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or + ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None) + ) and + ( + ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or + ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None) + ) and + ( + ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or + ('x-max-length' not in response['arguments'] and module.params['max_length'] is None) + ) and + ( + ('x-dead-letter-exchange' in response['arguments'] and + response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or + ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None) + ) and + ( + ('x-dead-letter-routing-key' in response['arguments'] and + response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or + ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None) + ) and + ( + ('x-max-priority' in response['arguments'] and + response['arguments']['x-max-priority'] == module.params['max_priority']) or + ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None) + ) + ): + module.fail_json( + msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues", + ) + + # Copy parameters to arguments as used by RabbitMQ + for k, v in { + 'message_ttl': 'x-message-ttl', + 'auto_expires': 'x-expires', + 'max_length': 'x-max-length', + 'dead_letter_exchange': 'x-dead-letter-exchange', + 'dead_letter_routing_key': 'x-dead-letter-routing-key', + 'max_priority': 'x-max-priority' + }.items(): + if module.params[k] is not None: + module.params['arguments'][v] = module.params[k] + + # Exit if check_mode + if module.check_mode: + result['changed'] = change_required + result['details'] = response + result['arguments'] = module.params['arguments'] + module.exit_json(**result) + + # Do changes + if change_required: + if module.params['state'] == 'present': + r = requests.put( + url, + auth=(module.params['login_user'], module.params['login_password']), + headers={"content-type": "application/json"}, + data=json.dumps({ + "durable": module.params['durable'], + "auto_delete": module.params['auto_delete'], + "arguments": module.params['arguments'] + }), + verify=module.params['ca_cert'], + cert=(module.params['client_cert'], module.params['client_key']) + ) + elif module.params['state'] == 'absent': + r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']), + verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key'])) + + # RabbitMQ 3.6.7 changed this response code from 204 to 201 + if r.status_code == 204 or r.status_code == 201: + result['changed'] = True + module.exit_json(**result) + else: + module.fail_json( + msg="Error creating queue", + status=r.status_code, + details=r.text + ) + + else: + module.exit_json( + changed=False, + name=module.params['name'] + ) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py new file mode 100644 index 00000000..f35cf53b --- /dev/null +++ 
b/test/support/integration/plugins/modules/s3_bucket.py
@@ -0,0 +1,740 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+    - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+version_added: "2.0"
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+  force:
+    description:
+      - When trying to delete a bucket, delete all keys (including versions and delete markers)
+        in the bucket first (an s3 bucket must be empty for a successful deletion)
+    type: bool
+    default: 'no'
+  name:
+    description:
+      - Name of the s3 bucket
+    required: true
+    type: str
+  policy:
+    description:
+      - The JSON policy as a string.
+    type: json
+  s3_url:
+    description:
+      - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
+      - Assumes AWS if not specified.
+      - For Walrus, use FQDN of the endpoint without scheme nor path.
+    aliases: [ S3_URL ]
+    type: str
+  ceph:
+    description:
+      - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+        with Ceph in order to provide the same module behaviour where possible.
+    type: bool
+    version_added: "2.2"
+  requester_pays:
+    description:
+      - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+        of the request and the data download from the bucket.
+    type: bool
+    default: False
+  state:
+    description:
+      - Create or remove the s3 bucket
+    required: false
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  tags:
+    description:
+      - Tags dict to apply to the bucket
+    type: dict
+  purge_tags:
+    description:
+      - Whether to remove tags that aren't present in the C(tags) parameter
+    type: bool
+    default: True
+    version_added: "2.9"
+  versioning:
+    description:
+      - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+    type: bool
+  encryption:
+    description:
+      - Describes the default server-side encryption to apply to new objects in the bucket.
+        In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+    choices: [ 'none', 'AES256', 'aws:kms' ]
+    version_added: "2.9"
+    type: str
+  encryption_key_id:
+    description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
+                 not specified then it will default to the AWS provided KMS key.
+ version_added: "2.9" + type: str +extends_documentation_fragment: + - aws + - ec2 +notes: + - If C(requestPayment), C(policy), C(tagging) or C(versioning) + operations/API aren't implemented by the endpoint, module doesn't fail + if each parameter satisfies the following condition. + I(requester_pays) is C(False), I(policy), I(tags), and I(versioning) are C(None). +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create a simple s3 bucket +- s3_bucket: + name: mys3bucket + state: present + +# Create a simple s3 bucket on Ceph Rados Gateway +- s3_bucket: + name: mys3bucket + s3_url: http://your-ceph-rados-gateway-server.xxx + ceph: true + +# Remove an s3 bucket and any keys it contains +- s3_bucket: + name: mys3bucket + state: absent + force: yes + +# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag +- s3_bucket: + name: mys3bucket + policy: "{{ lookup('file','policy.json') }}" + requester_pays: yes + versioning: yes + tags: + example: tag1 + another: tag2 + +# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint +- s3_bucket: + name: mydobucket + s3_url: 'https://nyc3.digitaloceanspaces.com' + +# Create a bucket with AES256 encryption +- s3_bucket: + name: mys3bucket + state: present + encryption: "AES256" + +# Create a bucket with aws:kms encryption, KMS key +- s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" + encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example" + +# Create a bucket with aws:kms encryption, default key +- s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" +''' + +import json +import os +import time + +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.six import string_types +from ansible.module_utils.basic import to_text +from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code +from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list +from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry + +try: + from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError +except ImportError: + pass # handled by AnsibleAWSModule + + +def create_or_update_bucket(s3_client, module, location): + + policy = module.params.get("policy") + name = module.params.get("name") + requester_pays = module.params.get("requester_pays") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + versioning = module.params.get("versioning") + encryption = module.params.get("encryption") + encryption_key_id = module.params.get("encryption_key_id") + changed = False + result = {} + + try: + bucket_is_present = bucket_exists(s3_client, name) + except EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to check bucket presence") + + if not bucket_is_present: + try: + bucket_changed = create_bucket(s3_client, name, location) + s3_client.get_waiter('bucket_exists').wait(Bucket=name) + changed = changed or bucket_changed + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed while creating bucket") + + # Versioning 
+    try:
+        versioning_status = get_bucket_versioning(s3_client, name)
+    except BotoCoreError as exp:
+        module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+    except ClientError as exp:
+        if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
+            module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+    else:
+        if versioning is not None:
+            required_versioning = None
+            if versioning and versioning_status.get('Status') != "Enabled":
+                required_versioning = 'Enabled'
+            elif not versioning and versioning_status.get('Status') == "Enabled":
+                required_versioning = 'Suspended'
+
+            if required_versioning:
+                try:
+                    put_bucket_versioning(s3_client, name, required_versioning)
+                    changed = True
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+                versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+        # This output format is there to ensure compatibility with previous versions of the module
+        result['versioning'] = {
+            'Versioning': versioning_status.get('Status', 'Disabled'),
+            'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+        }
+
+    # Requester pays
+    try:
+        requester_pays_status = get_bucket_request_payment(s3_client, name)
+    except BotoCoreError as exp:
+        module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+    except ClientError as exp:
+        if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
+            module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+    else:
+        if requester_pays:
+            payer = 'Requester' if requester_pays else 'BucketOwner'
+            if requester_pays_status != payer:
+                put_bucket_request_payment(s3_client, name, payer)
+                requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+                if requester_pays_status is None:
+                    # The put request is sometimes not applied on the first
+                    # try, so we retry one more time
+                    put_bucket_request_payment(s3_client, name, payer)
+                    requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+                changed = True
+
+        result['requester_pays'] = requester_pays
+
+    # Policy
+    try:
+        current_policy = get_bucket_policy(s3_client, name)
+    except BotoCoreError as exp:
+        module.fail_json_aws(exp, msg="Failed to get bucket policy")
+    except ClientError as exp:
+        if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
+            module.fail_json_aws(exp, msg="Failed to get bucket policy")
+    else:
+        if policy is not None:
+            if isinstance(policy, string_types):
+                policy = json.loads(policy)
+
+            if not policy and current_policy:
+                try:
+                    delete_bucket_policy(s3_client, name)
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to delete bucket policy")
+                current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+                changed = True
+            elif compare_policies(current_policy, policy):
+                try:
+                    put_bucket_policy(s3_client, name, policy)
+                except (BotoCoreError, ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to update bucket policy")
+                current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+                if current_policy is None:
+                    # As for request payment, the put request is sometimes not
+                    # applied on the first try, so we retry one more time
+                    put_bucket_policy(s3_client, name, policy)
+                    current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+                changed = True
+
+        result['policy'] = current_policy
+
+    # Tags
+    try:
+        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+    except BotoCoreError as exp:
+        module.fail_json_aws(exp, msg="Failed to get bucket tags")
+    except ClientError as exp:
+        if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
+            module.fail_json_aws(exp, msg="Failed to get bucket tags")
+    else:
+        if tags is not None:
+            # Tags are always returned as text
+            tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+            if not purge_tags:
+                # Ensure existing tags that aren't updated by desired tags remain
+                current_copy = current_tags_dict.copy()
+                current_copy.update(tags)
+                tags = current_copy
+            if current_tags_dict != tags:
+                if tags:
+                    try:
+                        put_bucket_tagging(s3_client, name, tags)
+                    except (BotoCoreError, ClientError) as e:
+                        module.fail_json_aws(e, msg="Failed to update bucket tags")
+                else:
+                    if purge_tags:
+                        try:
+                            delete_bucket_tagging(s3_client, name)
+                        except (BotoCoreError, ClientError) as e:
+                            module.fail_json_aws(e, msg="Failed to delete bucket tags")
+                current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+                changed = True
+
+        result['tags'] = current_tags_dict
+
+    # Encryption
+    if hasattr(s3_client, "get_bucket_encryption"):
+        try:
+            current_encryption = get_bucket_encryption(s3_client, name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket encryption")
+    elif encryption is not None:
+        module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
+
+    if encryption is not None:
+        current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+        current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+        if encryption == 'none' and current_encryption_algorithm is not None:
+            try:
+                delete_bucket_encryption(s3_client, name)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+            current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+            changed = True
+        elif encryption != 'none' and ((encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
+            expected_encryption = {'SSEAlgorithm': encryption}
+            if encryption == 'aws:kms' and encryption_key_id is not None:
+                expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+            try:
+                put_bucket_encryption(s3_client, name, expected_encryption)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to set bucket encryption")
+            current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
+            changed = True
+
+        result['encryption'] = current_encryption
+
+    module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+    # head_bucket appeared to be really inconsistent, so we use list_buckets instead,
+    # and loop over all the buckets, even if we know it's less performant :(
+    all_buckets = s3_client.list_buckets()['Buckets']
+    return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+    try:
+        configuration = {}
+        if location not in ('us-east-1', None):
+            configuration['LocationConstraint'] = location
+        if len(configuration) > 0:
+            s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+        else:
+            s3_client.create_bucket(Bucket=bucket_name)
+        return True
+    except ClientError as e:
+        error_code = e.response['Error']['Code']
+        if error_code == 'BucketAlreadyOwnedByYou':
+            # We should never get here since we check for the bucket's presence before calling
+            # the create_or_update_bucket method. However, the AWS API sometimes fails to
+            # report bucket presence, so we catch this exception
+            return False
+        else:
+            raise e
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+    s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_policy(s3_client, bucket_name):
+    s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_policy(s3_client, bucket_name):
+    try:
+        current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
+            current_policy = None
+        else:
+            raise e
+    return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+    s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_request_payment(s3_client, bucket_name):
+    return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_versioning(s3_client, bucket_name):
+    return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_encryption(s3_client, bucket_name):
+    try:
+        result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+        return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
+            return None
+        else:
+            raise e
+    except (IndexError, KeyError):
+        return None
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+    server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+    s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_tagging(s3_client, bucket_name):
+    s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_encryption(s3_client, bucket_name):
+    s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def delete_bucket(s3_client, bucket_name):
+    try:
+        s3_client.delete_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchBucket':
+            # This means the bucket was already in a deleting state when we
+            # checked its existence, so we just ignore the error
+            pass
+        else:
+            raise e
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            current_policy = get_bucket_policy(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+        if compare_policies(current_policy, expected_policy):
+            time.sleep(5)
+        else:
+            return current_policy
+    if should_fail:
+        module.fail_json(msg="Bucket policy failed to apply in the expected time")
+    else:
+        return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket request payment")
+        if requester_pays_status != expected_payer:
+            time.sleep(5)
+        else:
+            return requester_pays_status
+    if should_fail:
+        module.fail_json(msg="Bucket request payment failed to apply in the expected time")
+    else:
+        return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
+    for dummy in range(0, 12):
+        try:
+            encryption = get_bucket_encryption(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+        if encryption != expected_encryption:
+            time.sleep(5)
+        else:
+            return encryption
+    module.fail_json(msg="Bucket encryption failed to apply in the expected time")
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+    for dummy in range(0, 24):
+        try:
+            versioning_status = get_bucket_versioning(s3_client, bucket_name)
+        except (BotoCoreError, ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+        if versioning_status.get('Status') != required_versioning:
+            time.sleep(8)
+        else:
+            return versioning_status
+    module.fail_json(msg="Bucket versioning failed to apply in the expected time")
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+    for dummy in range(0, 12):
+        try:
+            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+        except (ClientError, BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket tags")
+        if current_tags_dict != expected_tags_dict:
+            time.sleep(5)
+        else:
+            return current_tags_dict
+    module.fail_json(msg="Bucket tags failed to apply in the expected time")
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+    try:
+        current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchTagSet':
+            return {}
+        raise e
+
+    return
boto3_tag_list_to_ansible_dict(current_tags) + + +def paginated_list(s3_client, **pagination_params): + pg = s3_client.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versions_list(s3_client, **pagination_params): + try: + pg = s3_client.get_paginator('list_object_versions') + for page in pg.paginate(**pagination_params): + # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion + yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))] + except is_boto3_error_code('NoSuchBucket'): + yield [] + + +def destroy_bucket(s3_client, module): + + force = module.params.get("force") + name = module.params.get("name") + try: + bucket_is_present = bucket_exists(s3_client, name) + except EndpointConnectionError as e: + module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to check bucket presence") + + if not bucket_is_present: + module.exit_json(changed=False) + + if force: + # if there are contents then we need to delete them (including versions) before we can delete the bucket + try: + for key_version_pairs in paginated_versions_list(s3_client, Bucket=name): + formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs] + for fk in formatted_keys: + # remove VersionId from cases where they are `None` so that + # unversioned objects are deleted using `DeleteObject` + # rather than `DeleteObjectVersion`, improving backwards + # compatibility with older IAM policies. + if not fk.get('VersionId'): + fk.pop('VersionId') + + if formatted_keys: + resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys}) + if resp.get('Errors'): + module.fail_json( + msg='Could not empty bucket before deleting. 
Could not delete objects: {0}'.format( + ', '.join([k['Key'] for k in resp['Errors']]) + ), + errors=resp['Errors'], response=resp + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket") + + try: + delete_bucket(s3_client, name) + s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) + except WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.') + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket") + + module.exit_json(changed=True) + + +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse(s3_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url): + if s3_url and ceph: # TODO - test this + ceph = urlparse(s3_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs) + elif is_fakes3(s3_url): + fakes3 = urlparse(s3_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs) + return boto3_conn(**params) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + force=dict(default=False, type='bool'), + policy=dict(type='json'), + name=dict(required=True), + requester_pays=dict(default=False, type='bool'), + s3_url=dict(aliases=['S3_URL']), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='dict'), + purge_tags=dict(type='bool', default=True), + versioning=dict(type='bool'), + ceph=dict(default=False, type='bool'), + encryption=dict(choices=['none', 'AES256', 'aws:kms']), + encryption_key_id=dict() + ) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + ) + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + s3_url = module.params.get('s3_url') + ceph = module.params.get('ceph') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if ceph and not s3_url: + module.fail_json(msg='ceph flavour requires s3_url') + + # Look at s3_url and tweak connection settings + # if connecting to Ceph RGW, Walrus or fakes3 + if s3_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url) + + if s3_client is None: # this should never happen + module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.') + + state = module.params.get("state") + encryption = 
module.params.get("encryption") + encryption_key_id = module.params.get("encryption_key_id") + + # Parameter validation + if encryption_key_id is not None and encryption is None: + module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.") + elif encryption_key_id is not None and encryption != 'aws:kms': + module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.") + + if state == 'present': + create_or_update_bucket(s3_client, module, location) + elif state == 'absent': + destroy_bucket(s3_client, module) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/sefcontext.py b/test/support/integration/plugins/modules/sefcontext.py new file mode 100644 index 00000000..33e3fd2e --- /dev/null +++ b/test/support/integration/plugins/modules/sefcontext.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: +- Manages SELinux file context mapping definitions. +- Similar to the C(semanage fcontext) command. +version_added: '2.2' +options: + target: + description: + - Target path (expression). + type: str + required: yes + aliases: [ path ] + ftype: + description: + - The file type that should have SELinux contexts applied. + - "The following file type options are available:" + - C(a) for all files, + - C(b) for block devices, + - C(c) for character devices, + - C(d) for directories, + - C(f) for regular files, + - C(l) for symbolic links, + - C(p) for named pipes, + - C(s) for socket files. + type: str + choices: [ a, b, c, d, f, l, p, s ] + default: a + setype: + description: + - SELinux type for the specified target. + type: str + required: yes + seuser: + description: + - SELinux user for the specified target. + type: str + selevel: + description: + - SELinux range for the specified target. + type: str + aliases: [ serange ] + state: + description: + - Whether the SELinux file context must be C(absent) or C(present). + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: yes + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: no + version_added: '2.8' +notes: +- The changes are persistent across reboots. +- The M(sefcontext) module does not modify existing files to the new + SELinux context(s), so it is advisable to first create the SELinux + file contexts before creating files, or run C(restorecon) manually + for the existing files that require the new SELinux file contexts. +- Not applying SELinux fcontexts to existing files is a deliberate + decision as it would be unclear what reported changes would entail + to, and there's no guarantee that applying SELinux fcontext does + not pick up other unrelated prior changes. 
+requirements: +- libselinux-python +- policycoreutils-python +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Allow apache to modify files in /srv/git_repos + sefcontext: + target: '/srv/git_repos(/.*)?' + setype: httpd_git_rw_content_t + state: present + +- name: Apply new SELinux file context to filesystem + command: restorecon -irv /srv/git_repos +''' + +RETURN = r''' +# Default return values +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records are keyed by a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy.
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()), + setype=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + 
setype = module.params['setype'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/selogin.py b/test/support/integration/plugins/modules/selogin.py new file mode 100644 index 00000000..6429ef36 --- /dev/null +++ b/test/support/integration/plugins/modules/selogin.py @@ -0,0 +1,260 @@ +#!/usr/bin/python + +# (c) 2017, Petr Lautrbach +# Based on seport.py module (c) 2014, Dan Keder + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +--- +module: selogin +short_description: Manages Linux user to SELinux user mapping +description: + - Manages Linux user to SELinux user mapping +version_added: "2.8" +options: + login: + description: + - A Linux user + required: true + seuser: + description: + - SELinux user name + required: true + selevel: + aliases: [ serange ] + description: + - MLS/MCS security range (MLS/MCS systems only). The SELinux range for the SELinux login mapping; defaults to the SELinux user record range. + default: s0 + state: + description: + - Desired mapping value. + required: true + default: present + choices: [ 'present', 'absent' ] + reload: + description: + - Reload SELinux policy after commit.
+ default: yes + ignore_selinux_state: + description: + - Run independently of the SELinux runtime state + type: bool + default: false +notes: + - The changes are persistent across reboots. + - Not tested on any Debian-based system. +requirements: [ 'libselinux', 'policycoreutils' ] +author: +- Dan Keder (@dankeder) +- Petr Lautrbach (@bachradsusi) +- James Cassell (@jamescassell) +''' + +EXAMPLES = ''' +# Modify the default user on the system to the guest_u user +- selogin: + login: __default__ + seuser: guest_u + state: present + +# Assign gijoe user on an MLS machine a range and to the staff_u user +- selogin: + login: gijoe + seuser: staff_u + serange: SystemLow-Secret + state: present + +# Assign all users in the engineering group to the staff_u user +- selogin: + login: '%engineering' + seuser: staff_u + state: present +''' + +RETURN = r''' +# Default return values +''' + + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): + """ Add a Linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux user, or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + # for local_login in all_logins: + if login not in all_logins.keys(): + change = True + if not module.check_mode: + selogin.add(login, seuser, serange) + else: + if all_logins[login][0] != seuser or all_logins[login][1] != serange: + change = True + if not module.check_mode: + selogin.modify(login, seuser, serange) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_login_del(module, login, seuser, do_reload, sestore=''): + """ Delete a Linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux user, or a Linux group if it begins with % + + :type seuser: str + :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # 
module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + if login in all_logins.keys(): + change = True + if not module.check_mode: + selogin.delete(login) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + login=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange'], default='s0'), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + required_if=[ + ["state", "present", ["seuser"]] + ], + supports_check_mode=True + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + login = module.params['login'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'login': login, + 'seuser': seuser, + 'serange': serange, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == 'absent': + result['changed'] = semanage_login_del(module, login, seuser, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/synchronize.py b/test/support/integration/plugins/modules/synchronize.py new file mode 100644 index 00000000..e4c520b7 --- /dev/null +++ b/test/support/integration/plugins/modules/synchronize.py @@ -0,0 +1,618 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012-2013, Timothy Appnel +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: synchronize +version_added: "1.4" +short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy +description: + - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. + - It is run and originates on the local host where Ansible is being run. + - Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of + boilerplate options and host facts. + - This module is not intended to provide access to the full power of rsync, but does make the most common + invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. 
+options: + src: + description: + - Path on the source host that will be synchronized to the destination. + - The path can be absolute or relative. + type: str + required: true + dest: + description: + - Path on the destination host that will be synchronized from the source. + - The path can be absolute or relative. + type: str + required: true + dest_port: + description: + - Port number for ssh on the destination host. + - Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. + - This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port), + the C(remote_port) config setting or the value from ssh client configuration + if none of the former have been set. + type: int + version_added: "1.5" + mode: + description: + - Specify the direction of the synchronization. + - In push mode the localhost or delegate is the source. + - In pull mode the remote host in context is the source. + type: str + choices: [ pull, push ] + default: push + archive: + description: + - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D. + type: bool + default: yes + checksum: + description: + - Skip based on checksum, rather than mod-time & size; Note that the "archive" option is still enabled by default - the "checksum" option will + not disable it. + type: bool + default: no + version_added: "1.6" + compress: + description: + - Compress file data during the transfer. + - In most cases, leave this enabled unless it causes problems. + type: bool + default: yes + version_added: "1.7" + existing_only: + description: + - Skip creating new files on receiver. + type: bool + default: no + version_added: "1.5" + delete: + description: + - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path. + - This option requires C(recursive=yes). + - This option ignores excluded files and behaves like the rsync opt --delete-excluded. + type: bool + default: no + dirs: + description: + - Transfer directories without recursing. + type: bool + default: no + recursive: + description: + - Recurse into directories. + - This parameter defaults to the value of the archive option. + type: bool + links: + description: + - Copy symlinks as symlinks. + - This parameter defaults to the value of the archive option. + type: bool + copy_links: + description: + - Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink. + type: bool + default: no + perms: + description: + - Preserve permissions. + - This parameter defaults to the value of the archive option. + type: bool + times: + description: + - Preserve modification times. + - This parameter defaults to the value of the archive option. + type: bool + owner: + description: + - Preserve owner (super user only). + - This parameter defaults to the value of the archive option. + type: bool + group: + description: + - Preserve group. + - This parameter defaults to the value of the archive option. + type: bool + rsync_path: + description: + - Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page. + - To specify the rsync command to run on the local host, you need to set the task var C(ansible_rsync_path). + type: str + rsync_timeout: + description: + - Specify a C(--timeout) for the rsync command in seconds. + type: int + default: 0 + set_remote_user: + description: + - Put user@ in front of the remote paths.
+ - If you have a custom ssh config to define the remote user for a host + that does not match the inventory user, you should set this parameter to C(no). + type: bool + default: yes + use_ssh_args: + description: + - Use the ssh_args specified in ansible.cfg. + type: bool + default: no + version_added: "2.0" + rsync_opts: + description: + - Specify additional rsync options by passing in an array. + - Note that an empty string in C(rsync_opts) will end up transferring the current working directory. + type: list + default: + version_added: "1.6" + partial: + description: + - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. + type: bool + default: no + version_added: "2.0" + verify_host: + description: + - Verify destination host key. + type: bool + default: no + version_added: "2.0" + private_key: + description: + - Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)). + type: path + version_added: "1.6" + link_dest: + description: + - Add a destination to hard link against during the rsync. + type: list + default: + version_added: "2.5" +notes: + - rsync must be installed on both the local and remote host. + - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host + `synchronize is connecting to`. + - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one + remote machine. + - > + The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a + delegate_to host when delegate_to is used). + - The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active. + - In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1. + - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine + and rsync doesn't give us a way to pass sudo credentials in. + - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been + determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and + rsync does not provide us a way to pass a password to the connection. + - Expect that dest=~/x will be ~/x even if using sudo. + - Inspect the verbose output to validate the destination user/host/path are what was expected. + - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. + - rsync daemon must be up and running with correct permission when using rsync protocol in source or destination path. + - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process + encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly. + - link_dest is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees + of the source and destination are the same.
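Taken together, the defaults above mean a plain push assembles an rsync command roughly like the following (built by main() further down; the binary paths, user and host here are illustrative, not computed):

    # sketch of the argv synchronize builds for a default push over ssh
    cmd = [
        '/usr/bin/rsync', '--delay-updates', '-F',   # forced for safety, plus per-dir filter files
        '--compress', '--archive',                   # compress/archive default to yes
        '--out-format=<<CHANGED>>%i %n%L',           # marker used to detect changed files
        '--rsh=/usr/bin/ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null',
        'some/relative/path', 'user@host:/some/absolute/path',
    ]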
Attempts to hardlink into a directory that is a subdirectory of the source will be prevented. +seealso: +- module: copy +- module: win_robocopy +author: +- Timothy Appnel (@tima) +''' + +EXAMPLES = ''' +- name: Synchronization of src on the control machine to dest on the remote hosts + synchronize: + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization using rsync protocol (push) + synchronize: + src: some/relative/path/ + dest: rsync://somehost.com/path/ + +- name: Synchronization using rsync protocol (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + +- name: Synchronization using rsync protocol on delegate host (push) + synchronize: + src: /some/absolute/path/ + dest: rsync://somehost.com/path/ + delegate_to: delegate.host + +- name: Synchronization using rsync protocol on delegate host (pull) + synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + delegate_to: delegate.host + +- name: Synchronization without any --archive options enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + +- name: Synchronization with --archive options enabled except for --recursive + synchronize: + src: some/relative/path + dest: /some/absolute/path + recursive: no + +- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled + synchronize: + src: some/relative/path + dest: /some/absolute/path + checksum: yes + times: no + +- name: Synchronization without --archive options enabled except use --links + synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + links: yes + +- name: Synchronization of two paths both on the control machine + synchronize: + src: some/relative/path + dest: /some/absolute/path + delegate_to: localhost + +- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode + synchronize: + mode: pull + src: some/relative/path + dest: /some/absolute/path + +- name: Synchronization of src on delegate host to dest on the current inventory host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: delegate.host + +- name: Synchronize two directories on one remote host. + synchronize: + src: /first/absolute/path + dest: /second/absolute/path + delegate_to: "{{ inventory_hostname }}" + +- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost. 
+ synchronize: + src: some/relative/path + dest: /some/absolute/path + delete: yes + recursive: yes + +# This specific command is granted su privileges on the destination +- name: Synchronize using an alternate rsync command + synchronize: + src: some/relative/path + dest: /some/absolute/path + rsync_path: su -c rsync + +# Example .rsync-filter file in the source directory +# - var # exclude any path whose last part is 'var' +# - /var # exclude any path starting with 'var' starting at the source directory +# + /var/conf # include /var/conf even though it was previously excluded + +- name: Synchronize passing in extra rsync options + synchronize: + src: /tmp/helloworld + dest: /var/www/helloworld + rsync_opts: + - "--no-motd" + - "--exclude=.git" + +# Hardlink files if they didn't change +- name: Use hardlinks when synchronizing filesystems + synchronize: + src: /tmp/path_a/foo.txt + dest: /tmp/path_b/foo.txt + link_dest: /tmp/path_a/ + +# Specify the rsync binary to use on remote host and on local host +- hosts: groupofhosts + vars: + ansible_rsync_path: /usr/gnu/bin/rsync + + tasks: + - name: copy /tmp/localpath/ to remote location /tmp/remotepath + synchronize: + src: /tmp/localpath/ + dest: /tmp/remotepath + rsync_path: /usr/gnu/bin/rsync +''' + + +import os +import errno + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.six.moves import shlex_quote + + +client_addr = None + + +def substitute_controller(path): + global client_addr + if not client_addr: + ssh_env_string = os.environ.get('SSH_CLIENT', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + ssh_env_string = os.environ.get('SSH_CONNECTION', None) + try: + client_addr, _ = ssh_env_string.split(None, 1) + except AttributeError: + pass + if not client_addr: + raise ValueError + + if path.startswith('localhost:'): + path = path.replace('localhost', client_addr, 1) + return path + + +def is_rsh_needed(source, dest): + if source.startswith('rsync://') or dest.startswith('rsync://'): + return False + if ':' in source or ':' in dest: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='str', required=True), + dest=dict(type='str', required=True), + dest_port=dict(type='int'), + delete=dict(type='bool', default=False), + private_key=dict(type='path'), + rsync_path=dict(type='str'), + _local_rsync_path=dict(type='path', default='rsync'), + _local_rsync_password=dict(type='str', no_log=True), + _substitute_controller=dict(type='bool', default=False), + archive=dict(type='bool', default=True), + checksum=dict(type='bool', default=False), + compress=dict(type='bool', default=True), + existing_only=dict(type='bool', default=False), + dirs=dict(type='bool', default=False), + recursive=dict(type='bool'), + links=dict(type='bool'), + copy_links=dict(type='bool', default=False), + perms=dict(type='bool'), + times=dict(type='bool'), + owner=dict(type='bool'), + group=dict(type='bool'), + set_remote_user=dict(type='bool', default=True), + rsync_timeout=dict(type='int', default=0), + rsync_opts=dict(type='list', default=[]), + ssh_args=dict(type='str'), + partial=dict(type='bool', default=False), + verify_host=dict(type='bool', default=False), + mode=dict(type='str', default='push', choices=['pull', 'push']), + link_dest=dict(type='list') + ), + supports_check_mode=True, + ) + + if module.params['_substitute_controller']: + try: + source = 
substitute_controller(module.params['src']) + dest = substitute_controller(module.params['dest']) + except ValueError: + module.fail_json(msg='Could not determine controller hostname for rsync to send to') + else: + source = module.params['src'] + dest = module.params['dest'] + dest_port = module.params['dest_port'] + delete = module.params['delete'] + private_key = module.params['private_key'] + rsync_path = module.params['rsync_path'] + rsync = module.params.get('_local_rsync_path', 'rsync') + rsync_password = module.params.get('_local_rsync_password') + rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout') + archive = module.params['archive'] + checksum = module.params['checksum'] + compress = module.params['compress'] + existing_only = module.params['existing_only'] + dirs = module.params['dirs'] + partial = module.params['partial'] + # the default of these params depends on the value of archive + recursive = module.params['recursive'] + links = module.params['links'] + copy_links = module.params['copy_links'] + perms = module.params['perms'] + times = module.params['times'] + owner = module.params['owner'] + group = module.params['group'] + rsync_opts = module.params['rsync_opts'] + ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] + link_dest = module.params['link_dest'] + + if '/' not in rsync: + rsync = module.get_bin_path(rsync, required=True) + + cmd = [rsync, '--delay-updates', '-F'] + _sshpass_pipe = None + if rsync_password: + try: + module.run_command(["sshpass"]) + except OSError: + module.fail_json( + msg="to use rsync connection with passwords, you must install the sshpass program" + ) + _sshpass_pipe = os.pipe() + cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd + if compress: + cmd.append('--compress') + if rsync_timeout: + cmd.append('--timeout=%s' % rsync_timeout) + if module.check_mode: + cmd.append('--dry-run') + if delete: + cmd.append('--delete-after') + if existing_only: + cmd.append('--existing') + if checksum: + cmd.append('--checksum') + if copy_links: + cmd.append('--copy-links') + if archive: + cmd.append('--archive') + if recursive is False: + cmd.append('--no-recursive') + if links is False: + cmd.append('--no-links') + if perms is False: + cmd.append('--no-perms') + if times is False: + cmd.append('--no-times') + if owner is False: + cmd.append('--no-owner') + if group is False: + cmd.append('--no-group') + else: + if recursive is True: + cmd.append('--recursive') + if links is True: + cmd.append('--links') + if perms is True: + cmd.append('--perms') + if times is True: + cmd.append('--times') + if owner is True: + cmd.append('--owner') + if group is True: + cmd.append('--group') + if dirs: + cmd.append('--dirs') + + if source.startswith('rsync://') and dest.startswith('rsync://'): + module.fail_json(msg='either src or dest must be a localhost', rc=1) + + if is_rsh_needed(source, dest): + + # https://github.com/ansible/ansible/issues/15907 + has_rsh = False + for rsync_opt in rsync_opts: + if '--rsh' in rsync_opt: + has_rsh = True + break + + # if the user has not supplied an --rsh option go ahead and add ours + if not has_rsh: + ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none'] + if private_key is not None: + ssh_cmd.extend(['-i', private_key]) + # If the user specified a port value + # Note: The action plugin takes care of setting this to a port from + # inventory if the user didn't specify an explicit dest_port + if dest_port is not None: + 
ssh_cmd.extend(['-o', 'Port=%s' % dest_port]) + if not verify_host: + ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null']) + ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd) + if ssh_args: + ssh_cmd_str += ' %s' % ssh_args + cmd.append('--rsh=%s' % ssh_cmd_str) + + if rsync_path: + cmd.append('--rsync-path=%s' % rsync_path) + + if rsync_opts: + if '' in rsync_opts: + module.warn('The empty string is present in rsync_opts which will cause rsync to' + ' transfer the current working directory. If this is intended, use "."' + ' instead to get rid of this warning. If this is unintended, check for' + ' problems in your playbook leading to empty string in rsync_opts.') + cmd.extend(rsync_opts) + + if partial: + cmd.append('--partial') + + if link_dest: + cmd.append('-H') + # verbose required because rsync does not believe that adding a + # hardlink is actually a change + cmd.append('-vv') + for x in link_dest: + link_path = os.path.abspath(os.path.expanduser(x)) + destination_path = os.path.abspath(os.path.dirname(dest)) + if destination_path.find(link_path) == 0: + module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest)) + cmd.append('--link-dest=%s' % link_path) + + changed_marker = '<<CHANGED>>' + cmd.append('--out-format=' + changed_marker + '%i %n%L') + + # expand the paths + if '@' not in source: + source = os.path.expanduser(source) + if '@' not in dest: + dest = os.path.expanduser(dest) + + cmd.append(source) + cmd.append(dest) + cmdstr = ' '.join(cmd) + + # If we are using password authentication, write the password into the pipe + if rsync_password: + def _write_password_to_pipe(proc): + os.close(_sshpass_pipe[0]) + try: + os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n') + except OSError as exc: + # Ignore broken pipe errors if the sshpass process has exited.
+ if exc.errno != errno.EPIPE or proc.poll() is None: + raise + + (rc, out, err) = module.run_command( + cmd, pass_fds=_sshpass_pipe, + before_communicate_callback=_write_password_to_pipe) + else: + (rc, out, err) = module.run_command(cmd) + + if rc: + return module.fail_json(msg=err, rc=rc, cmd=cmdstr) + + if link_dest: + # a leading period indicates no change + changed = (changed_marker + '.') not in out + else: + changed = changed_marker in out + + out_clean = out.replace(changed_marker, '') + out_lines = out_clean.split('\n') + while '' in out_lines: + out_lines.remove('') + if module._diff: + diff = {'prepared': out_clean} + return module.exit_json(changed=changed, msg=out_clean, + rc=rc, cmd=cmdstr, stdout_lines=out_lines, + diff=diff) + + return module.exit_json(changed=changed, msg=out_clean, + rc=rc, cmd=cmdstr, stdout_lines=out_lines) + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/timezone.py b/test/support/integration/plugins/modules/timezone.py new file mode 100644 index 00000000..b7439a12 --- /dev/null +++ b/test/support/integration/plugins/modules/timezone.py @@ -0,0 +1,909 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Shinichi TAMURA (@tmshn) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: timezone +short_description: Configure timezone setting +description: + - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up NTP, use the M(service) module. + - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. + - Several different tools are used depending on the OS/Distribution involved. + For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). + On SmartOS, C(sm-set-timezone) is used; on macOS, C(systemsetup); on BSD, C(/etc/localtime) is modified. + On AIX, C(chtz) is used. + - As of Ansible 2.3 support was added for SmartOS and BSDs. + - As of Ansible 2.4 support was added for macOS. + - As of Ansible 2.9 support was added for AIX 6.1+. + - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. +version_added: "2.2" +options: + name: + description: + - Name of the timezone for the system clock. + - Default is to keep current setting. + - B(At least one of name and hwclock are required.) + type: str + hwclock: + description: + - Whether the hardware clock is in UTC or in local timezone. + - Default is to keep current setting. + - Note that changing this option is not recommended, and it may fail + to configure, especially on virtual environments such as AWS. + - B(At least one of name and hwclock are required.) + - I(Only used on Linux.) + type: str + aliases: [ rtc ] + choices: [ local, UTC ] +notes: + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone. + - On AIX only Olson/tz database timezones are usable (POSIX is not supported). + - An OS reboot is also required on AIX for the new timezone setting to take effect.
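Stepping back to the synchronize module above: the rsync password never appears on the command line; it travels over an inherited pipe that sshpass reads through its -d option. A self-contained sketch of that handshake, assuming sshpass is installed and using placeholder command and password values:

    import os
    import subprocess

    password = 'example-password'          # placeholder only, never hard-code secrets
    rfd, wfd = os.pipe()
    # sshpass -d<fd> reads the password from the given file descriptor
    cmd = ['sshpass', '-d%d' % rfd, 'ssh', 'user@example.com', 'true']
    proc = subprocess.Popen(cmd, pass_fds=(rfd,))  # keep the read end open in the child
    os.close(rfd)                                  # parent only writes
    os.write(wfd, password.encode() + b'\n')
    os.close(wfd)
    proc.wait()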
+author: + - Shinichi TAMURA (@tmshn) + - Jasper Lievisse Adriaanse (@jasperla) + - Indrajit Raychaudhuri (@indrajitr) +''' + +RETURN = r''' +diff: + description: The differences for the given arguments. + returned: success + type: complex + contains: + before: + description: The values before change + type: dict + after: + description: The values after change + type: dict +''' + +EXAMPLES = r''' +- name: Set timezone to Asia/Tokyo + timezone: + name: Asia/Tokyo +''' + +import errno +import os +import platform +import random +import re +import string +import filecmp + +from ansible.module_utils.basic import AnsibleModule, get_distribution +from ansible.module_utils.six import iteritems + + +class Timezone(object): + """This is a generic Timezone manipulation class that is subclassed based on platform. + + A subclass may wish to override the following action methods: + - get(key, phase) ... get the value from the system at `phase` + - set(key, value) ... set the value to the current system + """ + + def __new__(cls, module): + """Return the platform-specific subclass. + + It does not use load_platform_subclass() because it needs to judge based + on whether the `timedatectl` command exists and is available. + + Args: + module: The AnsibleModule. + """ + if platform.system() == 'Linux': + timedatectl = module.get_bin_path('timedatectl') + if timedatectl is not None: + rc, stdout, stderr = module.run_command(timedatectl) + if rc == 0: + return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) + else: + module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr) + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + else: + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + elif re.match('^joyent_.*Z', platform.version()): + # platform.system() returns SunOS, which is too broad. So look at the + # platform version instead. However we have to ensure that we're not + # running in the global zone where changing the timezone has no effect. + zonename_cmd = module.get_bin_path('zonename') + if zonename_cmd is not None: + (rc, stdout, _) = module.run_command(zonename_cmd) + if rc == 0 and stdout.strip() == 'global': + module.fail_json(msg='Adjusting timezone is not supported in Global Zone') + + return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone) + elif platform.system() == 'Darwin': + return super(Timezone, DarwinTimezone).__new__(DarwinTimezone) + elif re.match('^(Free|Net|Open)BSD', platform.platform()): + return super(Timezone, BSDTimezone).__new__(BSDTimezone) + elif platform.system() == 'AIX': + AIXoslevel = int(platform.version() + platform.release()) + if AIXoslevel >= 61: + return super(Timezone, AIXTimezone).__new__(AIXTimezone) + else: + module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel) + else: + # Not supported yet + return super(Timezone, Timezone).__new__(Timezone) + + def __init__(self, module): + """Initialize the class. + + Args: + module: The AnsibleModule. + """ + super(Timezone, self).__init__() + self.msg = [] + # `self.value` holds the values for each param in each phase. + # Initially there's only info for the "planned" phase, but the + # `self.check()` function will fill it out. + self.value = dict() + for key in module.argument_spec: + value = module.params[key] + if value is not None: + self.value[key] = dict(planned=value) + self.module = module + + def abort(self, msg): + """Abort the process with error message.
+ + This is just a wrapper around module.fail_json(). + + Args: + msg: The error message. + """ + error_msg = ['Error message:', msg] + if len(self.msg) > 0: + error_msg.append('Other message(s):') + error_msg.extend(self.msg) + self.module.fail_json(msg='\n'.join(error_msg)) + + def execute(self, *commands, **kwargs): + """Execute the shell command. + + This is just a wrapper around module.run_command(). + + Args: + *commands: The command to execute. + It will be concatenated with a single space. + **kwargs: Only 'log' key is checked. + If kwargs['log'] is true, record the command to self.msg. + + Returns: + stdout: Standard output of the command. + """ + command = ' '.join(commands) + (rc, stdout, stderr) = self.module.run_command(command, check_rc=True) + if kwargs.get('log', False): + self.msg.append('executed `%s`' % command) + return stdout + + def diff(self, phase1='before', phase2='after'): + """Calculate the difference between the given two phases. + + Args: + phase1, phase2: The names of the phases to compare. + + Returns: + diff: The difference of value between phase1 and phase2. + This is in the format which can be used with the + `--diff` option of ansible-playbook. + """ + diff = {phase1: {}, phase2: {}} + for key, value in iteritems(self.value): + diff[phase1][key] = value[phase1] + diff[phase2][key] = value[phase2] + return diff + + def check(self, phase): + """Check the state in given phase and set it to `self.value`. + + Args: + phase: The name of the phase to check. + + Returns: + NO RETURN VALUE + """ + if phase == 'planned': + return + for key, value in iteritems(self.value): + value[phase] = self.get(key, phase) + + def change(self): + """Make the changes take effect based on `self.value`.""" + for key, value in iteritems(self.value): + if value['before'] != value['planned']: + self.set(key, value['planned']) + + # =========================================== + # Platform specific methods (must be replaced by subclass). + + def get(self, key, phase): + """Get the value for the key at the given phase. + + Called from self.check(). + + Args: + key: The key to get the value + phase: The phase to get the value + + Return: + value: The value for the key at the given phase. + """ + self.abort('get(key, phase) is not implemented on target platform') + + def set(self, key, value): + """Set the value for the key (of course, for the phase 'after'). + + Called from self.change(). + + Args: + key: Key to set the value + value: Value to set + """ + self.abort('set(key, value) is not implemented on target platform') + + def _verify_timezone(self): + tz = self.value['name']['planned'] + tzfile = '/usr/share/zoneinfo/%s' % tz + if not os.path.isfile(tzfile): + self.abort('given timezone "%s" is not available' % tz) + return tzfile + + +class SystemdTimezone(Timezone): + """This is a Timezone manipulation class for systemd-powered Linux. + + It uses the `timedatectl` command to check/set all arguments.
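The phase bookkeeping inherited from Timezone drives the rest: check('before') and check('after') fill self.value through get(), change() applies the planned values, and diff() reshapes the result for ansible-playbook --diff. A small worked illustration with assumed values:

    # self.value after both check() calls, for a host moving off UTC:
    value = {'name': {'planned': 'Asia/Tokyo', 'before': 'UTC', 'after': 'Asia/Tokyo'}}
    # diff('before', 'after') then returns:
    # {'before': {'name': 'UTC'}, 'after': {'name': 'Asia/Tokyo'}}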
+ """ + + regexps = dict( + hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE), + name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + subcmds = dict( + hwclock='set-local-rtc', + name='set-timezone' + ) + + def __init__(self, module): + super(SystemdTimezone, self).__init__(module) + self.timedatectl = module.get_bin_path('timedatectl', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_status(self, phase): + if phase not in self.status: + self.status[phase] = self.execute(self.timedatectl, 'status') + return self.status[phase] + + def get(self, key, phase): + status = self._get_status(phase) + value = self.regexps[key].search(status).group(1) + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> local/UTC + if self.module.boolean(value): + value = 'local' + else: + value = 'UTC' + return value + + def set(self, key, value): + # For key='hwclock'; convert UTC/local -> yes/no + if key == 'hwclock': + if value == 'local': + value = 'yes' + else: + value = 'no' + self.execute(self.timedatectl, self.subcmds[key], value, log=True) + + +class NosystemdTimezone(Timezone): + """This is a Timezone manipulation class for non systemd-powered Linux. + + For timezone setting, it edits the following file and reflect changes: + - /etc/sysconfig/clock ... RHEL/CentOS + - /etc/timezone ... Debian/Ubuntu + For hwclock setting, it executes `hwclock --systohc` command with the + '--utc' or '--localtime' option. + """ + + conf_files = dict( + name=None, # To be set in __init__ + hwclock=None, # To be set in __init__ + adjtime='/etc/adjtime' + ) + + # It's fine if all tree config files don't exist + allow_no_file = dict( + name=True, + hwclock=True, + adjtime=True + ) + + regexps = dict( + name=None, # To be set in __init__ + hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE), + adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE) + ) + + dist_regexps = dict( + SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE), + redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE) + ) + + dist_tzline_format = dict( + SuSE='TIMEZONE="%s"\n', + redhat='ZONE="%s"\n' + ) + + def __init__(self, module): + super(NosystemdTimezone, self).__init__(module) + # Validate given timezone + if 'name' in self.value: + tzfile = self._verify_timezone() + # `--remove-destination` is needed if /etc/localtime is a symlink so + # that it overwrites it instead of following it. + self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)] + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + # Distribution-specific configurations + if self.module.get_bin_path('dpkg-reconfigure') is not None: + # Debian/Ubuntu + if 'name' in self.value: + self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile), + '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)] + self.conf_files['name'] = '/etc/timezone' + self.conf_files['hwclock'] = '/etc/default/rcS' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' + else: + # RHEL/CentOS/SUSE + if self.module.get_bin_path('tzdata-update') is not None: + # tzdata-update cannot update the timezone if /etc/localtime is + # a symlink so we have to use cp to update the time zone which + # was set above. 
+ if not os.path.islink('/etc/localtime'): + self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)] + # else: + # self.update_timezone = 'cp --remove-destination ...' <- configured above + self.conf_files['name'] = '/etc/sysconfig/clock' + self.conf_files['hwclock'] = '/etc/sysconfig/clock' + try: + f = open(self.conf_files['name'], 'r') + except IOError as err: + if self._allow_ioerror(err, 'name'): + # If the config file doesn't exist detect the distribution and set regexps. + distribution = get_distribution() + if distribution == 'SuSE': + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + else: + self.abort('could not read configuration file "%s"' % self.conf_files['name']) + else: + # The key for timezone might be `ZONE` or `TIMEZONE` + # (the former is used in RHEL/CentOS and the latter is used in SUSE Linux). + # So check the content of /etc/sysconfig/clock and decide which key to use. + sysconfig_clock = f.read() + f.close() + if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE): + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + + def _allow_ioerror(self, err, key): + # In some cases, even if the target file does not exist, + # simply creating it may solve the problem. + # In such cases, we should continue the configuration rather than aborting. + if err.errno != errno.ENOENT: + # If the error is not ENOENT ("No such file or directory"), + # (e.g., permission error, etc), we should abort. + return False + return self.allow_no_file.get(key, False) + + def _edit_file(self, filename, regexp, value, key): + """Replace the first matched line with given `value`. + + If `regexp` matches more than once, all lines other than the first match will be deleted. + + Args: + filename: The name of the file to edit. + regexp: The regular expression to search with. + value: The line which will be inserted. + key: For what key the file is being edited.
+ """ + # Read the file + try: + file = open(filename, 'r') + except IOError as err: + if self._allow_ioerror(err, key): + lines = [] + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + lines = file.readlines() + file.close() + # Find all matched lines + matched_indices = [] + for i, line in enumerate(lines): + if regexp.search(line): + matched_indices.append(i) + if len(matched_indices) > 0: + insert_line = matched_indices[0] + else: + insert_line = 0 + # Remove all matched lines + for i in matched_indices[::-1]: + del lines[i] + # ...and insert the value + lines.insert(insert_line, value) + # Write the changes + try: + file = open(filename, 'w') + except IOError: + self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename)) + else: + file.writelines(lines) + file.close() + self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename)) + + def _get_value_from_config(self, key, phase): + filename = self.conf_files[key] + try: + file = open(filename, mode='r') + except IOError as err: + if self._allow_ioerror(err, key): + if key == 'hwclock': + return 'n/a' + elif key == 'adjtime': + return 'UTC' + elif key == 'name': + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename)) + else: + status = file.read() + file.close() + try: + value = self.regexps[key].search(status).group(1) + except AttributeError: + if key == 'hwclock': + # If we cannot find UTC in the config that's fine. + return 'n/a' + elif key == 'adjtime': + # If we cannot find UTC/LOCAL in /etc/adjtime that means UTC + # will be used by default. + return 'UTC' + elif key == 'name': + if phase == 'before': + # In 'before' phase UTC/LOCAL doesn't need to be set in + # the timezone config file, so we ignore this error. + return 'n/a' + else: + self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename)) + else: + if key == 'hwclock': + # convert yes/no -> UTC/local + if self.module.boolean(value): + value = 'UTC' + else: + value = 'local' + elif key == 'adjtime': + # convert LOCAL -> local + if value != 'UTC': + value = value.lower() + return value + + def get(self, key, phase): + planned = self.value[key]['planned'] + if key == 'hwclock': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the value in the config file is the same as the 'planned' + # value, we need to check /etc/adjtime. + value = self._get_value_from_config('adjtime', phase) + elif key == 'name': + value = self._get_value_from_config(key, phase) + if value == planned: + # If the planned value is the same as the one in the config file + # we need to check if /etc/localtime is also set to the 'planned' zone. + if os.path.islink('/etc/localtime'): + # If /etc/localtime is a symlink and is not set to the TZ we 'planned' + # to set, we need to return the TZ which the symlink points to. + if os.path.exists('/etc/localtime'): + # We use readlink() because on some distros zone files are symlinks + # to other zone files, so it's hard to get which TZ is actually set + # if we follow the symlink. + path = os.readlink('/etc/localtime') + linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE) + if linktz: + valuelink = linktz.group(1) + if valuelink != planned: + value = valuelink + else: + # Set current TZ to 'n/a' if the symlink points to a path + # which isn't a zone file.
+ value = 'n/a' + else: + # Set current TZ to 'n/a' if the symlink to the zone file is broken. + value = 'n/a' + else: + # If /etc/localtime is not a symlink best we can do is compare it with + # the 'planned' zone info file and return 'n/a' if they are different. + try: + if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned): + return 'n/a' + except Exception: + return 'n/a' + else: + self.abort('unknown parameter "%s"' % key) + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value, + key='name') + for cmd in self.update_timezone: + self.execute(cmd) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + utc = 'no' + else: + option = '--utc' + utc = 'yes' + if self.conf_files['hwclock'] is not None: + self._edit_file(filename=self.conf_files['hwclock'], + regexp=self.regexps['hwclock'], + value='UTC=%s\n' % utc, + key='hwclock') + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +class SmartOSTimezone(Timezone): + """This is a Timezone manipulation class for SmartOS instances. + + It uses the C(sm-set-timezone) utility to set the timezone, and + inspects C(/etc/default/init) to determine the current timezone. + + NB: A zone needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(SmartOSTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + if not self.settimezone: + module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/default/init`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + try: + f = open('/etc/default/init', 'r') + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] + except Exception: + self.module.fail_json(msg='Failed to read /etc/default/init') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through sm-set-timezone, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + cmd = 'sm-set-timezone %s' % value + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # sm-set-timezone knows no state and will always set the timezone. + # XXX: https://github.com/joyent/smtools/pull/2 + m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1]) + if not (m and m.groups()[-1] == value): + self.module.fail_json(msg='Failed to set timezone') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class DarwinTimezone(Timezone): + """This is the timezone implementation for Darwin which, unlike other *BSD + implementations, uses the `systemsetup` command on Darwin to check/set + the timezone. 
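Outside this class, reading the current zone comes down to one systemsetup call plus the same regular expression used below; a minimal sketch, assuming macOS and sufficient privileges for systemsetup:

    import re
    import subprocess

    # `systemsetup -gettimezone` prints e.g. 'Time Zone: Europe/Amsterdam'
    out = subprocess.run(['systemsetup', '-gettimezone'],
                         capture_output=True, text=True).stdout
    match = re.search(r'^\s*Time ?Zone\s*:\s*([^\s]+)', out, re.MULTILINE)
    print(match.group(1) if match else 'unknown')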
+ """ + + regexps = dict( + name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + def __init__(self, module): + super(DarwinTimezone, self).__init__(module) + self.systemsetup = module.get_bin_path('systemsetup', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_current_timezone(self, phase): + """Lookup the current timezone via `systemsetup -gettimezone`.""" + if phase not in self.status: + self.status[phase] = self.execute(self.systemsetup, '-gettimezone') + return self.status[phase] + + def _verify_timezone(self): + tz = self.value['name']['planned'] + # Lookup the list of supported timezones via `systemsetup -listtimezones`. + # Note: Skip the first line that contains the label 'Time Zones:' + out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:] + tz_list = list(map(lambda x: x.strip(), out)) + if tz not in tz_list: + self.abort('given timezone "%s" is not available' % tz) + return tz + + def get(self, key, phase): + if key == 'name': + status = self._get_current_timezone(phase) + value = self.regexps[key].search(status).group(1) + return value + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + self.execute(self.systemsetup, '-settimezone', value, log=True) + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class BSDTimezone(Timezone): + """This is the timezone implementation for *BSD which works simply through + updating the `/etc/localtime` symlink to point to a valid timezone name under + `/usr/share/zoneinfo`. + """ + + def __init__(self, module): + super(BSDTimezone, self).__init__(module) + + def __get_timezone(self): + zoneinfo_dir = '/usr/share/zoneinfo/' + localtime_file = '/etc/localtime' + + # Strategy 1: + # If /etc/localtime does not exist, assum the timezone is UTC. + if not os.path.exists(localtime_file): + self.module.warn('Could not read /etc/localtime. Assuming UTC.') + return 'UTC' + + # Strategy 2: + # Follow symlink of /etc/localtime + zoneinfo_file = localtime_file + while not zoneinfo_file.startswith(zoneinfo_dir): + try: + zoneinfo_file = os.readlink(localtime_file) + except OSError: + # OSError means "end of symlink chain" or broken link. + break + else: + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 3: + # (If /etc/localtime is not symlinked) + # Check all files in /usr/share/zoneinfo and return first non-link match. + for dname, _, fnames in sorted(os.walk(zoneinfo_dir)): + for fname in sorted(fnames): + zoneinfo_file = os.path.join(dname, fname) + if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 4: + # As a fall-back, return 'UTC' as default assumption. + self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.') + return 'UTC' + + def get(self, key, phase): + """Lookup the current timezone by resolving `/etc/localtime`.""" + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + # First determine if the requested timezone is valid by looking in + # the zoneinfo directory. 
+            zonefile = '/usr/share/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone' % value)
+            except Exception:
+                self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and moving it into place. Otherwise we would have to remove the
+            # original symlink and create the new symlink; however, that would
+            # create a race condition in case another process tries to read
+            # /etc/localtime between removal and creation.
+            suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+            new_localtime = '/etc/localtime.' + suffix
+
+            try:
+                os.symlink(zonefile, new_localtime)
+                os.rename(new_localtime, '/etc/localtime')
+            except Exception:
+                os.remove(new_localtime)
+                self.module.fail_json(msg='Could not update /etc/localtime')
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+    """This is a Timezone manipulation class for AIX instances.
+
+    It uses the C(chtz) utility to set the timezone, and
+    inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+    See the following article for more information:
+    https://developer.ibm.com/articles/au-aix-posix/
+
+    NB: AIX needs to be rebooted in order for the change to be
+    activated.
+    """
+
+    def __init__(self, module):
+        super(AIXTimezone, self).__init__(module)
+        self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+    def __get_timezone(self):
+        """ Return the current value of TZ= in /etc/environment """
+        try:
+            f = open('/etc/environment', 'r')
+            etcenvironment = f.read()
+            f.close()
+        except Exception:
+            self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+        match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def get(self, key, phase):
+        """Lookup the current timezone name in `/etc/environment`. If anything else
+        is requested, or if the TZ field is not set, we fail.
+        """
+        if key == 'name':
+            return self.__get_timezone()
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+    def set(self, key, value):
+        """Set the requested timezone through chtz; an invalid timezone name
+        will be rejected and we have no further input validation to perform.
+        """
+        if key == 'name':
+            # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails, it does not check for
+            # valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo
+            # This does mean that we can only support Olson for now. The below commented-out regex
+            # detects Olson date formats, so in the future we could detect Posix or Olson and
+            # act accordingly.
+
+            # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+            # if not regex_olson.match(value):
+            #     msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+            #     self.module.fail_json(msg=msg)
+
+            # First determine if the requested timezone is valid by looking in the zoneinfo
+            # directory.
+            zonefile = '/usr/share/lib/zoneinfo/' + value
+            try:
+                if not os.path.isfile(zonefile):
+                    self.module.fail_json(msg='%s is not a recognized timezone.'
+                                          % value)
+            except Exception:
+                self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+            # Now set the TZ using chtz
+            cmd = 'chtz %s' % value
+            (rc, stdout, stderr) = self.module.run_command(cmd)
+
+            if rc != 0:
+                self.module.fail_json(msg=stderr)
+
+            # The best condition check we can do is to check the value of TZ after making the
+            # change.
+            TZ = self.__get_timezone()
+            if TZ != value:
+                msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+                self.module.fail_json(msg=msg)
+
+        else:
+            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+    # Construct 'module' and 'tz'
+    module = AnsibleModule(
+        argument_spec=dict(
+            hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+            name=dict(type='str'),
+        ),
+        required_one_of=[
+            ['hwclock', 'name']
+        ],
+        supports_check_mode=True,
+    )
+    tz = Timezone(module)
+
+    # Check the current state
+    tz.check(phase='before')
+    if module.check_mode:
+        diff = tz.diff('before', 'planned')
+        # In check mode, 'planned' state is treated as 'after' state
+        diff['after'] = diff.pop('planned')
+    else:
+        # Make change
+        tz.change()
+        # Check the current state
+        tz.check(phase='after')
+        # Examine if the current state matches planned state
+        (after, planned) = tz.diff('after', 'planned').values()
+        if after != planned:
+            tz.abort('still not in the desired state, though changes have been made - '
+                     'planned: %s, after: %s' % (str(planned), str(after)))
+        diff = tz.diff('before', 'after')
+
+    changed = (diff['before'] != diff['after'])
+    if len(tz.msg) > 0:
+        module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+    else:
+        module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/support/integration/plugins/modules/x509_crl.py b/test/support/integration/plugins/modules/x509_crl.py
new file mode 100644
index 00000000..ef601eda
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl.py
@@ -0,0 +1,783 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Felix Fontein
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl
+version_added: "2.10"
+short_description: Generate Certificate Revocation Lists (CRLs)
+description:
+    - This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
+    - Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
+      or as a path to a certificate file in PEM format.
+requirements:
+    - cryptography >= 1.2
+author:
+    - Felix Fontein (@felixfontein)
+options:
+    state:
+        description:
+            - Whether the CRL file should exist or not, taking action if the state is different from what is stated.
+        type: str
+        default: present
+        choices: [ absent, present ]
+
+    mode:
+        description:
+            - Defines how to process entries of existing CRLs.
+            - If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
+              as specified in I(revoked_certificates).
+            - If set to C(update), makes sure that the CRL contains the revoked certificates from
+              I(revoked_certificates), but can also contain other revoked certificates.
+              If the CRL file already exists, all entries from the existing CRL will also be included in the new CRL.
+              When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
+        type: str
+        default: generate
+        choices: [ generate, update ]
+
+    force:
+        description:
+            - Should the CRL be forced to be regenerated.
+        type: bool
+        default: no
+
+    backup:
+        description:
+            - Create a backup file including a timestamp so you can get the original
+              CRL back if you overwrote it with a new one by accident.
+        type: bool
+        default: no
+
+    path:
+        description:
+            - Remote absolute path where the generated CRL file should be created or is already located.
+        type: path
+        required: yes
+
+    privatekey_path:
+        description:
+            - Path to the CA's private key to use when signing the CRL.
+            - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+        type: path
+
+    privatekey_content:
+        description:
+            - The content of the CA's private key to use when signing the CRL.
+            - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+        type: str
+
+    privatekey_passphrase:
+        description:
+            - The passphrase for the I(privatekey_path).
+            - This is required if the private key is password protected.
+        type: str
+
+    issuer:
+        description:
+            - Key/value pairs that will be present in the issuer name field of the CRL.
+            - If you need to specify more than one value with the same key, use a list as value.
+            - Required if I(state) is C(present).
+        type: dict
+
+    last_update:
+        description:
+            - The point in time from which this CRL can be trusted.
+            - Time can be specified either as relative time or as absolute timestamp.
+            - Time will always be interpreted as UTC.
+            - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+            - Note that if using relative time this module is NOT idempotent, except when
+              I(ignore_timestamps) is set to C(yes).
+        type: str
+        default: "+0s"
+
+    next_update:
+        description:
+            - "The absolute latest point in time by which this I(issuer) is expected to have issued
+              another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
+            - Time can be specified either as relative time or as absolute timestamp.
+            - Time will always be interpreted as UTC.
+            - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+            - Note that if using relative time this module is NOT idempotent, except when
+              I(ignore_timestamps) is set to C(yes).
+            - Required if I(state) is C(present).
+        type: str
+
+    digest:
+        description:
+            - Digest algorithm to be used when signing the CRL.
+        type: str
+        default: sha256
+
+    revoked_certificates:
+        description:
+            - List of certificates to be revoked.
+            - Required if I(state) is C(present).
+        type: list
+        elements: dict
+        suboptions:
+            path:
+                description:
+                    - Path to a certificate in PEM format.
+                    - The serial number and issuer will be extracted from the certificate.
+                    - Mutually exclusive with I(content) and I(serial_number). One of these three options
+                      must be specified.
+                type: path
+            content:
+                description:
+                    - Content of a certificate in PEM format.
+                    - The serial number and issuer will be extracted from the certificate.
+                    - Mutually exclusive with I(path) and I(serial_number). One of these three options
+                      must be specified.
+                type: str
+            serial_number:
+                description:
+                    - Serial number of the certificate.
+                    - Mutually exclusive with I(path) and I(content). One of these three options must
+                      be specified.
+                type: int
+            revocation_date:
+                description:
+                    - The point in time the certificate was revoked.
+                    - Time can be specified either as relative time or as absolute timestamp.
+                    - Time will always be interpreted as UTC.
+                    - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+                    - Note that if using relative time this module is NOT idempotent, except when
+                      I(ignore_timestamps) is set to C(yes).
+                type: str
+                default: "+0s"
+            issuer:
+                description:
+                    - The certificate's issuer.
+                    - "Example: C(DNS:ca.example.org)"
+                type: list
+                elements: str
+            issuer_critical:
+                description:
+                    - Whether the certificate issuer extension should be critical.
+                type: bool
+                default: no
+            reason:
+                description:
+                    - The value for the revocation reason extension.
+                type: str
+                choices:
+                    - unspecified
+                    - key_compromise
+                    - ca_compromise
+                    - affiliation_changed
+                    - superseded
+                    - cessation_of_operation
+                    - certificate_hold
+                    - privilege_withdrawn
+                    - aa_compromise
+                    - remove_from_crl
+            reason_critical:
+                description:
+                    - Whether the revocation reason extension should be critical.
+                type: bool
+                default: no
+            invalidity_date:
+                description:
+                    - The point in time it was known/suspected that the private key was compromised
+                      or that the certificate otherwise became invalid.
+                    - Time can be specified either as relative time or as absolute timestamp.
+                    - Time will always be interpreted as UTC.
+                    - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+                    - Note that if using relative time this module is NOT idempotent. This will NOT
+                      change when I(ignore_timestamps) is set to C(yes).
+                type: str
+            invalidity_date_critical:
+                description:
+                    - Whether the invalidity date extension should be critical.
+                type: bool
+                default: no
+
+    ignore_timestamps:
+        description:
+            - Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
+              I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
+              I(invalidity_date) in I(revoked_certificates) will never be ignored.
+            - Use this in combination with relative timestamps for these values to get idempotency.
+        type: bool
+        default: no
+
+    return_content:
+        description:
+            - If set to C(yes), will return the (current or generated) CRL's content as I(crl).
+        type: bool
+        default: no
+
+extends_documentation_fragment:
+    - files
+
+notes:
+    - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
+    - Date specified should be UTC. Minutes and seconds are mandatory.
+'''
+
+EXAMPLES = r'''
+- name: Generate a CRL
+  x509_crl:
+    path: /etc/ssl/my-ca.crl
+    privatekey_path: /etc/ssl/private/my-ca.pem
+    issuer:
+      CN: My CA
+    last_update: "+0s"
+    next_update: "+7d"
+    revoked_certificates:
+      - serial_number: 1234
+        revocation_date: 20190331202428Z
+        issuer:
+          CN: My CA
+      - serial_number: 2345
+        revocation_date: 20191013152910Z
+        reason: affiliation_changed
+        invalidity_date: 20191001000000Z
+      - path: /etc/ssl/crt/revoked-cert.pem
+        revocation_date: 20191010010203Z
+'''
+
+RETURN = r'''
+filename:
+    description: Path to the generated CRL
+    returned: changed or success
+    type: str
+    sample: /path/to/my-ca.crl
+backup_file:
+    description: Name of backup file created.
+    returned: changed and if I(backup) is C(yes)
+    type: str
+    sample: /path/to/my-ca.crl.2019-03-09@11:22~
+privatekey:
+    description: Path to the private CA key
+    returned: changed or success
+    type: str
+    sample: /path/to/my-ca.pem
+issuer:
+    description:
+        - The CRL's issuer.
+        - Note that for repeated values, only the last one will be returned.
+    returned: success
+    type: dict
+    sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+    description: The CRL's issuer as an ordered list of tuples.
+    returned: success
+    type: list
+    elements: list
+    sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
+last_update:
+    description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+next_update:
+    description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+digest:
+    description: The signature algorithm used to sign the CRL.
+    returned: success
+    type: str
+    sample: sha256WithRSAEncryption
+revoked_certificates:
+    description: List of certificates to be revoked.
+    returned: success
+    type: list
+    elements: dict
+    contains:
+        serial_number:
+            description: Serial number of the certificate.
+            type: int
+            sample: 1234
+        revocation_date:
+            description: The point in time the certificate was revoked as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        issuer:
+            description: The certificate's issuer.
+            type: list
+            elements: str
+            sample: '["DNS:ca.example.org"]'
+        issuer_critical:
+            description: Whether the certificate issuer extension is critical.
+            type: bool
+            sample: no
+        reason:
+            description:
+                - The value for the revocation reason extension.
+                - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+                  C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+                  C(remove_from_crl).
+            type: str
+            sample: key_compromise
+        reason_critical:
+            description: Whether the revocation reason extension is critical.
+            type: bool
+            sample: no
+        invalidity_date:
+            description: |
+                The point in time it was known/suspected that the private key was compromised
+                or that the certificate otherwise became invalid as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        invalidity_date_critical:
+            description: Whether the invalidity date extension is critical.
+            type: bool
+            sample: no
+crl:
+    description: The (current or generated) CRL's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes) + type: str +''' + + +import os +import traceback +from distutils.version import LooseVersion + +from ansible.module_utils import crypto as crypto_utils +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +MINIMAL_CRYPTOGRAPHY_VERSION = '1.2' + +CRYPTOGRAPHY_IMP_ERR = None +try: + import cryptography + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives.serialization import Encoding + from cryptography.x509 import ( + CertificateRevocationListBuilder, + RevokedCertificateBuilder, + NameAttribute, + Name, + ) + CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) +except ImportError: + CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() + CRYPTOGRAPHY_FOUND = False +else: + CRYPTOGRAPHY_FOUND = True + + +TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ" + + +class CRLError(crypto_utils.OpenSSLObjectError): + pass + + +class CRL(crypto_utils.OpenSSLObject): + + def __init__(self, module): + super(CRL, self).__init__( + module.params['path'], + module.params['state'], + module.params['force'], + module.check_mode + ) + + self.update = module.params['mode'] == 'update' + self.ignore_timestamps = module.params['ignore_timestamps'] + self.return_content = module.params['return_content'] + self.crl_content = None + + self.privatekey_path = module.params['privatekey_path'] + self.privatekey_content = module.params['privatekey_content'] + if self.privatekey_content is not None: + self.privatekey_content = self.privatekey_content.encode('utf-8') + self.privatekey_passphrase = module.params['privatekey_passphrase'] + + self.issuer = crypto_utils.parse_name_field(module.params['issuer']) + self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]] + + self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update') + self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update') + + self.digest = crypto_utils.select_message_digest(module.params['digest']) + if self.digest is None: + raise CRLError('The digest "{0}" is not supported'.format(module.params['digest'])) + + self.revoked_certificates = [] + for i, rc in enumerate(module.params['revoked_certificates']): + result = { + 'serial_number': None, + 'revocation_date': None, + 'issuer': None, + 'issuer_critical': False, + 'reason': None, + 'reason_critical': False, + 'invalidity_date': None, + 'invalidity_date_critical': False, + } + path_prefix = 'revoked_certificates[{0}].'.format(i) + if rc['path'] is not None or rc['content'] is not None: + # Load certificate from file or content + try: + if rc['content'] is not None: + rc['content'] = rc['content'].encode('utf-8') + cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography') + try: + result['serial_number'] = cert.serial_number + except AttributeError: + # The property was called "serial" before cryptography 1.4 + result['serial_number'] = cert.serial + except crypto_utils.OpenSSLObjectError as e: + if rc['content'] is not None: + module.fail_json( + msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e)) + ) + else: + module.fail_json( + msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e)) + ) + else: + # Specify serial_number (and potentially issuer) directly + 
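# (the argument spec in main() enforces that exactly one of path, content and serial_number is given, so serial_number is set here) +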
result['serial_number'] = rc['serial_number'] + # All other options + if rc['issuer']: + result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']] + result['issuer_critical'] = rc['issuer_critical'] + result['revocation_date'] = crypto_utils.get_relative_time_option( + rc['revocation_date'], + path_prefix + 'revocation_date' + ) + if rc['reason']: + result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']] + result['reason_critical'] = rc['reason_critical'] + if rc['invalidity_date']: + result['invalidity_date'] = crypto_utils.get_relative_time_option( + rc['invalidity_date'], + path_prefix + 'invalidity_date' + ) + result['invalidity_date_critical'] = rc['invalidity_date_critical'] + self.revoked_certificates.append(result) + + self.module = module + + self.backup = module.params['backup'] + self.backup_file = None + + try: + self.privatekey = crypto_utils.load_privatekey( + path=self.privatekey_path, + content=self.privatekey_content, + passphrase=self.privatekey_passphrase, + backend='cryptography' + ) + except crypto_utils.OpenSSLBadPassphraseError as exc: + raise CRLError(exc) + + self.crl = None + try: + with open(self.path, 'rb') as f: + data = f.read() + self.crl = x509.load_pem_x509_crl(data, default_backend()) + if self.return_content: + self.crl_content = data + except Exception as dummy: + self.crl_content = None + + def remove(self): + if self.backup: + self.backup_file = self.module.backup_local(self.path) + super(CRL, self).remove(self.module) + + def _compress_entry(self, entry): + if self.ignore_timestamps: + # Throw out revocation_date + return ( + entry['serial_number'], + tuple(entry['issuer']) if entry['issuer'] is not None else None, + entry['issuer_critical'], + entry['reason'], + entry['reason_critical'], + entry['invalidity_date'], + entry['invalidity_date_critical'], + ) + else: + return ( + entry['serial_number'], + entry['revocation_date'], + tuple(entry['issuer']) if entry['issuer'] is not None else None, + entry['issuer_critical'], + entry['reason'], + entry['reason_critical'], + entry['invalidity_date'], + entry['invalidity_date_critical'], + ) + + def check(self, perms_required=True): + """Ensure the resource is in its desired state.""" + + state_and_perms = super(CRL, self).check(self.module, perms_required) + + if not state_and_perms: + return False + + if self.crl is None: + return False + + if self.last_update != self.crl.last_update and not self.ignore_timestamps: + return False + if self.next_update != self.crl.next_update and not self.ignore_timestamps: + return False + if self.digest.name != self.crl.signature_hash_algorithm.name: + return False + + want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer] + if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]: + return False + + old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl] + new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates] + if self.update: + # We don't simply use a set so that duplicate entries are treated correctly + for entry in new_entries: + try: + old_entries.remove(entry) + except ValueError: + return False + else: + if old_entries != new_entries: + return False + + return True + + def _generate_crl(self): + backend = default_backend() + crl = CertificateRevocationListBuilder() + + try: + crl = crl.issuer_name(Name([ + NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), 
+                              to_text(entry[1]))
+                for entry in self.issuer
+            ]))
+        except ValueError as e:
+            raise CRLError(e)
+
+        crl = crl.last_update(self.last_update)
+        crl = crl.next_update(self.next_update)
+
+        if self.update and self.crl:
+            new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
+            for entry in self.crl:
+                decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
+                if decoded_entry not in new_entries:
+                    crl = crl.add_revoked_certificate(entry)
+        for entry in self.revoked_certificates:
+            revoked_cert = RevokedCertificateBuilder()
+            revoked_cert = revoked_cert.serial_number(entry['serial_number'])
+            revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
+            if entry['issuer'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.CertificateIssuer([
+                        crypto_utils.cryptography_get_name(name) for name in entry['issuer']
+                    ]),
+                    entry['issuer_critical']
+                )
+            if entry['reason'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.CRLReason(entry['reason']),
+                    entry['reason_critical']
+                )
+            if entry['invalidity_date'] is not None:
+                revoked_cert = revoked_cert.add_extension(
+                    x509.InvalidityDate(entry['invalidity_date']),
+                    entry['invalidity_date_critical']
+                )
+            crl = crl.add_revoked_certificate(revoked_cert.build(backend))
+
+        self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
+        return self.crl.public_bytes(Encoding.PEM)
+
+    def generate(self):
+        if not self.check(perms_required=False) or self.force:
+            result = self._generate_crl()
+            if self.return_content:
+                self.crl_content = result
+            if self.backup:
+                self.backup_file = self.module.backup_local(self.path)
+            crypto_utils.write_file(self.module, result)
+            self.changed = True
+
+        file_args = self.module.load_file_common_arguments(self.module.params)
+        if self.module.set_fs_attributes_if_different(file_args, False):
+            self.changed = True
+
+    def _dump_revoked(self, entry):
+        return {
+            'serial_number': entry['serial_number'],
+            'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+            'issuer':
+                [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+                if entry['issuer'] is not None else None,
+            'issuer_critical': entry['issuer_critical'],
+            'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+            'reason_critical': entry['reason_critical'],
+            'invalidity_date':
+                entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+                if entry['invalidity_date'] is not None else None,
+            'invalidity_date_critical': entry['invalidity_date_critical'],
+        }
+
+    def dump(self, check_mode=False):
+        result = {
+            'changed': self.changed,
+            'filename': self.path,
+            'privatekey': self.privatekey_path,
+            'last_update': None,
+            'next_update': None,
+            'digest': None,
+            'issuer_ordered': None,
+            'issuer': None,
+            'revoked_certificates': [],
+        }
+        if self.backup_file:
+            result['backup_file'] = self.backup_file
+
+        if check_mode:
+            result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
+            result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
+            # result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+            result['digest'] = self.module.params['digest']
+            result['issuer_ordered'] = self.issuer
+            result['issuer'] = {}
+            for k, v in self.issuer:
+                result['issuer'][k] = v
+            result['revoked_certificates'] = []
+            for entry in self.revoked_certificates:
result['revoked_certificates'].append(self._dump_revoked(entry)) + elif self.crl: + result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT) + result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT) + try: + result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid) + except AttributeError: + # Older cryptography versions don't have signature_algorithm_oid yet + dotted = crypto_utils._obj2txt( + self.crl._backend._lib, + self.crl._backend._ffi, + self.crl._x509_crl.sig_alg.algorithm + ) + oid = x509.oid.ObjectIdentifier(dotted) + result['digest'] = crypto_utils.cryptography_oid_to_name(oid) + issuer = [] + for attribute in self.crl.issuer: + issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value]) + result['issuer_ordered'] = issuer + result['issuer'] = {} + for k, v in issuer: + result['issuer'][k] = v + result['revoked_certificates'] = [] + for cert in self.crl: + entry = crypto_utils.cryptography_decode_revoked_certificate(cert) + result['revoked_certificates'].append(self._dump_revoked(entry)) + + if self.return_content: + result['crl'] = self.crl_content + + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + mode=dict(type='str', default='generate', choices=['generate', 'update']), + force=dict(type='bool', default=False), + backup=dict(type='bool', default=False), + path=dict(type='path', required=True), + privatekey_path=dict(type='path'), + privatekey_content=dict(type='str'), + privatekey_passphrase=dict(type='str', no_log=True), + issuer=dict(type='dict'), + last_update=dict(type='str', default='+0s'), + next_update=dict(type='str'), + digest=dict(type='str', default='sha256'), + ignore_timestamps=dict(type='bool', default=False), + return_content=dict(type='bool', default=False), + revoked_certificates=dict( + type='list', + elements='dict', + options=dict( + path=dict(type='path'), + content=dict(type='str'), + serial_number=dict(type='int'), + revocation_date=dict(type='str', default='+0s'), + issuer=dict(type='list', elements='str'), + issuer_critical=dict(type='bool', default=False), + reason=dict( + type='str', + choices=[ + 'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed', + 'superseded', 'cessation_of_operation', 'certificate_hold', + 'privilege_withdrawn', 'aa_compromise', 'remove_from_crl' + ] + ), + reason_critical=dict(type='bool', default=False), + invalidity_date=dict(type='str'), + invalidity_date_critical=dict(type='bool', default=False), + ), + required_one_of=[['path', 'content', 'serial_number']], + mutually_exclusive=[['path', 'content', 'serial_number']], + ), + ), + required_if=[ + ('state', 'present', ['privatekey_path', 'privatekey_content'], True), + ('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False), + ], + mutually_exclusive=( + ['privatekey_path', 'privatekey_content'], + ), + supports_check_mode=True, + add_file_common_args=True, + ) + + if not CRYPTOGRAPHY_FOUND: + module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), + exception=CRYPTOGRAPHY_IMP_ERR) + + try: + crl = CRL(module) + + if module.params['state'] == 'present': + if module.check_mode: + result = crl.dump(check_mode=True) + result['changed'] = module.params['force'] or not crl.check() + module.exit_json(**result) + + crl.generate() + else: + if module.check_mode: + result = 
crl.dump(check_mode=True)
+                result['changed'] = os.path.exists(module.params['path'])
+                module.exit_json(**result)
+
+            crl.remove()
+
+        result = crl.dump()
+        module.exit_json(**result)
+    except crypto_utils.OpenSSLObjectError as exc:
+        module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/support/integration/plugins/modules/x509_crl_info.py b/test/support/integration/plugins/modules/x509_crl_info.py
new file mode 100644
index 00000000..b61db26f
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Felix Fontein
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl_info
+version_added: "2.10"
+short_description: Retrieve information on Certificate Revocation Lists (CRLs)
+description:
+    - This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
+requirements:
+    - cryptography >= 1.2
+author:
+    - Felix Fontein (@felixfontein)
+options:
+    path:
+        description:
+            - Remote absolute path where the CRL file is located.
+            - Either I(path) or I(content) must be specified, but not both.
+        type: path
+    content:
+        description:
+            - Content of the X.509 CRL in PEM format.
+            - Either I(path) or I(content) must be specified, but not both.
+        type: str
+
+notes:
+    - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
+      They are all in UTC.
+seealso:
+    - module: x509_crl
+'''
+
+EXAMPLES = r'''
+- name: Get information on CRL
+  x509_crl_info:
+    path: /etc/ssl/my-ca.crl
+  register: result
+
+- debug:
+    msg: "{{ result }}"
+'''
+
+RETURN = r'''
+issuer:
+    description:
+        - The CRL's issuer.
+        - Note that for repeated values, only the last one will be returned.
+    returned: success
+    type: dict
+    sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+    description: The CRL's issuer as an ordered list of tuples.
+    returned: success
+    type: list
+    elements: list
+    sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
+last_update:
+    description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+next_update:
+    description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+    returned: success
+    type: str
+    sample: 20190413202428Z
+digest:
+    description: The signature algorithm used to sign the CRL.
+    returned: success
+    type: str
+    sample: sha256WithRSAEncryption
+revoked_certificates:
+    description: List of certificates to be revoked.
+    returned: success
+    type: list
+    elements: dict
+    contains:
+        serial_number:
+            description: Serial number of the certificate.
+            type: int
+            sample: 1234
+        revocation_date:
+            description: The point in time the certificate was revoked as ASN.1 TIME.
+            type: str
+            sample: 20190413202428Z
+        issuer:
+            description: The certificate's issuer.
+            type: list
+            elements: str
+            sample: '["DNS:ca.example.org"]'
+        issuer_critical:
+            description: Whether the certificate issuer extension is critical.
+ type: bool + sample: no + reason: + description: + - The value for the revocation reason extension. + - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded), + C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and + C(remove_from_crl). + type: str + sample: key_compromise + reason_critical: + description: Whether the revocation reason extension is critical. + type: bool + sample: no + invalidity_date: + description: | + The point in time it was known/suspected that the private key was compromised + or that the certificate otherwise became invalid as ASN.1 TIME. + type: str + sample: 20190413202428Z + invalidity_date_critical: + description: Whether the invalidity date extension is critical. + type: bool + sample: no +''' + + +import traceback +from distutils.version import LooseVersion + +from ansible.module_utils import crypto as crypto_utils +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +MINIMAL_CRYPTOGRAPHY_VERSION = '1.2' + +CRYPTOGRAPHY_IMP_ERR = None +try: + import cryptography + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) +except ImportError: + CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() + CRYPTOGRAPHY_FOUND = False +else: + CRYPTOGRAPHY_FOUND = True + + +TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ" + + +class CRLError(crypto_utils.OpenSSLObjectError): + pass + + +class CRLInfo(crypto_utils.OpenSSLObject): + """The main module implementation.""" + + def __init__(self, module): + super(CRLInfo, self).__init__( + module.params['path'] or '', + 'present', + False, + module.check_mode + ) + + self.content = module.params['content'] + + self.module = module + + self.crl = None + if self.content is None: + try: + with open(self.path, 'rb') as f: + data = f.read() + except Exception as e: + self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e)) + else: + data = self.content.encode('utf-8') + + try: + self.crl = x509.load_pem_x509_crl(data, default_backend()) + except Exception as e: + self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e)) + + def _dump_revoked(self, entry): + return { + 'serial_number': entry['serial_number'], + 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT), + 'issuer': + [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']] + if entry['issuer'] is not None else None, + 'issuer_critical': entry['issuer_critical'], + 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None, + 'reason_critical': entry['reason_critical'], + 'invalidity_date': + entry['invalidity_date'].strftime(TIMESTAMP_FORMAT) + if entry['invalidity_date'] is not None else None, + 'invalidity_date_critical': entry['invalidity_date_critical'], + } + + def get_info(self): + result = { + 'changed': False, + 'last_update': None, + 'next_update': None, + 'digest': None, + 'issuer_ordered': None, + 'issuer': None, + 'revoked_certificates': [], + } + + result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT) + result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT) + try: + result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid) + except AttributeError: + # Older cryptography versions don't have signature_algorithm_oid yet + dotted = 
crypto_utils._obj2txt( + self.crl._backend._lib, + self.crl._backend._ffi, + self.crl._x509_crl.sig_alg.algorithm + ) + oid = x509.oid.ObjectIdentifier(dotted) + result['digest'] = crypto_utils.cryptography_oid_to_name(oid) + issuer = [] + for attribute in self.crl.issuer: + issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value]) + result['issuer_ordered'] = issuer + result['issuer'] = {} + for k, v in issuer: + result['issuer'][k] = v + result['revoked_certificates'] = [] + for cert in self.crl: + entry = crypto_utils.cryptography_decode_revoked_certificate(cert) + result['revoked_certificates'].append(self._dump_revoked(entry)) + + return result + + def generate(self): + # Empty method because crypto_utils.OpenSSLObject wants this + pass + + def dump(self): + # Empty method because crypto_utils.OpenSSLObject wants this + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path'), + content=dict(type='str'), + ), + required_one_of=( + ['path', 'content'], + ), + mutually_exclusive=( + ['path', 'content'], + ), + supports_check_mode=True, + ) + + if not CRYPTOGRAPHY_FOUND: + module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), + exception=CRYPTOGRAPHY_IMP_ERR) + + try: + crl = CRLInfo(module) + result = crl.get_info() + module.exit_json(**result) + except crypto_utils.OpenSSLObjectError as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/test/support/integration/plugins/modules/xml.py b/test/support/integration/plugins/modules/xml.py new file mode 100644 index 00000000..b5b35a38 --- /dev/null +++ b/test/support/integration/plugins/modules/xml.py @@ -0,0 +1,966 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Red Hat, Inc. +# Copyright: (c) 2014, Tim Bielawa +# Copyright: (c) 2014, Magnus Hedemark +# Copyright: (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: xml +short_description: Manage bits and pieces of XML files or strings +description: +- A CRUD-like interface to managing bits of XML files. +version_added: '2.4' +options: + path: + description: + - Path to the file to operate on. + - This file must exist ahead of time. + - This parameter is required, unless C(xmlstring) is given. + type: path + required: yes + aliases: [ dest, file ] + xmlstring: + description: + - A string containing XML on which to operate. + - This parameter is required, unless C(path) is given. + type: str + required: yes + xpath: + description: + - A valid XPath expression describing the item(s) you want to manipulate. + - Operates on the document root, C(/), by default. + type: str + namespaces: + description: + - The namespace C(prefix:uri) mapping for the XPath expression. + - Needs to be a C(dict), not a C(list) of items. + type: dict + state: + description: + - Set or remove an xpath selection (node(s), attribute(s)). + type: str + choices: [ absent, present ] + default: present + aliases: [ ensure ] + attribute: + description: + - The attribute to select when using parameter C(value). + - This is a string, not prepended with C(@). + type: raw + value: + description: + - Desired state of the selected attribute. 
+    - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+    - Elements default to no value (but present).
+    - Attributes default to an empty string.
+    type: raw
+  add_children:
+    description:
+    - Add additional child-element(s) to a selected element for a given C(xpath).
+    - Child elements must be given in a list and each item may be either a string
+      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+      or a hash where the key is an element name and the value is the element value.
+    - This parameter requires C(xpath) to be set.
+    type: list
+  set_children:
+    description:
+    - Set the child-element(s) of a selected element for a given C(xpath).
+    - Removes any existing children.
+    - Child elements must be specified as in C(add_children).
+    - This parameter requires C(xpath) to be set.
+    type: list
+  count:
+    description:
+    - Search for a given C(xpath) and provide the count of any matches.
+    - This parameter requires C(xpath) to be set.
+    type: bool
+    default: no
+  print_match:
+    description:
+    - Search for a given C(xpath) and print out any matches.
+    - This parameter requires C(xpath) to be set.
+    type: bool
+    default: no
+  pretty_print:
+    description:
+    - Pretty print XML output.
+    type: bool
+    default: no
+  content:
+    description:
+    - Search for a given C(xpath) and get content.
+    - This parameter requires C(xpath) to be set.
+    type: str
+    choices: [ attribute, text ]
+  input_type:
+    description:
+    - Type of input for C(add_children) and C(set_children).
+    type: str
+    choices: [ xml, yaml ]
+    default: yaml
+  backup:
+    description:
+    - Create a backup file including the timestamp information so you can get
+      the original file back if you somehow clobbered it incorrectly.
+    type: bool
+    default: no
+  strip_cdata_tags:
+    description:
+    - Remove CDATA tags surrounding text values.
+    - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+    type: bool
+    default: no
+    version_added: '2.7'
+  insertbefore:
+    description:
+    - Add additional child-element(s) before the first selected element for a given C(xpath).
+    - Child elements must be given in a list and each item may be either a string
+      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+      or a hash where the key is an element name and the value is the element value.
+    - This parameter requires C(xpath) to be set.
+    type: bool
+    default: no
+    version_added: '2.8'
+  insertafter:
+    description:
+    - Add additional child-element(s) after the last selected element for a given C(xpath).
+    - Child elements must be given in a list and each item may be either a string
+      (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+      or a hash where the key is an element name and the value is the element value.
+    - This parameter requires C(xpath) to be set.
+    type: bool
+    default: no
+    version_added: '2.8'
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
+- Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
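+- Child elements in C(add_children) and C(set_children) may be given either as plain strings
+  or as one-key mappings of element name to text value, for example C(- beer: Old Rasputin);
+  see the examples below.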
+seealso:
+- name: Xml module development community wiki
+  description: More information related to the development of this xml module.
+  link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+  description: A brief tutorial on XPath (w3schools.com).
+  link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+  description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+  link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+#   <name>Tasty Beverage Co.</name>
+#     <beers>
+#       <beer>Rochefort 10</beer>
+#       <beer>St. Bernardus Abbot 12</beer>
+#       <beer>Schlitz</beer>
+#     </beers>
+#   <rating subjective="true">10</rating>
+#   <website>
+#     <mobilefriendly/>
+#     <address>http://tastybeverageco.com</address>
+#   </website>
+# </business>
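+
+# NOTE: In the tasks below, paths like /foo/bar.xml are illustrative.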
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/rating/@subjective
+    state: absent
+
+- name: Set the rating to '11'
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/rating
+    value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/beers/beer
+    count: yes
+  register: hits
+
+- debug:
+    var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/phonenumber
+    value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/beers
+    add_children:
+    - beer: Old Rasputin
+    - beer: Old Motor Oil
+    - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+  xml:
+    path: /foo/bar.xml
+    xpath: '/business/beers/beer[text()="Rochefort 10"]'
+    insertbefore: yes
+    add_children:
+    - beer: Old Rasputin
+    - beer: Old Motor Oil
+    - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+    attribute: validatedon
+    value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/validxhtml
+    content: attribute
+  register: xmlresp
+
+- name: Show an attribute value
+  debug:
+    var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website/*
+    state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+  xml:
+    path: /foo/bar.xml
+    xpath: /business/website
+    children: []
+
+# In case of namespaces, like in below XML, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+#   <bar>
+#     <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+#   </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+  xml:
+    path: foo.xml
+    xpath: /x:foo/x:bar/y:baz
+    namespaces:
+      x: http://x.test
+      y: http://y.test
+      z: http://z.test
+    attribute: z:my_namespaced_attribute
+    value: 'false'
+'''

+RETURN = r'''
+actions:
+    description: A dictionary with the original xpath, namespaces and state.
+    type: dict
+    returned: success
+    sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+    description: The name of the backup file that was created
+    type: str
+    returned: when backup=yes
+    sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+    description: The count of xpath matches.
+    type: int
+    returned: when parameter 'count' is set
+    sample: 2
+matches:
+    description: The xpath matches found.
+    type: list
+    returned: when parameter 'print_match' is set
+msg:
+    description: A message related to the performed action(s).
+    type: str
+    returned: always
+xmlstring:
+    description: An XML string of the resulting output.
+ type: str + returned: when parameter 'xmlstring' is set +''' + +import copy +import json +import os +import re +import traceback + +from distutils.version import LooseVersion +from io import BytesIO + +LXML_IMP_ERR = None +try: + from lxml import etree, objectify + HAS_LXML = True +except ImportError: + LXML_IMP_ERR = traceback.format_exc() + HAS_LXML = False + +from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common._collections_compat import MutableMapping + +_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" +_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT +# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate +# strings wrapped by the other delimiter' XPath trick, especially as simple XPath. +_XPSTR = "('(?:.*)'|\"(?:.*)\")" + +_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") +_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") +_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") +_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") +_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") +_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") + + +def has_changed(doc): + orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) + obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) + return (orig_obj != obj) + + +def do_print_match(module, tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + match_xpaths = [] + for m in match: + match_xpaths.append(tree.getpath(m)) + match_str = json.dumps(match_xpaths) + msg = "selector '%s' match: %s" % (xpath, match_str) + finish(module, tree, xpath, namespaces, changed=False, msg=msg) + + +def count_nodes(module, tree, xpath, namespaces): + """ Return the count of nodes matching the xpath """ + hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) + msg = "found %d nodes" % hits + finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) + + +def is_node(tree, xpath, namespaces): + """ Test if a given xpath matches anything and if that match is a node. 
+ + For now we just assume you're only searching for one specific thing.""" + if xpath_matches(tree, xpath, namespaces): + # OK, it found something + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._Element): + return True + + return False + + +def is_attribute(tree, xpath, namespaces): + """ Test if a given xpath matches and that match is an attribute + + An xpath attribute search will only match one item""" + if xpath_matches(tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._ElementStringResult): + return True + elif isinstance(match[0], etree._ElementUnicodeResult): + return True + return False + + +def xpath_matches(tree, xpath, namespaces): + """ Test if a node exists """ + if tree.xpath(xpath, namespaces=namespaces): + return True + return False + + +def delete_xpath_target(module, tree, xpath, namespaces): + """ Delete an attribute or element from a tree """ + try: + for result in tree.xpath(xpath, namespaces=namespaces): + # Get the xpath for this result + if is_attribute(tree, xpath, namespaces): + # Delete an attribute + parent = result.getparent() + # Pop this attribute match out of the parent + # node's 'attrib' dict by using this match's + # 'attrname' attribute for the key + parent.attrib.pop(result.attrname) + elif is_node(tree, xpath, namespaces): + # Delete an element + result.getparent().remove(result) + else: + raise Exception("Impossible error") + except Exception as e: + module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) + else: + finish(module, tree, xpath, namespaces, changed=True) + + +def replace_children_of(children, match): + for element in list(match): + match.remove(element) + match.extend(children) + + +def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): + matches = tree.xpath(xpath, namespaces=namespaces) + + # Create a list of our new children + children = children_to_nodes(module, children, in_type) + children_as_string = [etree.tostring(c) for c in children] + + changed = False + + # xpaths always return matches as a list, so.... + for match in matches: + # Check if elements differ + if len(list(match)) == len(children): + for idx, element in enumerate(list(match)): + if etree.tostring(element) != children_as_string[idx]: + replace_children_of(children, match) + changed = True + break + else: + replace_children_of(children, match) + changed = True + + return changed + + +def set_target_children(module, tree, xpath, namespaces, children, in_type): + changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) + # Write it out + finish(module, tree, xpath, namespaces, changed=changed) + + +def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): + if is_node(tree, xpath, namespaces): + new_kids = children_to_nodes(module, children, in_type) + if insertbefore or insertafter: + insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) + else: + for node in tree.xpath(xpath, namespaces=namespaces): + node.extend(new_kids) + finish(module, tree, xpath, namespaces, changed=True) + else: + finish(module, tree, xpath, namespaces) + + +def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): + """ + Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the + first xpath hit, with insertafter, it is inserted after the last xpath hit. 
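+    Note that this assumes all xpath hits share a single parent element; the
+    insertion index is computed relative to that parent.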
+ """ + insert_target = tree.xpath(xpath, namespaces=namespaces) + loc_index = 0 if insertbefore else -1 + index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) + parent = insert_target[0].getparent() + if insertafter: + index_in_parent += 1 + for child in children: + parent.insert(index_in_parent, child) + index_in_parent += 1 + + +def _extract_xpstr(g): + return g[1:-1] + + +def split_xpath_last(xpath): + """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" + xpath = xpath.strip() + m = _RE_SPLITSIMPLELAST.match(xpath) + if m: + # requesting an element to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) + if m: + # requesting an element to exist with an inner text + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSIMPLEATTRLAST.match(xpath) + if m: + # requesting an attribute to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) + if m: + # requesting an attribute to exist with a value + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSUBLAST.match(xpath) + if m: + content = [x.strip() for x in m.group(3).split(" and ")] + return (m.group(1), [('/' + m.group(2), content)]) + + m = _RE_SPLITONLYEQVALUE.match(xpath) + if m: + # requesting a change of inner text + return (m.group(1), [("", _extract_xpstr(m.group(2)))]) + return (xpath, []) + + +def nsnameToClark(name, namespaces): + if ":" in name: + (nsname, rawname) = name.split(":") + # return "{{%s}}%s" % (namespaces[nsname], rawname) + return "{{{0}}}{1}".format(namespaces[nsname], rawname) + + # no namespace name here + return name + + +def check_or_make_target(module, tree, xpath, namespaces): + (inner_xpath, changes) = split_xpath_last(xpath) + if (inner_xpath == xpath) or (changes is None): + module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + return False + + changed = False + + if not is_node(tree, inner_xpath, namespaces): + changed = check_or_make_target(module, tree, inner_xpath, namespaces) + + # we test again after calling check_or_make_target + if is_node(tree, inner_xpath, namespaces) and changes: + for (eoa, eoa_value) in changes: + if eoa and eoa[0] != '@' and eoa[0] != '/': + # implicitly creating an element + new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") + if eoa_value: + for nk in new_kids: + nk.text = eoa_value + + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + changed = True + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa and eoa[0] == '/': + element = eoa[1:] + new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + for nk in new_kids: + for subexpr in eoa_value: + # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % + # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) + check_or_make_target(module, nk, "./" + subexpr, namespaces) + changed = True + + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa == "": + for node in tree.xpath(inner_xpath, namespaces=namespaces): + if (node.text != eoa_value): + node.text = eoa_value + changed = True + + elif eoa and eoa[0] == '@': + attribute = nsnameToClark(eoa[1:], namespaces) + + for element in tree.xpath(inner_xpath, namespaces=namespaces): + changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) + + if changing: + changed = changed or changing + if eoa_value is None: + value = "" + else: + value = eoa_value + element.attrib[attribute] = value + + # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % + # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) + + else: + module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) + + return changed + + +def ensure_xpath_exists(module, tree, xpath, namespaces): + changed = False + + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + + finish(module, tree, xpath, namespaces, changed) + + +def set_target_inner(module, tree, xpath, namespaces, attribute, value): + changed = False + + try: + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + except Exception as e: + missing_namespace = "" + # NOTE: This checks only the namespaces defined in root element! + # TODO: Implement a more robust check to check for child namespaces' existence + if tree.getroot().nsmap and ":" not in xpath: + missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" + module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % + (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) + + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + + for element in tree.xpath(xpath, namespaces=namespaces): + if not attribute: + changed = changed or (element.text != value) + if element.text != value: + element.text = value + else: + changed = changed or (element.get(attribute) != value) + if ":" in attribute: + attr_ns, attr_name = attribute.split(":") + # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) + attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) + if element.get(attribute) != value: + element.set(attribute, value) + + return changed + + +def set_target(module, tree, xpath, namespaces, attribute, value): + changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) + finish(module, tree, xpath, namespaces, changed) + + +def get_element_text(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + elements.append({element.tag: element.text}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def get_element_attr(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + child = {} + for key in element.keys(): + value = element.get(key) + child.update({key: value}) + elements.append({element.tag: child}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def child_to_element(module, child, in_type): + if in_type == 'xml': + infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) + + try: + parser = etree.XMLParser() + node = etree.parse(infile, parser) + return node.getroot() + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing child element: %s" % e) + elif in_type == 'yaml': + if isinstance(child, string_types): + return etree.Element(child) + elif isinstance(child, MutableMapping): + if len(child) > 1: + module.fail_json(msg="Can only create children from hashes with one key") + + (key, value) = next(iteritems(child)) + if isinstance(value, MutableMapping): + children = value.pop('_', None) + + node = etree.Element(key, value) + + if children is not None: + if not isinstance(children, list): + module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) + + subnodes = children_to_nodes(module, children) + node.extend(subnodes) + else: + node = etree.Element(key) + node.text = value + return node + else: + module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) + else: + module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) + + +def children_to_nodes(module=None, children=None, type='yaml'): + """turn a str/hash/list of str&hash into a list of elements""" + children = [] if children is None else children + + return [child_to_element(module, child, type) for child in children] + + +def make_pretty(module, tree): + xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + result = dict( + changed=False, + ) + + if module.params['path']: + xml_file = module.params['path'] + with open(xml_file, 'rb') as xml_content: + if xml_string != xml_content.read(): + result['changed'] = True + if not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + elif module.params['xmlstring']: + result['xmlstring'] = xml_string + # NOTE: Modifying a string is not considered a change ! + if xml_string != module.params['xmlstring']: + result['changed'] = True + + module.exit_json(**result) + + +def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): + + result = dict( + actions=dict( + xpath=xpath, + namespaces=namespaces, + state=module.params['state'] + ), + changed=has_changed(tree), + ) + + if module.params['count'] or hitcount: + result['count'] = hitcount + + if module.params['print_match'] or matches: + result['matches'] = matches + + if msg: + result['msg'] = msg + + if result['changed']: + if module._diff: + result['diff'] = dict( + before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), + after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), + ) + + if module.params['path'] and not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + if module.params['xmlstring']: + result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + module.exit_json(**result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', aliases=['dest', 'file']), + xmlstring=dict(type='str'), + xpath=dict(type='str'), + namespaces=dict(type='dict', default={}), + state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), + value=dict(type='raw'), + attribute=dict(type='raw'), + add_children=dict(type='list'), + set_children=dict(type='list'), + count=dict(type='bool', default=False), + print_match=dict(type='bool', default=False), + pretty_print=dict(type='bool', default=False), + content=dict(type='str', choices=['attribute', 'text']), + input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), + backup=dict(type='bool', default=False), + strip_cdata_tags=dict(type='bool', default=False), + insertbefore=dict(type='bool', default=False), + insertafter=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_by=dict( + add_children=['xpath'], + # TODO: Reinstate this in Ansible v2.12 when we have deprecated the incorrect use below + # attribute=['value'], + content=['xpath'], + set_children=['xpath'], + value=['xpath'], + ), + required_if=[ + ['count', True, ['xpath']], + ['print_match', True, ['xpath']], + ['insertbefore', True, ['xpath']], + 
['insertafter', True, ['xpath']], + ], + required_one_of=[ + ['path', 'xmlstring'], + ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], + ], + mutually_exclusive=[ + ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], + ['path', 'xmlstring'], + ['insertbefore', 'insertafter'], + ], + ) + + xml_file = module.params['path'] + xml_string = module.params['xmlstring'] + xpath = module.params['xpath'] + namespaces = module.params['namespaces'] + state = module.params['state'] + value = json_dict_bytes_to_unicode(module.params['value']) + attribute = module.params['attribute'] + set_children = json_dict_bytes_to_unicode(module.params['set_children']) + add_children = json_dict_bytes_to_unicode(module.params['add_children']) + pretty_print = module.params['pretty_print'] + content = module.params['content'] + input_type = module.params['input_type'] + print_match = module.params['print_match'] + count = module.params['count'] + backup = module.params['backup'] + strip_cdata_tags = module.params['strip_cdata_tags'] + insertbefore = module.params['insertbefore'] + insertafter = module.params['insertafter'] + + # Check if we have lxml 2.3.0 or newer installed + if not HAS_LXML: + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): + module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): + module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') + + # Report wrongly used attribute parameter when using content=attribute + # TODO: Remove this in Ansible v2.12 (and reinstate strict parameter test above) and remove the integration test example + if content == 'attribute' and attribute is not None: + module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute, + '2.12', collection_name='ansible.builtin') + + # Check if the file exists + if xml_string: + infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) + elif os.path.isfile(xml_file): + infile = open(xml_file, 'rb') + else: + module.fail_json(msg="The target XML source '%s' does not exist." 
% xml_file) + + # Parse and evaluate xpath expression + if xpath is not None: + try: + etree.XPath(xpath) + except etree.XPathSyntaxError as e: + module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) + except etree.XPathEvalError as e: + module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) + + # Try to parse in the target XML file + try: + parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) + doc = etree.parse(infile, parser) + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) + + # Ensure we have the original copy to compare + global orig_doc + orig_doc = copy.deepcopy(doc) + + if print_match: + do_print_match(module, doc, xpath, namespaces) + + if count: + count_nodes(module, doc, xpath, namespaces) + + if content == 'attribute': + get_element_attr(module, doc, xpath, namespaces) + elif content == 'text': + get_element_text(module, doc, xpath, namespaces) + + # File exists: + if state == 'absent': + # - absent: delete xpath target + delete_xpath_target(module, doc, xpath, namespaces) + + # - present: carry on + + # children && value both set?: should have already aborted by now + # add_children && set_children both set?: should have already aborted by now + + # set_children set? + if set_children: + set_target_children(module, doc, xpath, namespaces, set_children, input_type) + + # add_children set? + if add_children: + add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) + + # No?: Carry on + + # Is the xpath target an attribute selector? + if value is not None: + set_target(module, doc, xpath, namespaces, attribute, value) + + # If an xpath was provided, we need to do something with the data + if xpath is not None: + ensure_xpath_exists(module, doc, xpath, namespaces) + + # Otherwise only reformat the xml data? + if pretty_print: + make_pretty(module, doc) + + module.fail_json(msg="Don't know what to do") + + +if __name__ == '__main__': + main() diff --git a/test/support/integration/plugins/modules/zypper.py b/test/support/integration/plugins/modules/zypper.py new file mode 100644 index 00000000..bfb31819 --- /dev/null +++ b/test/support/integration/plugins/modules/zypper.py @@ -0,0 +1,540 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Callahan +# based on +# openbsd_pkg +# (c) 2013 +# Patrik Lundin +# +# yum +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: zypper +author: + - "Patrick Callahan (@dirtyharrycallahan)" + - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" +version_added: "1.2" +short_description: Manage packages on SUSE and openSUSE +description: + - Manage packages on SUSE and openSUSE using the zypper and rpm tools. +options: + name: + description: + - Package name C(name) or package specifier or a list of either. + - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to + update the package within the version range given. 
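+ - For example, C(name=docker>=1.10) requests docker at version 1.10 or newer; because a version is given, C(oldpackage) is implied (illustrative values, matching the EXAMPLES below).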
+ - You can also pass a URL or a local path to an rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade ]
+ default: "present"
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ version_added: "2.0"
+ extra_args_precommand:
+ version_added: "2.6"
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of the package
+ being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ version_added: "1.8"
+ description:
+ - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) passes C(--no-recommends) and skips recommended
+ packages; C(no) lets zypper install recommended packages.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ version_added: "2.2"
+ description:
+ - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ version_added: "2.10"
+ description:
+ - Adds C(--force-resolution) option to I(zypper). Allows installing or removing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ update_cache:
+ version_added: "2.2"
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ version_added: "2.2"
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+ version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ version_added: "2.4"
+ required: false
+ description:
+ - Add additional options to the C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+notes:
+ - When used with a `loop:` each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option. 
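+ - For example, C(name: [apache2, postfix]) is resolved in a single zypper transaction, while the same list under a `loop:` invokes zypper once per package (illustrative package names).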
+# informational: requirements for nodes +requirements: + - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm +''' + +EXAMPLES = ''' +# Install "nmap" +- zypper: + name: nmap + state: present + +# Install apache2 with recommended packages +- zypper: + name: apache2 + state: present + disable_recommends: no + +# Apply a given patch +- zypper: + name: openSUSE-2016-128 + state: present + type: patch + +# Remove the "nmap" package +- zypper: + name: nmap + state: absent + +# Install the nginx rpm from a remote repo +- zypper: + name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' + state: present + +# Install local rpm file +- zypper: + name: /tmp/fancy-software.rpm + state: present + +# Update all packages +- zypper: + name: '*' + state: latest + +# Apply all available patches +- zypper: + name: '*' + state: latest + type: patch + +# Perform a dist-upgrade with additional arguments +- zypper: + name: '*' + state: dist-upgrade + extra_args: '--no-allow-vendor-change --allow-arch-change' + +# Refresh repositories and update package "openssl" +- zypper: + name: openssl + state: present + update_cache: yes + +# Install specific version (possible comparisons: <, >, <=, >=, =) +- zypper: + name: 'docker>=1.10' + state: present + +# Wait 20 seconds to acquire the lock before failing +- zypper: + name: mosh + state: present + environment: + ZYPP_LOCK_TIMEOUT: 20 +''' + +import xml +import re +from xml.dom.minidom import parseString as parseXML +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + + +class Package: + def __init__(self, name, prefix, version): + self.name = name + self.prefix = prefix + self.version = version + self.shouldinstall = (prefix == '+') + + def __str__(self): + return self.prefix + self.name + self.version + + +def split_name_version(name): + """splits of the package name and desired version + + example formats: + - docker>=1.10 + - apache=2.4 + + Allowed version specifiers: <, >, <=, >=, = + Allowed version format: [0-9.-]* + + Also allows a prefix indicating remove "-", "~" or install "+" + """ + + prefix = '' + if name[0] in ['-', '~', '+']: + prefix = name[0] + name = name[1:] + if prefix == '~': + prefix = '-' + + version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + try: + reres = version_check.match(name) + name, version = reres.groups() + if version is None: + version = '' + return prefix, name, version + except Exception: + return prefix, name, '' + + +def get_want_state(names, remove=False): + packages = [] + urls = [] + for name in names: + if '://' in name or name.endswith('.rpm'): + urls.append(name) + else: + prefix, pname, version = split_name_version(name) + if prefix not in ['-', '+']: + if remove: + prefix = '-' + else: + prefix = '+' + packages.append(Package(pname, prefix, version)) + return packages, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd.extend([p.name for p in packages]) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + try: + dom = parseXML(stdout) + except xml.parsers.expat.ExpatError as exc: + m.fail_json(msg="Failed to 
parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ firstrun = packages is None
+ if firstrun:
+ packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, 
want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + packages, urls = get_want_state(name) + + # add oldpackage flag when a version is given to allow downgrades + if any(p.version for p in packages): + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + # if a version is given leave the package in to let zypper handle the version + # resolution + packageswithoutversion = [p for p in packages if not p.version] + prerun_state = get_installed_state(m, packageswithoutversion) + # generate lists of packages to install or remove + packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] + + if not packages and not urls: + # nothing to install/remove and nothing to update + return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + # pass packages to zypper + # allow for + or - prefixes in install/remove lists + # also add version specifier if given + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend([str(p) for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + elif m.params['state'] == 'dist-upgrade': + cmdname = 'dist-upgrade' + else: + cmdname = 'update' + + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + packages, urls = get_want_state(name, remove=True) + if any(p.prefix == '+' for p in packages): + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, packages) + packages = [p for p in packages if p.name in prerun_state] + + if not packages: + return None, retvals + + cmd = get_cmd(m, 'remove') + cmd.extend([p.name + p.version for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + + cmd = get_cmd(m, 'refresh') + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return retvals + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['pkg'], type='list'), + state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + extra_args_precommand=dict(required=False, default=None), + 
disable_gpg_check=dict(required=False, default='no', type='bool'), + disable_recommends=dict(required=False, default='yes', type='bool'), + force=dict(required=False, default='no', type='bool'), + force_resolution=dict(required=False, default='no', type='bool'), + update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'), + oldpackage=dict(required=False, default='no', type='bool'), + extra_args=dict(required=False, default=None), + ), + supports_check_mode=True + ) + + name = module.params['name'] + state = module.params['state'] + update_cache = module.params['update_cache'] + + # remove empty strings from package list + name = list(filter(None, name)) + + # Refresh repositories + if update_cache and not module.check_mode: + retvals = repo_refresh(module) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper refresh run failed.", **retvals) + + # Perform requested action + if name == ['*'] and state in ['latest', 'dist-upgrade']: + packages_changed, retvals = package_update_all(module) + elif name != ['*'] and state == 'dist-upgrade': + module.fail_json(msg="Can not dist-upgrade specific packages.") + else: + if state in ['absent', 'removed']: + packages_changed, retvals = package_absent(module, name) + elif state in ['installed', 'present', 'latest']: + packages_changed, retvals = package_present(module, name, state == 'latest') + + retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) + + if module._diff: + set_diff(module, retvals, packages_changed) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper run failed.", **retvals) + + if not retvals['changed']: + del retvals['stdout'] + del retvals['stderr'] + + module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py new file mode 100644 index 00000000..089b339f --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py @@ -0,0 +1,40 @@ +# +# Copyright 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.action.network import ( + ActionModule as ActionNetworkModule, +) + + +class ActionModule(ActionNetworkModule): + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + self._config_module = True + if self._play_context.connection.split(".")[-1] != "network_cli": + return { + "failed": True, + "msg": "Connection type %s is not valid for cli_config module" + % self._play_context.connection, + } + + return super(ActionModule, self).run(task_vars=task_vars) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py new file mode 100644 index 00000000..542dcfef --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py @@ -0,0 +1,90 @@ +# Copyright: (c) 2015, Ansible Inc, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import copy + +from ansible.errors import AnsibleError +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + result = {} + play_context = copy.deepcopy(self._play_context) + play_context.network_os = self._get_network_os(task_vars) + new_task = self._task.copy() + + module = self._get_implementation_module( + play_context.network_os, self._task.action + ) + if not module: + if self._task.args["fail_on_missing_module"]: + result["failed"] = True + else: + result["failed"] = False + + result["msg"] = ( + "Could not find implementation module %s for %s" + % (self._task.action, play_context.network_os) + ) + return result + + new_task.action = module + + action = self._shared_loader_obj.action_loader.get( + play_context.network_os, + task=new_task, + connection=self._connection, + play_context=play_context, + loader=self._loader, + templar=self._templar, + shared_loader_obj=self._shared_loader_obj, + ) + display.vvvv("Running implementation module %s" % module) + return action.run(task_vars=task_vars) + + def _get_network_os(self, task_vars): + if "network_os" in self._task.args and self._task.args["network_os"]: + display.vvvv("Getting network OS from task argument") + network_os = self._task.args["network_os"] + elif self._play_context.network_os: + display.vvvv("Getting network OS from inventory") + network_os = self._play_context.network_os + elif ( + "network_os" in task_vars.get("ansible_facts", {}) + and task_vars["ansible_facts"]["network_os"] + ): + display.vvvv("Getting network OS from fact") + network_os = task_vars["ansible_facts"]["network_os"] + else: + raise AnsibleError( + "ansible_network_os must be specified on this host to use platform agnostic modules" + ) + + return network_os + + def _get_implementation_module(self, network_os, platform_agnostic_module): + module_name = ( + network_os.split(".")[-1] + + "_" + + platform_agnostic_module.partition("_")[2] + ) + if "." in network_os: + fqcn_module = ".".join(network_os.split(".")[0:-1]) + implementation_module = fqcn_module + "." 
+ module_name + else: + implementation_module = module_name + + if implementation_module not in self._shared_loader_obj.module_loader: + implementation_module = None + + return implementation_module diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py new file mode 100644 index 00000000..40205a46 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py @@ -0,0 +1,199 @@ +# (c) 2018, Ansible Inc, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import os +import re +import uuid +import hashlib + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.connection import Connection, ConnectionError +from ansible.plugins.action import ActionBase +from ansible.module_utils.six.moves.urllib.parse import urlsplit +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + socket_path = None + self._get_network_os(task_vars) + persistent_connection = self._play_context.connection.split(".")[-1] + + result = super(ActionModule, self).run(task_vars=task_vars) + + if persistent_connection != "network_cli": + # It is supported only with network_cli + result["failed"] = True + result["msg"] = ( + "connection type %s is not valid for net_get module," + " please use fully qualified name of network_cli connection type" + % self._play_context.connection + ) + return result + + try: + src = self._task.args["src"] + except KeyError as exc: + return { + "failed": True, + "msg": "missing required argument: %s" % exc, + } + + # Get destination file if specified + dest = self._task.args.get("dest") + + if dest is None: + dest = self._get_default_dest(src) + else: + dest = self._handle_dest_path(dest) + + # Get proto + proto = self._task.args.get("protocol") + if proto is None: + proto = "scp" + + if socket_path is None: + socket_path = self._connection.socket_path + + conn = Connection(socket_path) + sock_timeout = conn.get_option("persistent_command_timeout") + + try: + changed = self._handle_existing_file( + conn, src, dest, proto, sock_timeout + ) + if changed is False: + result["changed"] = changed + result["destination"] = dest + return result + except Exception as exc: + result["msg"] = ( + "Warning: %s idempotency check failed. 
Check dest" % exc + ) + + try: + conn.get_file( + source=src, destination=dest, proto=proto, timeout=sock_timeout + ) + except Exception as exc: + result["failed"] = True + result["msg"] = "Exception received: %s" % exc + + result["changed"] = changed + result["destination"] = dest + return result + + def _handle_dest_path(self, dest): + working_path = self._get_working_path() + + if os.path.isabs(dest) or urlsplit("dest").scheme: + dst = dest + else: + dst = self._loader.path_dwim_relative(working_path, "", dest) + + return dst + + def _get_src_filename_from_path(self, src_path): + filename_list = re.split("/|:", src_path) + return filename_list[-1] + + def _get_default_dest(self, src_path): + dest_path = self._get_working_path() + src_fname = self._get_src_filename_from_path(src_path) + filename = "%s/%s" % (dest_path, src_fname) + return filename + + def _handle_existing_file(self, conn, source, dest, proto, timeout): + """ + Determines whether the source and destination file match. + + :return: False if source and dest both exist and have matching sha1 sums, True otherwise. + """ + if not os.path.exists(dest): + return True + + cwd = self._loader.get_basedir() + filename = str(uuid.uuid4()) + tmp_dest_file = os.path.join(cwd, filename) + try: + conn.get_file( + source=source, + destination=tmp_dest_file, + proto=proto, + timeout=timeout, + ) + except ConnectionError as exc: + error = to_text(exc) + if error.endswith("No such file or directory"): + if os.path.exists(tmp_dest_file): + os.remove(tmp_dest_file) + return True + + try: + with open(tmp_dest_file, "r") as f: + new_content = f.read() + with open(dest, "r") as f: + old_content = f.read() + except (IOError, OSError): + os.remove(tmp_dest_file) + raise + + sha1 = hashlib.sha1() + old_content_b = to_bytes(old_content, errors="surrogate_or_strict") + sha1.update(old_content_b) + checksum_old = sha1.digest() + + sha1 = hashlib.sha1() + new_content_b = to_bytes(new_content, errors="surrogate_or_strict") + sha1.update(new_content_b) + checksum_new = sha1.digest() + os.remove(tmp_dest_file) + if checksum_old == checksum_new: + return False + return True + + def _get_working_path(self): + cwd = self._loader.get_basedir() + if self._task._role is not None: + cwd = self._task._role._role_path + return cwd + + def _get_network_os(self, task_vars): + if "network_os" in self._task.args and self._task.args["network_os"]: + display.vvvv("Getting network OS from task argument") + network_os = self._task.args["network_os"] + elif self._play_context.network_os: + display.vvvv("Getting network OS from inventory") + network_os = self._play_context.network_os + elif ( + "network_os" in task_vars.get("ansible_facts", {}) + and task_vars["ansible_facts"]["network_os"] + ): + display.vvvv("Getting network OS from fact") + network_os = task_vars["ansible_facts"]["network_os"] + else: + raise AnsibleError( + "ansible_network_os must be specified on this host" + ) + + return network_os diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py new file mode 100644 index 00000000..955329d4 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py @@ -0,0 +1,235 @@ +# (c) 2018, Ansible Inc, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import uuid
+import hashlib
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ socket_path = None
+ network_os = self._get_network_os(task_vars).split(".")[-1]
+ persistent_connection = self._play_context.connection.split(".")[-1]
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if persistent_connection != "network_cli":
+ # It is supported only with network_cli
+ result["failed"] = True
+ result["msg"] = (
+ "connection type %s is not valid for net_put module,"
+ " please use fully qualified name of network_cli connection type"
+ % self._play_context.connection
+ )
+ return result
+
+ try:
+ src = self._task.args["src"]
+ except KeyError as exc:
+ return {
+ "failed": True,
+ "msg": "missing required argument: %s" % exc,
+ }
+
+ src_file_path_name = src
+
+ # Get destination file if specified
+ dest = self._task.args.get("dest")
+
+ # Get proto
+ proto = self._task.args.get("protocol")
+ if proto is None:
+ proto = "scp"
+
+ # Get mode if set
+ mode = self._task.args.get("mode")
+ if mode is None:
+ mode = "binary"
+
+ if mode == "text":
+ try:
+ self._handle_template(convert_data=False)
+ except ValueError as exc:
+ return dict(failed=True, msg=to_text(exc))
+
+ # Now that src is resolved, write the file to disk in the current directory for scp
+ src = self._task.args.get("src")
+ filename = str(uuid.uuid4())
+ cwd = self._loader.get_basedir()
+ output_file = os.path.join(cwd, filename)
+ try:
+ with open(output_file, "wb") as f:
+ f.write(to_bytes(src, encoding="utf-8"))
+ except Exception:
+ os.remove(output_file)
+ raise
+ else:
+ try:
+ output_file = self._get_binary_src_file(src)
+ except ValueError as exc:
+ return dict(failed=True, msg=to_text(exc))
+
+ if socket_path is None:
+ socket_path = self._connection.socket_path
+
+ conn = Connection(socket_path)
+ sock_timeout = conn.get_option("persistent_command_timeout")
+
+ if dest is None:
+ dest = src_file_path_name
+
+ try:
+ changed = self._handle_existing_file(
+ conn, output_file, dest, proto, sock_timeout
+ )
+ if changed is False:
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+ except Exception as exc:
+ result["msg"] = (
+ "Warning: %s idempotency check failed. Check dest" % exc
+ )
+
+ try:
+ conn.copy_file(
+ source=output_file,
+ destination=dest,
+ proto=proto,
+ timeout=sock_timeout,
+ )
+ except Exception as exc:
+ if to_text(exc) == "No response from server":
+ if network_os == "iosxr":
+ # IOSXR sometimes closes socket prematurely after completion
+ # of file transfer
+ result[
+ "msg"
+ ] = "Warning: iosxr scp server pre close issue. Please check dest"
+ else:
+ result["failed"] = True
+ result["msg"] = "Exception received: %s" % exc
+
+ if mode == "text":
+ # Cleanup tmp file expanded with ansible vars
+ os.remove(output_file)
+
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+
+ def _handle_existing_file(self, conn, source, dest, proto, timeout):
+ """
+ Determines whether the source and destination file match.
+
+ :return: False if source and dest both exist and have matching sha1 sums, True otherwise.
+ """
+ cwd = self._loader.get_basedir()
+ filename = str(uuid.uuid4())
+ tmp_source_file = os.path.join(cwd, filename)
+ try:
+ conn.get_file(
+ source=dest,
+ destination=tmp_source_file,
+ proto=proto,
+ timeout=timeout,
+ )
+ except ConnectionError as exc:
+ error = to_text(exc)
+ if error.endswith("No such file or directory"):
+ if os.path.exists(tmp_source_file):
+ os.remove(tmp_source_file)
+ return True
+
+ try:
+ with open(source, "r") as f:
+ new_content = f.read()
+ with open(tmp_source_file, "r") as f:
+ old_content = f.read()
+ except (IOError, OSError):
+ os.remove(tmp_source_file)
+ raise
+
+ sha1 = hashlib.sha1()
+ old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
+ sha1.update(old_content_b)
+ checksum_old = sha1.digest()
+
+ sha1 = hashlib.sha1()
+ new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
+ sha1.update(new_content_b)
+ checksum_new = sha1.digest()
+ os.remove(tmp_source_file)
+ if checksum_old == checksum_new:
+ return False
+ return True
+
+ def _get_binary_src_file(self, src):
+ working_path = self._get_working_path()
+
+ if os.path.isabs(src) or urlsplit(src).scheme:
+ source = src
+ else:
+ source = self._loader.path_dwim_relative(
+ working_path, "templates", src
+ )
+ if not source:
+ source = self._loader.path_dwim_relative(working_path, src)
+
+ if not os.path.exists(source):
+ raise ValueError("path specified in src not found")
+
+ return source
+
+ def _get_working_path(self):
+ cwd = self._loader.get_basedir()
+ if self._task._role is not None:
+ cwd = self._task._role._role_path
+ return cwd
+
+ def _get_network_os(self, task_vars):
+ if "network_os" in self._task.args and self._task.args["network_os"]:
+ display.vvvv("Getting network OS from task argument")
+ network_os = self._task.args["network_os"]
+ elif self._play_context.network_os:
+ display.vvvv("Getting network OS from inventory")
+ network_os = self._play_context.network_os
+ elif (
+ "network_os" in task_vars.get("ansible_facts", {})
+ and task_vars["ansible_facts"]["network_os"]
+ ):
+ display.vvvv("Getting network OS from fact")
+ network_os = task_vars["ansible_facts"]["network_os"]
+ else:
+ raise AnsibleError(
+ "ansible_network_os must be specified on this host"
+ )
+
+ return network_os
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py
new file mode 100644
index 00000000..5d05d338
--- /dev/null
+++ 
b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py @@ -0,0 +1,209 @@ +# +# (c) 2018 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import os +import time +import re + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.six.moves.urllib.parse import urlsplit +from ansible.plugins.action.normal import ActionModule as _ActionModule +from ansible.utils.display import Display + +display = Display() + +PRIVATE_KEYS_RE = re.compile("__.+__") + + +class ActionModule(_ActionModule): + def run(self, task_vars=None): + config_module = hasattr(self, "_config_module") and self._config_module + if config_module and self._task.args.get("src"): + try: + self._handle_src_option() + except AnsibleError as e: + return {"failed": True, "msg": e.message, "changed": False} + + result = super(ActionModule, self).run(task_vars=task_vars) + + if ( + config_module + and self._task.args.get("backup") + and not result.get("failed") + ): + self._handle_backup_option(result, task_vars) + + return result + + def _handle_backup_option(self, result, task_vars): + + filename = None + backup_path = None + try: + content = result["__backup__"] + except KeyError: + raise AnsibleError("Failed while reading configuration backup") + + backup_options = self._task.args.get("backup_options") + if backup_options: + filename = backup_options.get("filename") + backup_path = backup_options.get("dir_path") + + if not backup_path: + cwd = self._get_working_path() + backup_path = os.path.join(cwd, "backup") + if not filename: + tstamp = time.strftime( + "%Y-%m-%d@%H:%M:%S", time.localtime(time.time()) + ) + filename = "%s_config.%s" % ( + task_vars["inventory_hostname"], + tstamp, + ) + + dest = os.path.join(backup_path, filename) + backup_path = os.path.expanduser( + os.path.expandvars( + to_bytes(backup_path, errors="surrogate_or_strict") + ) + ) + + if not os.path.exists(backup_path): + os.makedirs(backup_path) + + new_task = self._task.copy() + for item in self._task.args: + if not item.startswith("_"): + new_task.args.pop(item, None) + + new_task.args.update(dict(content=content, dest=dest)) + copy_action = self._shared_loader_obj.action_loader.get( + "copy", + task=new_task, + connection=self._connection, + play_context=self._play_context, + loader=self._loader, + templar=self._templar, + shared_loader_obj=self._shared_loader_obj, + ) + copy_result = copy_action.run(task_vars=task_vars) + if copy_result.get("failed"): + result["failed"] = copy_result["failed"] + result["msg"] = copy_result.get("msg") + return + + result["backup_path"] = dest + if copy_result.get("changed", False): + result["changed"] = copy_result["changed"] + + if backup_options and backup_options.get("filename"): + result["date"] = time.strftime( 
+ "%Y-%m-%d", + time.gmtime(os.stat(result["backup_path"]).st_ctime), + ) + result["time"] = time.strftime( + "%H:%M:%S", + time.gmtime(os.stat(result["backup_path"]).st_ctime), + ) + + else: + result["date"] = tstamp.split("@")[0] + result["time"] = tstamp.split("@")[1] + result["shortname"] = result["backup_path"][::-1].split(".", 1)[1][ + ::-1 + ] + result["filename"] = result["backup_path"].split("/")[-1] + + # strip out any keys that have two leading and two trailing + # underscore characters + for key in list(result.keys()): + if PRIVATE_KEYS_RE.match(key): + del result[key] + + def _get_working_path(self): + cwd = self._loader.get_basedir() + if self._task._role is not None: + cwd = self._task._role._role_path + return cwd + + def _handle_src_option(self, convert_data=True): + src = self._task.args.get("src") + working_path = self._get_working_path() + + if os.path.isabs(src) or urlsplit("src").scheme: + source = src + else: + source = self._loader.path_dwim_relative( + working_path, "templates", src + ) + if not source: + source = self._loader.path_dwim_relative(working_path, src) + + if not os.path.exists(source): + raise AnsibleError("path specified in src not found") + + try: + with open(source, "r") as f: + template_data = to_text(f.read()) + except IOError as e: + raise AnsibleError( + "unable to load src file {0}, I/O error({1}): {2}".format( + source, e.errno, e.strerror + ) + ) + + # Create a template search path in the following order: + # [working_path, self_role_path, dependent_role_paths, dirname(source)] + searchpath = [working_path] + if self._task._role is not None: + searchpath.append(self._task._role._role_path) + if hasattr(self._task, "_block:"): + dep_chain = self._task._block.get_dep_chain() + if dep_chain is not None: + for role in dep_chain: + searchpath.append(role._role_path) + searchpath.append(os.path.dirname(source)) + with self._templar.set_temporary_context(searchpath=searchpath): + self._task.args["src"] = self._templar.template( + template_data, convert_data=convert_data + ) + + def _get_network_os(self, task_vars): + if "network_os" in self._task.args and self._task.args["network_os"]: + display.vvvv("Getting network OS from task argument") + network_os = self._task.args["network_os"] + elif self._play_context.network_os: + display.vvvv("Getting network OS from inventory") + network_os = self._play_context.network_os + elif ( + "network_os" in task_vars.get("ansible_facts", {}) + and task_vars["ansible_facts"]["network_os"] + ): + display.vvvv("Getting network OS from fact") + network_os = task_vars["ansible_facts"]["network_os"] + else: + raise AnsibleError( + "ansible_network_os must be specified on this host" + ) + + return network_os diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py new file mode 100644 index 00000000..33938fd1 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """become: enable +short_description: Switch to elevated permissions on a network device +description: +- This become plugins allows 
elevated permissions on a remote network device. +author: ansible (@core) +options: + become_pass: + description: password + ini: + - section: enable_become_plugin + key: password + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_enable_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_ENABLE_PASS +notes: +- enable is really implemented in the network connection handler and as such can only + be used with network connections. +- This plugin ignores the 'become_exe' and 'become_user' settings as it uses an API + and not an executable. +""" + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = "ansible.netcommon.enable" + + def build_become_command(self, cmd, shell): + # enable is implemented inside the network connection plugins + return cmd diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py new file mode 100644 index 00000000..b063ef0d --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py @@ -0,0 +1,324 @@ +# (c) 2018 Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Networking Team +connection: httpapi +short_description: Use httpapi to run command on network appliances +description: +- This connection plugin provides a connection to remote devices over a HTTP(S)-based + api. +options: + host: + description: + - Specifies the remote device FQDN or IP address to establish the HTTP(S) connection + to. + default: inventory_hostname + vars: + - name: ansible_host + port: + type: int + description: + - Specifies the port on the remote device that listens for connections when establishing + the HTTP(S) connection. + - When unspecified, will pick 80 or 443 based on the value of use_ssl. + ini: + - section: defaults + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + vars: + - name: ansible_httpapi_port + network_os: + description: + - Configures the device platform network operating system. This value is used + to load the correct httpapi plugin to communicate with the remote device + vars: + - name: ansible_network_os + remote_user: + description: + - The username used to authenticate to the remote device when the API connection + is first established. If the remote_user is not specified, the connection will + use the username of the logged in user. + - Can be configured from the CLI via the C(--user) or C(-u) options. + ini: + - section: defaults + key: remote_user + env: + - name: ANSIBLE_REMOTE_USER + vars: + - name: ansible_user + password: + description: + - Configures the user password used to authenticate to the remote device when + needed for the device API. + vars: + - name: ansible_password + - name: ansible_httpapi_pass + - name: ansible_httpapi_password + use_ssl: + type: boolean + description: + - Whether to connect using SSL (HTTPS) or not (HTTP). 
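+ # NOTE: an illustrative sketch of the port fallback described above (explicit
+ # port if given, else 443 for HTTPS or 80 for HTTP); the helper name is
+ # hypothetical and not part of this plugin:
+ #
+ #     def resolve_port(use_ssl, port=None):
+ #         if port:
+ #             return port
+ #         return 443 if use_ssl else 80
+ #
+ #     assert resolve_port(use_ssl=True) == 443
+ #     assert resolve_port(use_ssl=False, port=8080) == 8080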
+ default: false + vars: + - name: ansible_httpapi_use_ssl + validate_certs: + type: boolean + description: + - Whether to validate SSL certificates + default: true + vars: + - name: ansible_httpapi_validate_certs + use_proxy: + type: boolean + description: + - Whether to use https_proxy for requests. + default: true + vars: + - name: ansible_httpapi_use_proxy + become: + type: boolean + description: + - The become option will instruct the CLI session to attempt privilege escalation + on platforms that support it. Normally this means transitioning from user mode + to C(enable) mode in the CLI session. If become is set to True and the remote + device does not support privilege escalation or the privilege has already been + elevated, then this option is silently ignored. + - Can be configured from the CLI via the C(--become) or C(-b) options. + default: false + ini: + - section: privilege_escalation + key: become + env: + - name: ANSIBLE_BECOME + vars: + - name: ansible_become + become_method: + description: + - This option allows the become method to be specified in for handling privilege + escalation. Typically the become_method value is set to C(enable) but could + be defined as other values. + default: sudo + ini: + - section: privilege_escalation + key: become_method + env: + - name: ANSIBLE_BECOME_METHOD + vars: + - name: ansible_become_method + persistent_connect_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait when trying to initially + establish a persistent connection. If this value expires before the connection + to the remote device is completed, the connection will fail. + default: 30 + ini: + - section: persistent_connection + key: connect_timeout + env: + - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT + vars: + - name: ansible_connect_timeout + persistent_command_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait for a command to return from + the remote device. If this timer is exceeded before the command returns, the + connection plugin will raise an exception and close. + default: 30 + ini: + - section: persistent_connection + key: command_timeout + env: + - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT + vars: + - name: ansible_command_timeout + persistent_log_messages: + type: boolean + description: + - This flag will enable logging the command executed and response received from + target device in the ansible log file. For this option to work 'log_path' ansible + configuration option is required to be set to a file path with write access. + - Be sure to fully understand the security implications of enabling this option + as it could create a security vulnerability by logging sensitive information + in log file. 
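+ # NOTE: the persistent_connect_timeout/persistent_command_timeout options above
+ # bound connection setup and each request respectively; send() below forwards the
+ # command timer to ansible.module_utils.urls.open_url. A minimal illustration,
+ # with a hypothetical endpoint and values:
+ #
+ #     from ansible.module_utils.urls import open_url
+ #
+ #     response = open_url(
+ #         "https://192.0.2.1/api",  # hypothetical device endpoint
+ #         data=None,
+ #         timeout=30,               # persistent_command_timeout
+ #         validate_certs=True,      # validate_certs option above
+ #     )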
+ default: false + ini: + - section: persistent_connection + key: log_messages + env: + - name: ANSIBLE_PERSISTENT_LOG_MESSAGES + vars: + - name: ansible_persistent_log_messages +""" + +from io import BytesIO + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_bytes +from ansible.module_utils.six import PY3 +from ansible.module_utils.six.moves import cPickle +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import open_url +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import httpapi_loader +from ansible.plugins.connection import NetworkConnectionBase, ensure_connect + + +class Connection(NetworkConnectionBase): + """Network API connection""" + + transport = "ansible.netcommon.httpapi" + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__( + play_context, new_stdin, *args, **kwargs + ) + + self._url = None + self._auth = None + + if self._network_os: + + self.httpapi = httpapi_loader.get(self._network_os, self) + if self.httpapi: + self._sub_plugin = { + "type": "httpapi", + "name": self.httpapi._load_name, + "obj": self.httpapi, + } + self.queue_message( + "vvvv", + "loaded API plugin %s from path %s for network_os %s" + % ( + self.httpapi._load_name, + self.httpapi._original_path, + self._network_os, + ), + ) + else: + raise AnsibleConnectionFailure( + "unable to load API plugin for network_os %s" + % self._network_os + ) + + else: + raise AnsibleConnectionFailure( + "Unable to automatically determine host network os. Please " + "manually configure ansible_network_os value for this host" + ) + self.queue_message("log", "network_os is set to %s" % self._network_os) + + def update_play_context(self, pc_data): + """Updates the play context information for the connection""" + pc_data = to_bytes(pc_data) + if PY3: + pc_data = cPickle.loads(pc_data, encoding="bytes") + else: + pc_data = cPickle.loads(pc_data) + play_context = PlayContext() + play_context.deserialize(pc_data) + + self.queue_message("vvvv", "updating play_context for connection") + if self._play_context.become ^ play_context.become: + self.set_become(play_context) + if play_context.become is True: + self.queue_message("vvvv", "authorizing connection") + else: + self.queue_message("vvvv", "deauthorizing connection") + + self._play_context = play_context + + def _connect(self): + if not self.connected: + protocol = "https" if self.get_option("use_ssl") else "http" + host = self.get_option("host") + port = self.get_option("port") or ( + 443 if protocol == "https" else 80 + ) + self._url = "%s://%s:%s" % (protocol, host, port) + + self.queue_message( + "vvv", + "ESTABLISH HTTP(S) CONNECT FOR USER: %s TO %s" + % (self._play_context.remote_user, self._url), + ) + self.httpapi.set_become(self._play_context) + self._connected = True + + self.httpapi.login( + self.get_option("remote_user"), self.get_option("password") + ) + + def close(self): + """ + Close the active session to the device + """ + # only close the connection if it's connected.
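+ # NOTE on the session lifecycle, as an illustrative sketch rather than the
+ # plugin's code verbatim: login() (called at the end of _connect above) presents
+ # credentials, update_auth() in send() may capture a token from each response
+ # into self._auth, and close() invalidates the device-side session via logout().
+ # The token-versus-basic-auth choice in send() is roughly:
+ #
+ #     if self._auth:                              # token captured from a response
+ #         headers.update(self._auth)              # e.g. {"X-auth-token": "..."}
+ #     else:
+ #         url_kwargs["force_basic_auth"] = True   # fall back to user/password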
+ if self._connected: + self.queue_message("vvvv", "closing http(s) connection to device") + self.logout() + + super(Connection, self).close() + + @ensure_connect + def send(self, path, data, **kwargs): + """ + Sends the command to the device over api + """ + url_kwargs = dict( + timeout=self.get_option("persistent_command_timeout"), + validate_certs=self.get_option("validate_certs"), + use_proxy=self.get_option("use_proxy"), + headers={}, + ) + url_kwargs.update(kwargs) + if self._auth: + # Avoid modifying passed-in headers + headers = dict(kwargs.get("headers", {})) + headers.update(self._auth) + url_kwargs["headers"] = headers + else: + url_kwargs["force_basic_auth"] = True + url_kwargs["url_username"] = self.get_option("remote_user") + url_kwargs["url_password"] = self.get_option("password") + + try: + url = self._url + path + self._log_messages( + "send url '%s' with data '%s' and kwargs '%s'" + % (url, data, url_kwargs) + ) + response = open_url(url, data=data, **url_kwargs) + except HTTPError as exc: + is_handled = self.handle_httperror(exc) + if is_handled is True: + return self.send(path, data, **kwargs) + elif is_handled is False: + raise + else: + response = is_handled + except URLError as exc: + raise AnsibleConnectionFailure( + "Could not connect to {0}: {1}".format( + self._url + path, exc.reason + ) + ) + + response_buffer = BytesIO() + resp_data = response.read() + self._log_messages("received response: '%s'" % resp_data) + response_buffer.write(resp_data) + + # Try to assign a new auth token if one is given + self._auth = self.update_auth(response, response_buffer) or self._auth + + response_buffer.seek(0) + + return response, response_buffer diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py new file mode 100644 index 00000000..1e2d3caa --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py @@ -0,0 +1,404 @@ +# (c) 2016 Red Hat Inc. +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Networking Team +connection: netconf +short_description: Provides a persistent connection using the netconf protocol +description: +- This connection plugin provides a connection to remote devices over the SSH NETCONF + subsystem. This connection plugin is typically used by network devices for sending + and receiving RPC calls over NETCONF. +- Note this connection plugin requires ncclient to be installed on the local Ansible + controller. +requirements: +- ncclient +options: + host: + description: + - Specifies the remote device FQDN or IP address to establish the SSH connection + to. + default: inventory_hostname + vars: + - name: ansible_host + port: + type: int + description: + - Specifies the port on the remote device that listens for connections when establishing + the SSH connection. + default: 830 + ini: + - section: defaults + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + vars: + - name: ansible_port + network_os: + description: + - Configures the device platform network operating system. This value is used + to load a device specific netconf plugin. 
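+ # NOTE: the C(auto) behaviour described in the next sentence is implemented in
+ # this plugin's _connect() by probing every netconf plugin; schematically
+ # (illustrative, not the vendored code verbatim):
+ #
+ #     network_os = "auto"
+ #     for cls in netconf_loader.all(class_only=True):
+ #         guessed = cls.guess_network_os(connection)
+ #         if guessed:
+ #             network_os = guessed
+ #     if network_os == "auto":
+ #         network_os = "default"   # fall back to the default netconf plugin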
If this option is not configured + (or set to C(auto)), then Ansible will attempt to guess the correct network_os + to use. If it can not guess a network_os correctly it will use C(default). + vars: + - name: ansible_network_os + remote_user: + description: + - The username used to authenticate to the remote device when the SSH connection + is first established. If the remote_user is not specified, the connection will + use the username of the logged in user. + - Can be configured from the CLI via the C(--user) or C(-u) options. + ini: + - section: defaults + key: remote_user + env: + - name: ANSIBLE_REMOTE_USER + vars: + - name: ansible_user + password: + description: + - Configures the user password used to authenticate to the remote device when + first establishing the SSH connection. + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + - name: ansible_netconf_password + private_key_file: + description: + - The private SSH key or certificate file used to authenticate to the remote device + when first establishing the SSH connection. + ini: + - section: defaults + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + look_for_keys: + default: true + description: + - Enables looking for ssh keys in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`). + env: + - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS + ini: + - section: paramiko_connection + key: look_for_keys + type: boolean + host_key_checking: + description: Set this to "False" if you want to avoid host key checking by the + underlying tools Ansible uses to connect to the host + type: boolean + default: true + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + - name: ANSIBLE_NETCONF_HOST_KEY_CHECKING + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + vars: + - name: ansible_host_key_checking + - name: ansible_ssh_host_key_checking + - name: ansible_netconf_host_key_checking + persistent_connect_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait when trying to initially + establish a persistent connection. If this value expires before the connection + to the remote device is completed, the connection will fail. + default: 30 + ini: + - section: persistent_connection + key: connect_timeout + env: + - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT + vars: + - name: ansible_connect_timeout + persistent_command_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait for a command to return from + the remote device. If this timer is exceeded before the command returns, the + connection plugin will raise an exception and close. + default: 30 + ini: + - section: persistent_connection + key: command_timeout + env: + - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT + vars: + - name: ansible_command_timeout + netconf_ssh_config: + description: + - This variable is used to enable bastion/jump host with netconf connection. If + set to True the bastion/jump host ssh settings should be present in ~/.ssh/config + file, alternatively it can be set to custom ssh configuration file path to read + the bastion/jump host settings. 
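+ # NOTE: _connect() below normalizes this option before handing it to ncclient;
+ # truthy strings select the default ~/.ssh/config, falsy strings disable the ssh
+ # config entirely, and any other value is kept as a custom config path. Roughly:
+ #
+ #     ssh_config = self.get_option("netconf_ssh_config")
+ #     if ssh_config in BOOLEANS_TRUE:
+ #         ssh_config = True    # use the default ssh config file
+ #     elif ssh_config in BOOLEANS_FALSE:
+ #         ssh_config = None    # disable ssh config
+ #     # any other value is treated as a path to an ssh config file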
+ ini: + - section: netconf_connection + key: ssh_config + version_added: '2.7' + env: + - name: ANSIBLE_NETCONF_SSH_CONFIG + vars: + - name: ansible_netconf_ssh_config + version_added: '2.7' + persistent_log_messages: + type: boolean + description: + - This flag will enable logging the command executed and response received from + target device in the ansible log file. For this option to work 'log_path' ansible + configuration option is required to be set to a file path with write access. + - Be sure to fully understand the security implications of enabling this option + as it could create a security vulnerability by logging sensitive information + in log file. + default: false + ini: + - section: persistent_connection + key: log_messages + env: + - name: ANSIBLE_PERSISTENT_LOG_MESSAGES + vars: + - name: ansible_persistent_log_messages +""" + +import os +import logging +import json + +from ansible.errors import AnsibleConnectionFailure, AnsibleError +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.parsing.convert_bool import ( + BOOLEANS_TRUE, + BOOLEANS_FALSE, +) +from ansible.plugins.loader import netconf_loader +from ansible.plugins.connection import NetworkConnectionBase, ensure_connect + +try: + from ncclient import manager + from ncclient.operations import RPCError + from ncclient.transport.errors import SSHUnknownHostError + from ncclient.xml_ import to_ele, to_xml + + HAS_NCCLIENT = True + NCCLIENT_IMP_ERR = None +except ( + ImportError, + AttributeError, +) as err: # paramiko and gssapi are incompatible and raise AttributeError not ImportError + HAS_NCCLIENT = False + NCCLIENT_IMP_ERR = err + +logging.getLogger("ncclient").setLevel(logging.INFO) + + +class Connection(NetworkConnectionBase): + """NetConf connections""" + + transport = "ansible.netcommon.netconf" + has_pipelining = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__( + play_context, new_stdin, *args, **kwargs + ) + + # If network_os is not specified then set the network os to auto + # This will be used to trigger the use of guess_network_os when connecting. + self._network_os = self._network_os or "auto" + + self.netconf = netconf_loader.get(self._network_os, self) + if self.netconf: + self._sub_plugin = { + "type": "netconf", + "name": self.netconf._load_name, + "obj": self.netconf, + } + self.queue_message( + "vvvv", + "loaded netconf plugin %s from path %s for network_os %s" + % ( + self.netconf._load_name, + self.netconf._original_path, + self._network_os, + ), + ) + else: + self.netconf = netconf_loader.get("default", self) + self._sub_plugin = { + "type": "netconf", + "name": "default", + "obj": self.netconf, + } + self.queue_message( + "display", + "unable to load netconf plugin for network_os %s, falling back to default plugin" + % self._network_os, + ) + + self.queue_message("log", "network_os is set to %s" % self._network_os) + self._manager = None + self.key_filename = None + self._ssh_config = None + + def exec_command(self, cmd, in_data=None, sudoable=True): + """Sends the request to the node and returns the reply + The method accepts two forms of request. The first form is as a byte + string that represents xml string be send over netconf session. + The second form is a json-rpc (2.0) byte string. 
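+
+ A hypothetical XML-form invocation (illustrative only) looks like:
+
+     conn.exec_command(b"<get-config><source><running/></source></get-config>")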
+ """ + if self._manager: + # to_ele operates on native strings + request = to_ele(to_native(cmd, errors="surrogate_or_strict")) + + if request is None: + return "unable to parse request" + + try: + reply = self._manager.rpc(request) + except RPCError as exc: + error = self.internal_error( + data=to_text(to_xml(exc.xml), errors="surrogate_or_strict") + ) + return json.dumps(error) + + return reply.data_xml + else: + return super(Connection, self).exec_command(cmd, in_data, sudoable) + + @property + @ensure_connect + def manager(self): + return self._manager + + def _connect(self): + if not HAS_NCCLIENT: + raise AnsibleError( + "%s: %s" + % ( + missing_required_lib("ncclient"), + to_native(NCCLIENT_IMP_ERR), + ) + ) + + self.queue_message("log", "ssh connection done, starting ncclient") + + allow_agent = True + if self._play_context.password is not None: + allow_agent = False + setattr(self._play_context, "allow_agent", allow_agent) + + self.key_filename = ( + self._play_context.private_key_file + or self.get_option("private_key_file") + ) + if self.key_filename: + self.key_filename = str(os.path.expanduser(self.key_filename)) + + self._ssh_config = self.get_option("netconf_ssh_config") + if self._ssh_config in BOOLEANS_TRUE: + self._ssh_config = True + elif self._ssh_config in BOOLEANS_FALSE: + self._ssh_config = None + + # Try to guess the network_os if the network_os is set to auto + if self._network_os == "auto": + for cls in netconf_loader.all(class_only=True): + network_os = cls.guess_network_os(self) + if network_os: + self.queue_message( + "vvv", "discovered network_os %s" % network_os + ) + self._network_os = network_os + + # If we have tried to detect the network_os but were unable to i.e. network_os is still 'auto' + # then use default as the network_os + + if self._network_os == "auto": + # Network os not discovered. Set it to default + self.queue_message( + "vvv", + "Unable to discover network_os. Falling back to default.", + ) + self._network_os = "default" + try: + ncclient_device_handler = self.netconf.get_option( + "ncclient_device_handler" + ) + except KeyError: + ncclient_device_handler = "default" + self.queue_message( + "vvv", + "identified ncclient device handler: %s." 
+ % ncclient_device_handler, + ) + device_params = {"name": ncclient_device_handler} + + try: + port = self._play_context.port or 830 + self.queue_message( + "vvv", + "ESTABLISH NETCONF SSH CONNECTION FOR USER: %s on PORT %s TO %s WITH SSH_CONFIG = %s" + % ( + self._play_context.remote_user, + port, + self._play_context.remote_addr, + self._ssh_config, + ), + ) + self._manager = manager.connect( + host=self._play_context.remote_addr, + port=port, + username=self._play_context.remote_user, + password=self._play_context.password, + key_filename=self.key_filename, + hostkey_verify=self.get_option("host_key_checking"), + look_for_keys=self.get_option("look_for_keys"), + device_params=device_params, + allow_agent=self._play_context.allow_agent, + timeout=self.get_option("persistent_connect_timeout"), + ssh_config=self._ssh_config, + ) + + self._manager._timeout = self.get_option( + "persistent_command_timeout" + ) + except SSHUnknownHostError as exc: + raise AnsibleConnectionFailure(to_native(exc)) + except ImportError: + raise AnsibleError( + "connection=netconf is not supported on {0}".format( + self._network_os + ) + ) + + if not self._manager.connected: + return 1, b"", b"not connected" + + self.queue_message( + "log", "ncclient manager object created successfully" + ) + + self._connected = True + + super(Connection, self)._connect() + + return ( + 0, + to_bytes(self._manager.session_id, errors="surrogate_or_strict"), + b"", + ) + + def close(self): + if self._manager: + self._manager.close_session() + super(Connection, self).close() diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py new file mode 100644 index 00000000..8abcf8e8 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py @@ -0,0 +1,924 @@ +# (c) 2016 Red Hat Inc. +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Networking Team +connection: network_cli +short_description: Use network_cli to run command on network appliances +description: +- This connection plugin provides a connection to remote devices over SSH and + implements a CLI shell. This connection plugin is typically used by network devices + for sending and receiving CLI commands to network devices. +options: + host: + description: + - Specifies the remote device FQDN or IP address to establish the SSH connection + to. + default: inventory_hostname + vars: + - name: ansible_host + port: + type: int + description: + - Specifies the port on the remote device that listens for connections when establishing + the SSH connection. + default: 22 + ini: + - section: defaults + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + vars: + - name: ansible_port + network_os: + description: + - Configures the device platform network operating system. This value is used + to load the correct terminal and cliconf plugins to communicate with the remote + device. + vars: + - name: ansible_network_os + remote_user: + description: + - The username used to authenticate to the remote device when the SSH connection + is first established.
If the remote_user is not specified, the connection will + use the username of the logged in user. + - Can be configured from the CLI via the C(--user) or C(-u) options. + ini: + - section: defaults + key: remote_user + env: + - name: ANSIBLE_REMOTE_USER + vars: + - name: ansible_user + password: + description: + - Configures the user password used to authenticate to the remote device when + first establishing the SSH connection. + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + private_key_file: + description: + - The private SSH key or certificate file used to authenticate to the remote device + when first establishing the SSH connection. + ini: + - section: defaults + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + become: + type: boolean + description: + - The become option will instruct the CLI session to attempt privilege escalation + on platforms that support it. Normally this means transitioning from user mode + to C(enable) mode in the CLI session. If become is set to True and the remote + device does not support privilege escalation or the privilege has already been + elevated, then this option is silently ignored. + - Can be configured from the CLI via the C(--become) or C(-b) options. + default: false + ini: + - section: privilege_escalation + key: become + env: + - name: ANSIBLE_BECOME + vars: + - name: ansible_become + become_method: + description: + - This option allows the become method to be specified in for handling privilege + escalation. Typically the become_method value is set to C(enable) but could + be defined as other values. + default: sudo + ini: + - section: privilege_escalation + key: become_method + env: + - name: ANSIBLE_BECOME_METHOD + vars: + - name: ansible_become_method + host_key_auto_add: + type: boolean + description: + - By default, Ansible will prompt the user before adding SSH keys to the known + hosts file. Since persistent connections such as network_cli run in background + processes, the user will never be prompted. By enabling this option, unknown + host keys will automatically be added to the known hosts file. + - Be sure to fully understand the security implications of enabling this option + on production systems as it could create a security vulnerability. + default: false + ini: + - section: paramiko_connection + key: host_key_auto_add + env: + - name: ANSIBLE_HOST_KEY_AUTO_ADD + persistent_connect_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait when trying to initially + establish a persistent connection. If this value expires before the connection + to the remote device is completed, the connection will fail. + default: 30 + ini: + - section: persistent_connection + key: connect_timeout + env: + - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT + vars: + - name: ansible_connect_timeout + persistent_command_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait for a command to return from + the remote device. If this timer is exceeded before the command returns, the + connection plugin will raise an exception and close. 
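+ # NOTE: network_cli enforces this timer with SIGALRM around channel reads (see
+ # receive() further below); schematically, with hypothetical helper names:
+ #
+ #     import signal
+ #
+ #     signal.signal(signal.SIGALRM, handle_command_timeout)  # raises AnsibleConnectionFailure
+ #     signal.alarm(command_timeout)                          # whole-command budget, in seconds
+ #     data = ssh_shell.recv(256)                             # interrupted if the timer fires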
+ default: 30 + ini: + - section: persistent_connection + key: command_timeout + env: + - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT + vars: + - name: ansible_command_timeout + persistent_buffer_read_timeout: + type: float + description: + - Configures, in seconds, the amount of time to wait for the data to be read from + Paramiko channel after the command prompt is matched. This timeout value ensures + that the matched command prompt is correct and there is no more data left to be + received from the remote host. + default: 0.1 + ini: + - section: persistent_connection + key: buffer_read_timeout + env: + - name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT + vars: + - name: ansible_buffer_read_timeout + persistent_log_messages: + type: boolean + description: + - This flag will enable logging the command executed and response received from + target device in the ansible log file. For this option to work 'log_path' ansible + configuration option is required to be set to a file path with write access. + - Be sure to fully understand the security implications of enabling this option + as it could create a security vulnerability by logging sensitive information + in log file. + default: false + ini: + - section: persistent_connection + key: log_messages + env: + - name: ANSIBLE_PERSISTENT_LOG_MESSAGES + vars: + - name: ansible_persistent_log_messages + terminal_stdout_re: + type: list + elements: dict + description: + - A single regex pattern or a sequence of patterns along with optional flags to + match the command prompt from the received response chunk. This option accepts + C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern + to match the response and the value of C(flags) is the value accepted by I(flags) + argument of I(re.compile) python method to control the way regex is matched + with the response, for example I('re.I'). + vars: + - name: ansible_terminal_stdout_re + terminal_stderr_re: + type: list + elements: dict + description: + - This option provides the regex pattern and optional flags to match the error + string from the received response chunk. This option accepts C(pattern) and + C(flags) keys. The value of C(pattern) is a python regex pattern to match the + response and the value of C(flags) is the value accepted by I(flags) argument + of I(re.compile) python method to control the way regex is matched with the + response, for example I('re.I'). + vars: + - name: ansible_terminal_stderr_re + terminal_initial_prompt: + type: list + description: + - A single regex pattern or a sequence of patterns to evaluate the expected prompt + at the time of initial login to the remote host. + vars: + - name: ansible_terminal_initial_prompt + terminal_initial_answer: + type: list + description: + - The answer to reply with if the C(terminal_initial_prompt) is matched. The value + can be a single answer or a list of answers for multiple terminal_initial_prompt. + In case the login menu has multiple prompts the sequence of the prompts and expected + answers should be in the same order and the value of I(terminal_prompt_checkall) + should be set to I(True) if all the values in C(terminal_initial_prompt) are + expected to be matched and set to I(False) if any one login prompt is to be + matched. + vars: + - name: ansible_terminal_initial_answer + terminal_initial_prompt_checkall: + type: boolean + description: + - By default the value is set to I(False); once any one of the prompts mentioned + in the C(terminal_initial_prompt) option is matched, it won't check for other prompts.
+ When set to I(True) it will check for all the prompts mentioned in the C(terminal_initial_prompt) + option in the given order; all the prompts should be received from the remote + host, otherwise it will result in a timeout. + default: false + vars: + - name: ansible_terminal_initial_prompt_checkall + terminal_inital_prompt_newline: + type: boolean + description: + - When set to I(True), this boolean flag will send a newline in the response + if any of the values in I(terminal_initial_prompt) is matched. + default: true + vars: + - name: ansible_terminal_initial_prompt_newline + network_cli_retries: + description: + - Number of attempts to connect to the remote host. The delay time between the retries + increases after every attempt by a power of 2 in seconds, until either the maximum + attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout) + timers are triggered. + default: 3 + type: integer + env: + - name: ANSIBLE_NETWORK_CLI_RETRIES + ini: + - section: persistent_connection + key: network_cli_retries + vars: + - name: ansible_network_cli_retries +""" + +from functools import wraps +import getpass +import json +import logging +import re +import os +import signal +import socket +import time +import traceback +from io import BytesIO + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils.six import PY3 +from ansible.module_utils.six.moves import cPickle +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.module_utils._text import to_bytes, to_text +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import NetworkConnectionBase +from ansible.plugins.loader import ( + cliconf_loader, + terminal_loader, + connection_loader, +) + + +def ensure_connect(func): + @wraps(func) + def wrapped(self, *args, **kwargs): + if not self._connected: + self._connect() + self.update_cli_prompt_context() + return func(self, *args, **kwargs) + + return wrapped + + +class AnsibleCmdRespRecv(Exception): + pass + + +class Connection(NetworkConnectionBase): + """ CLI (shell) SSH connections on Paramiko """ + + transport = "ansible.netcommon.network_cli" + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__( + play_context, new_stdin, *args, **kwargs + ) + self._ssh_shell = None + + self._matched_prompt = None + self._matched_cmd_prompt = None + self._matched_pattern = None + self._last_response = None + self._history = list() + self._command_response = None + self._last_recv_window = None + + self._terminal = None + self.cliconf = None + self._paramiko_conn = None + + # Managing prompt context + self._check_prompt = False + self._task_uuid = to_text(kwargs.get("task_uuid", "")) + + if self._play_context.verbosity > 3: + logging.getLogger("paramiko").setLevel(logging.DEBUG) + + if self._network_os: + self._terminal = terminal_loader.get(self._network_os, self) + if not self._terminal: + raise AnsibleConnectionFailure( + "network os %s is not supported" % self._network_os + ) + + self.cliconf = cliconf_loader.get(self._network_os, self) + if self.cliconf: + self._sub_plugin = { + "type": "cliconf", + "name": self.cliconf._load_name, + "obj": self.cliconf, + } + self.queue_message( + "vvvv", + "loaded cliconf plugin %s from path %s for network_os %s" + % ( + self.cliconf._load_name, + self.cliconf._original_path, + self._network_os, + ), + ) + else: + self.queue_message( + "vvvv", + "unable
to load cliconf for network_os %s" + % self._network_os, + ) + else: + raise AnsibleConnectionFailure( + "Unable to automatically determine host network os. Please " + "manually configure ansible_network_os value for this host" + ) + self.queue_message("log", "network_os is set to %s" % self._network_os) + + @property + def paramiko_conn(self): + if self._paramiko_conn is None: + self._paramiko_conn = connection_loader.get( + "paramiko", self._play_context, "/dev/null" + ) + self._paramiko_conn.set_options( + direct={ + "look_for_keys": not bool( + self._play_context.password + and not self._play_context.private_key_file + ) + } + ) + return self._paramiko_conn + + def _get_log_channel(self): + name = "p=%s u=%s | " % (os.getpid(), getpass.getuser()) + name += "paramiko [%s]" % self._play_context.remote_addr + return name + + @ensure_connect + def get_prompt(self): + """Returns the current prompt from the device""" + return self._matched_prompt + + def exec_command(self, cmd, in_data=None, sudoable=True): + # this try..except block is just to handle the transition to supporting + # network_cli as a toplevel connection. Once connection=local is gone, + # this block can be removed as well and all calls passed directly to + # the local connection + if self._ssh_shell: + try: + cmd = json.loads(to_text(cmd, errors="surrogate_or_strict")) + kwargs = { + "command": to_bytes( + cmd["command"], errors="surrogate_or_strict" + ) + } + for key in ( + "prompt", + "answer", + "sendonly", + "newline", + "prompt_retry_check", + ): + if cmd.get(key) is True or cmd.get(key) is False: + kwargs[key] = cmd[key] + elif cmd.get(key) is not None: + kwargs[key] = to_bytes( + cmd[key], errors="surrogate_or_strict" + ) + return self.send(**kwargs) + except ValueError: + cmd = to_bytes(cmd, errors="surrogate_or_strict") + return self.send(command=cmd) + + else: + return super(Connection, self).exec_command(cmd, in_data, sudoable) + + def update_play_context(self, pc_data): + """Updates the play context information for the connection""" + pc_data = to_bytes(pc_data) + if PY3: + pc_data = cPickle.loads(pc_data, encoding="bytes") + else: + pc_data = cPickle.loads(pc_data) + play_context = PlayContext() + play_context.deserialize(pc_data) + + self.queue_message("vvvv", "updating play_context for connection") + if self._play_context.become ^ play_context.become: + if play_context.become is True: + auth_pass = play_context.become_pass + self._terminal.on_become(passwd=auth_pass) + self.queue_message("vvvv", "authorizing connection") + else: + self._terminal.on_unbecome() + self.queue_message("vvvv", "deauthorizing connection") + + self._play_context = play_context + + if hasattr(self, "reset_history"): + self.reset_history() + if hasattr(self, "disable_response_logging"): + self.disable_response_logging() + + def set_check_prompt(self, task_uuid): + self._check_prompt = task_uuid + + def update_cli_prompt_context(self): + # set cli prompt context at the start of new task run only + if self._check_prompt and self._task_uuid != self._check_prompt: + self._task_uuid, self._check_prompt = self._check_prompt, False + self.set_cli_prompt_context() + + def _connect(self): + """ + Connects to the remote device and starts the terminal + """ + if not self.connected: + self.paramiko_conn._set_log_channel(self._get_log_channel()) + self.paramiko_conn.force_persistence = self.force_persistence + + command_timeout = self.get_option("persistent_command_timeout") + max_pause = min( + [ + self.get_option("persistent_connect_timeout"), + 
command_timeout, + ] + ) + retries = self.get_option("network_cli_retries") + total_pause = 0 + + for attempt in range(retries + 1): + try: + ssh = self.paramiko_conn._connect() + break + except Exception as e: + pause = 2 ** (attempt + 1) + if attempt == retries or total_pause >= max_pause: + raise AnsibleConnectionFailure( + to_text(e, errors="surrogate_or_strict") + ) + else: + msg = ( + u"network_cli_retry: attempt: %d, caught exception(%s), " + u"pausing for %d seconds" + % ( + attempt + 1, + to_text(e, errors="surrogate_or_strict"), + pause, + ) + ) + + self.queue_message("vv", msg) + time.sleep(pause) + total_pause += pause + continue + + self.queue_message("vvvv", "ssh connection done, setting terminal") + self._connected = True + + self._ssh_shell = ssh.ssh.invoke_shell() + self._ssh_shell.settimeout(command_timeout) + + self.queue_message( + "vvvv", + "loaded terminal plugin for network_os %s" % self._network_os, + ) + + terminal_initial_prompt = ( + self.get_option("terminal_initial_prompt") + or self._terminal.terminal_initial_prompt + ) + terminal_initial_answer = ( + self.get_option("terminal_initial_answer") + or self._terminal.terminal_initial_answer + ) + newline = ( + self.get_option("terminal_inital_prompt_newline") + or self._terminal.terminal_inital_prompt_newline + ) + check_all = ( + self.get_option("terminal_initial_prompt_checkall") or False + ) + + self.receive( + prompts=terminal_initial_prompt, + answer=terminal_initial_answer, + newline=newline, + check_all=check_all, + ) + + if self._play_context.become: + self.queue_message("vvvv", "firing event: on_become") + auth_pass = self._play_context.become_pass + self._terminal.on_become(passwd=auth_pass) + + self.queue_message("vvvv", "firing event: on_open_shell()") + self._terminal.on_open_shell() + + self.queue_message( + "vvvv", "ssh connection has completed successfully" + ) + + return self + + def close(self): + """ + Close the active connection to the device + """ + # only close the connection if its connected. 
+ if self._connected: + self.queue_message("debug", "closing ssh connection to device") + if self._ssh_shell: + self.queue_message("debug", "firing event: on_close_shell()") + self._terminal.on_close_shell() + self._ssh_shell.close() + self._ssh_shell = None + self.queue_message("debug", "cli session is now closed") + + self.paramiko_conn.close() + self._paramiko_conn = None + self.queue_message( + "debug", "ssh connection has been closed successfully" + ) + super(Connection, self).close() + + def receive( + self, + command=None, + prompts=None, + answer=None, + newline=True, + prompt_retry_check=False, + check_all=False, + ): + """ + Handles receiving of output from command + """ + self._matched_prompt = None + self._matched_cmd_prompt = None + recv = BytesIO() + handled = False + command_prompt_matched = False + matched_prompt_window = window_count = 0 + + # set terminal regex values for command prompt and errors in response + self._terminal_stderr_re = self._get_terminal_std_re( + "terminal_stderr_re" + ) + self._terminal_stdout_re = self._get_terminal_std_re( + "terminal_stdout_re" + ) + + cache_socket_timeout = self._ssh_shell.gettimeout() + command_timeout = self.get_option("persistent_command_timeout") + self._validate_timeout_value( + command_timeout, "persistent_command_timeout" + ) + if cache_socket_timeout != command_timeout: + self._ssh_shell.settimeout(command_timeout) + + buffer_read_timeout = self.get_option("persistent_buffer_read_timeout") + self._validate_timeout_value( + buffer_read_timeout, "persistent_buffer_read_timeout" + ) + + self._log_messages("command: %s" % command) + while True: + if command_prompt_matched: + try: + signal.signal( + signal.SIGALRM, self._handle_buffer_read_timeout + ) + signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout) + data = self._ssh_shell.recv(256) + signal.alarm(0) + self._log_messages( + "response-%s: %s" % (window_count + 1, data) + ) + # if data is still received on channel it indicates the prompt string + # is wrongly matched in between response chunks, continue to read + # remaining response. + command_prompt_matched = False + + # restart command_timeout timer + signal.signal(signal.SIGALRM, self._handle_command_timeout) + signal.alarm(command_timeout) + + except AnsibleCmdRespRecv: + # reset socket timeout to global timeout + self._ssh_shell.settimeout(cache_socket_timeout) + return self._command_response + else: + data = self._ssh_shell.recv(256) + self._log_messages( + "response-%s: %s" % (window_count + 1, data) + ) + # when a channel stream is closed, received data will be empty + if not data: + break + + recv.write(data) + offset = recv.tell() - 256 if recv.tell() > 256 else 0 + recv.seek(offset) + + window = self._strip(recv.read()) + self._last_recv_window = window + window_count += 1 + + if prompts and not handled: + handled = self._handle_prompt( + window, prompts, answer, newline, False, check_all + ) + matched_prompt_window = window_count + elif ( + prompts + and handled + and prompt_retry_check + and matched_prompt_window + 1 == window_count + ): + # check again even when handled, if same prompt repeats in next window + # (like in the case of a wrong enable password, etc) indicates + # value of answer is wrong, report this as error. 
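+ # NOTE: an illustrative recap of how the two timers in this loop interact, not
+ # the code verbatim: once a prompt matches, a short one-shot timer re-arms the
+ # read, and only if recv() stays silent for persistent_buffer_read_timeout
+ # seconds does AnsibleCmdRespRecv end the command:
+ #
+ #     signal.signal(signal.SIGALRM, handle_buffer_read_timeout)  # raises AnsibleCmdRespRecv
+ #     signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout)  # e.g. 0.1 seconds
+ #     data = ssh_shell.recv(256)  # more data => prompt matched inside a chunk, keep reading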
+ if self._handle_prompt( + window, + prompts, + answer, + newline, + prompt_retry_check, + check_all, + ): + raise AnsibleConnectionFailure( + "For matched prompt '%s', answer is not valid" + % self._matched_cmd_prompt + ) + + if self._find_prompt(window): + self._last_response = recv.getvalue() + resp = self._strip(self._last_response) + self._command_response = self._sanitize(resp, command) + if buffer_read_timeout == 0.0: + # reset socket timeout to global timeout + self._ssh_shell.settimeout(cache_socket_timeout) + return self._command_response + else: + command_prompt_matched = True + + @ensure_connect + def send( + self, + command, + prompt=None, + answer=None, + newline=True, + sendonly=False, + prompt_retry_check=False, + check_all=False, + ): + """ + Sends the command to the device in the opened shell + """ + if check_all: + prompt_len = len(to_list(prompt)) + answer_len = len(to_list(answer)) + if prompt_len != answer_len: + raise AnsibleConnectionFailure( + "Number of prompts (%s) is not the same as that of answers (%s)" + % (prompt_len, answer_len) + ) + try: + cmd = b"%s\r" % command + self._history.append(cmd) + self._ssh_shell.sendall(cmd) + self._log_messages("send command: %s" % cmd) + if sendonly: + return + response = self.receive( + command, prompt, answer, newline, prompt_retry_check, check_all + ) + return to_text(response, errors="surrogate_then_replace") + except (socket.timeout, AttributeError): + self.queue_message("error", traceback.format_exc()) + raise AnsibleConnectionFailure( + "timeout value %s seconds reached while trying to send command: %s" + % (self._ssh_shell.gettimeout(), command.strip()) + ) + + def _handle_buffer_read_timeout(self, signum, frame): + self.queue_message( + "vvvv", + "Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds" + % self.get_option("persistent_buffer_read_timeout"), + ) + raise AnsibleCmdRespRecv() + + def _handle_command_timeout(self, signum, frame): + msg = ( + "command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide." + % self.get_option("persistent_command_timeout") + ) + self.queue_message("log", msg) + raise AnsibleConnectionFailure(msg) + + def _strip(self, data): + """ + Removes ANSI codes from device response + """ + for regex in self._terminal.ansi_re: + data = regex.sub(b"", data) + return data + + def _handle_prompt( + self, + resp, + prompts, + answer, + newline, + prompt_retry_check=False, + check_all=False, + ): + """ + Matches the command prompt and responds + + :arg resp: Byte string containing the raw response from the remote + :arg prompts: Sequence of byte strings that we consider prompts for input + :arg answer: Sequence of byte strings to send back to the remote if we find a prompt. + A carriage return is automatically appended to this string. + :param prompt_retry_check: Bool value for trying to detect more prompts + :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of + given prompt. + :returns: True if a prompt was found in ``resp``. If check_all is True, + it will return True only after all the prompts in the prompts list are matched. False otherwise.
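+
+ A hypothetical invocation (illustrative only):
+
+     handled = self._handle_prompt(
+         resp=b"Are you sure you want to continue? [y/n]: ",
+         prompts=[br"\[y/n\]"],
+         answer=b"y",
+         newline=True,
+     )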
+ """ + single_prompt = False + if not isinstance(prompts, list): + prompts = [prompts] + single_prompt = True + if not isinstance(answer, list): + answer = [answer] + prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts] + for index, regex in enumerate(prompts_regex): + match = regex.search(resp) + if match: + self._matched_cmd_prompt = match.group() + self._log_messages( + "matched command prompt: %s" % self._matched_cmd_prompt + ) + + # if prompt_retry_check is enabled to check if same prompt is + # repeated don't send answer again. + if not prompt_retry_check: + prompt_answer = ( + answer[index] if len(answer) > index else answer[0] + ) + self._ssh_shell.sendall(b"%s" % prompt_answer) + if newline: + self._ssh_shell.sendall(b"\r") + prompt_answer += b"\r" + self._log_messages( + "matched command prompt answer: %s" % prompt_answer + ) + if check_all and prompts and not single_prompt: + prompts.pop(0) + answer.pop(0) + return False + return True + return False + + def _sanitize(self, resp, command=None): + """ + Removes elements from the response before returning to the caller + """ + cleaned = [] + for line in resp.splitlines(): + if command and line.strip() == command.strip(): + continue + + for prompt in self._matched_prompt.strip().splitlines(): + if prompt.strip() in line: + break + else: + cleaned.append(line) + return b"\n".join(cleaned).strip() + + def _find_prompt(self, response): + """Searches the buffered response for a matching command prompt + """ + errored_response = None + is_error_message = False + + for regex in self._terminal_stderr_re: + if regex.search(response): + is_error_message = True + + # Check if error response ends with command prompt if not + # receive it buffered prompt + for regex in self._terminal_stdout_re: + match = regex.search(response) + if match: + errored_response = response + self._matched_pattern = regex.pattern + self._matched_prompt = match.group() + self._log_messages( + "matched error regex '%s' from response '%s'" + % (self._matched_pattern, errored_response) + ) + break + + if not is_error_message: + for regex in self._terminal_stdout_re: + match = regex.search(response) + if match: + self._matched_pattern = regex.pattern + self._matched_prompt = match.group() + self._log_messages( + "matched cli prompt '%s' with regex '%s' from response '%s'" + % ( + self._matched_prompt, + self._matched_pattern, + response, + ) + ) + if not errored_response: + return True + + if errored_response: + raise AnsibleConnectionFailure(errored_response) + + return False + + def _validate_timeout_value(self, timeout, timer_name): + if timeout < 0: + raise AnsibleConnectionFailure( + "'%s' timer value '%s' is invalid, value should be greater than or equal to zero." + % (timer_name, timeout) + ) + + def transport_test(self, connect_timeout): + """This method enables wait_for_connection to work. + + As it is used by wait_for_connection, it is called by that module's action plugin, + which is on the controller process, which means that nothing done on this instance + should impact the actual persistent connection... this check is for informational + purposes only and should be properly cleaned up. + """ + + # Force a fresh connect if for some reason we have connected before. 
+ self.close() + self._connect() + self.close() + + def _get_terminal_std_re(self, option): + terminal_std_option = self.get_option(option) + terminal_std_re = [] + + if terminal_std_option: + for item in terminal_std_option: + if "pattern" not in item: + raise AnsibleConnectionFailure( + "'pattern' is a required key for option '%s'," + " received option value is %s" % (option, item) + ) + pattern = br"%s" % to_bytes(item["pattern"]) + flag = item.get("flags", 0) + if flag: + flag = getattr(re, flag.split(".")[1]) + terminal_std_re.append(re.compile(pattern, flag)) + else: + # To maintain backward compatibility + terminal_std_re = getattr(self._terminal, option) + + return terminal_std_re diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py new file mode 100644 index 00000000..b29b4872 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py @@ -0,0 +1,97 @@ +# 2017 Red Hat Inc. +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Core Team +connection: persistent +short_description: Use a persistent unix socket for connection +description: +- This is a helper plugin to allow making other connections persistent. +options: + persistent_command_timeout: + type: int + description: + - Configures, in seconds, the amount of time to wait for a command to return from + the remote device. If this timer is exceeded before the command returns, the + connection plugin will raise an exception and close + default: 10 + ini: + - section: persistent_connection + key: command_timeout + env: + - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT + vars: + - name: ansible_command_timeout +""" +from ansible.executor.task_executor import start_connection +from ansible.plugins.connection import ConnectionBase +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import Connection as SocketConnection +from ansible.utils.display import Display + +display = Display() + + +class Connection(ConnectionBase): + """ Local based connections """ + + transport = "ansible.netcommon.persistent" + has_pipelining = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__( + play_context, new_stdin, *args, **kwargs + ) + self._task_uuid = to_text(kwargs.get("task_uuid", "")) + + def _connect(self): + self._connected = True + return self + + def exec_command(self, cmd, in_data=None, sudoable=True): + display.vvvv( + "exec_command(), socket_path=%s" % self.socket_path, + host=self._play_context.remote_addr, + ) + connection = SocketConnection(self.socket_path) + out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) + return 0, out, "" + + def put_file(self, in_path, out_path): + pass + + def fetch_file(self, in_path, out_path): + pass + + def close(self): + self._connected = False + + def run(self): + """Returns the path of the persistent connection socket. + + Attempts to ensure (within playcontext.timeout seconds) that the + socket path exists. If the path exists (or the timeout has expired), + returns the socket path. 
+ display.vvvv( + "starting connection from persistent connection plugin", + host=self._play_context.remote_addr, + ) + variables = { + "ansible_command_timeout": self.get_option( + "persistent_command_timeout" + ) + } + socket_path = start_connection( + self._play_context, variables, self._task_uuid + ) + display.vvvv( + "local domain socket path is %s" % socket_path, + host=self._play_context.remote_addr, + ) + setattr(self, "_socket_path", socket_path) + return socket_path diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py new file mode 100644 index 00000000..8789075a --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r"""options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote device over + the specified transport. The value of host is used as the destination address + for the transport. + type: str + required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. The + port value will default to port 830. + type: int + default: 830 + username: + description: + - Configures the username to use to authenticate the connection to the remote + device. This value is used to authenticate the SSH session. If the value is + not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) + will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to the remote device. This + value is used to authenticate the SSH session. If the value is not specified + in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will + be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device for + either connecting or sending commands. If the timeout is exceeded before the + operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to the remote device. This + value is the path to the key used to authenticate the SSH session. If the value + is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) + will be used instead. + type: path + hostkey_verify: + description: + - If set to C(yes), the ssh host key of the device must match an ssh key present + on the host. If set to C(no), the ssh host key of the device is not checked. + type: bool + default: true + look_for_keys: + description: + - Enables looking in the usual locations for the ssh keys (e.g.
:file:`~/.ssh/id_*`) + type: bool + default: true +notes: +- For information on using netconf see the :ref:`Platform Options guide using Netconf` +- For more information on using Ansible to manage network devices see the :ref:`Ansible + Network Guide ` +""" diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py new file mode 100644 index 00000000..ad65f6ef --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019 Ansible, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r"""options: {} +notes: +- This module is supported on C(ansible_network_os) network platforms. See the :ref:`Network + Platform Options ` for details. +""" diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py new file mode 100644 index 00000000..6ae47a73 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py @@ -0,0 +1,1186 @@ +# (c) 2014, Maciej Delmanowski +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
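+# A minimal usage sketch of the ipaddr() filter defined below (hypothetical,
+# editor-chosen values; assumes the netaddr library is installed):
+#
+#   ipaddr("192.0.2.5/24")             # -> "192.0.2.5/24" (valid, echoed back)
+#   ipaddr("192.0.2.5/24", "address")  # -> "192.0.2.5"
+#   ipaddr("192.0.2.5/24", "network")  # -> "192.0.2.0"
+#   ipaddr("not-an-ip")                # -> False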
+ +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from functools import partial +import types + +try: + import netaddr +except ImportError: + # in this case, we'll make the filters return error messages (see bottom) + netaddr = None +else: + + class mac_linux(netaddr.mac_unix): + pass + + mac_linux.word_fmt = "%.2x" + +from ansible import errors + + +# ---- IP address and network query helpers ---- +def _empty_ipaddr_query(v, vtype): + # We don't have any query to process, so just check what type the user + # expects, and return the IP address in a correct format + if v: + if vtype == "address": + return str(v.ip) + elif vtype == "network": + return str(v) + + +def _first_last(v): + if v.size == 2: + first_usable = int(netaddr.IPAddress(v.first)) + last_usable = int(netaddr.IPAddress(v.last)) + return first_usable, last_usable + elif v.size > 1: + first_usable = int(netaddr.IPAddress(v.first + 1)) + last_usable = int(netaddr.IPAddress(v.last - 1)) + return first_usable, last_usable + + +def _6to4_query(v, vtype, value): + if v.version == 4: + + if v.size == 1: + ipconv = str(v.ip) + elif v.size > 1: + if v.ip != v.network: + ipconv = str(v.ip) + else: + ipconv = False + + if ipaddr(ipconv, "public"): + numbers = list(map(int, ipconv.split("."))) + + try: + return "2002:{:02x}{:02x}:{:02x}{:02x}::1/48".format(*numbers) + except Exception: + return False + + elif v.version == 6: + if vtype == "address": + if ipaddr(str(v), "2002::/16"): + return value + elif vtype == "network": + if v.ip != v.network: + if ipaddr(str(v.ip), "2002::/16"): + return value + else: + return False + + +def _ip_query(v): + if v.size == 1: + return str(v.ip) + if v.size > 1: + # /31 networks in netaddr have no broadcast address + if v.ip != v.network or not v.broadcast: + return str(v.ip) + + +def _gateway_query(v): + if v.size > 1: + if v.ip != v.network: + return str(v.ip) + "/" + str(v.prefixlen) + + +def _address_prefix_query(v): + if v.size > 1: + if v.ip != v.network: + return str(v.ip) + "/" + str(v.prefixlen) + + +def _bool_ipaddr_query(v): + if v: + return True + + +def _broadcast_query(v): + if v.size > 2: + return str(v.broadcast) + + +def _cidr_query(v): + return str(v) + + +def _cidr_lookup_query(v, iplist, value): + try: + if v in iplist: + return value + except Exception: + return False + + +def _first_usable_query(v, vtype): + if vtype == "address": + "Does it make sense to raise an error" + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size == 2: + return str(netaddr.IPAddress(int(v.network))) + elif v.size > 1: + return str(netaddr.IPAddress(int(v.network) + 1)) + + +def _host_query(v): + if v.size == 1: + return str(v) + elif v.size > 1: + if v.ip != v.network: + return str(v.ip) + "/" + str(v.prefixlen) + + +def _hostmask_query(v): + return str(v.hostmask) + + +def _int_query(v, vtype): + if vtype == "address": + return int(v.ip) + elif vtype == "network": + return str(int(v.ip)) + "/" + str(int(v.prefixlen)) + + +def _ip_prefix_query(v): + if v.size == 2: + return str(v.ip) + "/" + str(v.prefixlen) + elif v.size > 1: + if v.ip != v.network: + return str(v.ip) + "/" + str(v.prefixlen) + + +def _ip_netmask_query(v): + if v.size == 2: + return str(v.ip) + " " + str(v.netmask) + elif v.size > 1: + if v.ip != v.network: + return str(v.ip) + " " + str(v.netmask) + + +""" +def _ip_wildcard_query(v): + if v.size == 2: + return str(v.ip) + ' ' + str(v.hostmask) + elif v.size > 1: + if 
v.ip != v.network: + return str(v.ip) + ' ' + str(v.hostmask) +""" + + +def _ipv4_query(v, value): + if v.version == 6: + try: + return str(v.ipv4()) + except Exception: + return False + else: + return value + + +def _ipv6_query(v, value): + if v.version == 4: + return str(v.ipv6()) + else: + return value + + +def _last_usable_query(v, vtype): + if vtype == "address": + "Does it make sense to raise an error" + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size > 1: + first_usable, last_usable = _first_last(v) + return str(netaddr.IPAddress(last_usable)) + + +def _link_local_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v.version == 4: + if ipaddr(str(v_ip), "169.254.0.0/24"): + return value + + elif v.version == 6: + if ipaddr(str(v_ip), "fe80::/10"): + return value + + +def _loopback_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v_ip.is_loopback(): + return value + + +def _multicast_query(v, value): + if v.is_multicast(): + return value + + +def _net_query(v): + if v.size > 1: + if v.ip == v.network: + return str(v.network) + "/" + str(v.prefixlen) + + +def _netmask_query(v): + return str(v.netmask) + + +def _network_query(v): + """Return the network of a given IP or subnet""" + return str(v.network) + + +def _network_id_query(v): + """Return the network of a given IP or subnet""" + return str(v.network) + + +def _network_netmask_query(v): + return str(v.network) + " " + str(v.netmask) + + +def _network_wildcard_query(v): + return str(v.network) + " " + str(v.hostmask) + + +def _next_usable_query(v, vtype): + if vtype == "address": + "Does it make sense to raise an error" + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size > 1: + first_usable, last_usable = _first_last(v) + next_ip = int(netaddr.IPAddress(int(v.ip) + 1)) + if next_ip >= first_usable and next_ip <= last_usable: + return str(netaddr.IPAddress(int(v.ip) + 1)) + + +def _peer_query(v, vtype): + if vtype == "address": + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size == 2: + return str(netaddr.IPAddress(int(v.ip) ^ 1)) + if v.size == 4: + if int(v.ip) % 4 == 0: + raise errors.AnsibleFilterError( + "Network address of /30 has no peer" + ) + if int(v.ip) % 4 == 3: + raise errors.AnsibleFilterError( + "Broadcast address of /30 has no peer" + ) + return str(netaddr.IPAddress(int(v.ip) ^ 3)) + raise errors.AnsibleFilterError("Not a point-to-point network") + + +def _prefix_query(v): + return int(v.prefixlen) + + +def _previous_usable_query(v, vtype): + if vtype == "address": + "Does it make sense to raise an error" + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size > 1: + first_usable, last_usable = _first_last(v) + previous_ip = int(netaddr.IPAddress(int(v.ip) - 1)) + if previous_ip >= first_usable and previous_ip <= last_usable: + return str(netaddr.IPAddress(int(v.ip) - 1)) + + +def _private_query(v, value): + if v.is_private(): + return value + + +def _public_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if ( + v_ip.is_unicast() + and not v_ip.is_private() + and not v_ip.is_loopback() + and not v_ip.is_netmask() + and not v_ip.is_hostmask() + ): + return value + + +def _range_usable_query(v, vtype): + if vtype == "address": + "Does it make sense to raise an error" + raise errors.AnsibleFilterError("Not a network address") + elif vtype == "network": + if v.size > 1: + first_usable, last_usable = 
_first_last(v) + first_usable = str(netaddr.IPAddress(first_usable)) + last_usable = str(netaddr.IPAddress(last_usable)) + return "{0}-{1}".format(first_usable, last_usable) + + +def _revdns_query(v): + v_ip = netaddr.IPAddress(str(v.ip)) + return v_ip.reverse_dns + + +def _size_query(v): + return v.size + + +def _size_usable_query(v): + if v.size == 1: + return 0 + elif v.size == 2: + return 2 + return v.size - 2 + + +def _subnet_query(v): + return str(v.cidr) + + +def _type_query(v): + if v.size == 1: + return "address" + if v.size > 1: + if v.ip != v.network: + return "address" + else: + return "network" + + +def _unicast_query(v, value): + if v.is_unicast(): + return value + + +def _version_query(v): + return v.version + + +def _wrap_query(v, vtype, value): + if v.version == 6: + if vtype == "address": + return "[" + str(v.ip) + "]" + elif vtype == "network": + return "[" + str(v.ip) + "]/" + str(v.prefixlen) + else: + return value + + +# ---- HWaddr query helpers ---- +def _bare_query(v): + v.dialect = netaddr.mac_bare + return str(v) + + +def _bool_hwaddr_query(v): + if v: + return True + + +def _int_hwaddr_query(v): + return int(v) + + +def _cisco_query(v): + v.dialect = netaddr.mac_cisco + return str(v) + + +def _empty_hwaddr_query(v, value): + if v: + return value + + +def _linux_query(v): + v.dialect = mac_linux + return str(v) + + +def _postgresql_query(v): + v.dialect = netaddr.mac_pgsql + return str(v) + + +def _unix_query(v): + v.dialect = netaddr.mac_unix + return str(v) + + +def _win_query(v): + v.dialect = netaddr.mac_eui48 + return str(v) + + +# ---- IP address and network filters ---- + +# Returns a minified list of subnets or a single subnet that spans all of +# the inputs. +def cidr_merge(value, action="merge"): + if not hasattr(value, "__iter__"): + raise errors.AnsibleFilterError( + "cidr_merge: expected iterable, got " + repr(value) + ) + + if action == "merge": + try: + return [str(ip) for ip in netaddr.cidr_merge(value)] + except Exception as e: + raise errors.AnsibleFilterError( + "cidr_merge: error in netaddr:\n%s" % e + ) + + elif action == "span": + # spanning_cidr needs at least two values + if len(value) == 0: + return None + elif len(value) == 1: + try: + return str(netaddr.IPNetwork(value[0])) + except Exception as e: + raise errors.AnsibleFilterError( + "cidr_merge: error in netaddr:\n%s" % e + ) + else: + try: + return str(netaddr.spanning_cidr(value)) + except Exception as e: + raise errors.AnsibleFilterError( + "cidr_merge: error in netaddr:\n%s" % e + ) + + else: + raise errors.AnsibleFilterError( + "cidr_merge: invalid action '%s'" % action + ) + + +def ipaddr(value, query="", version=False, alias="ipaddr"): + """ Check if string is an IP address or network and filter it """ + + query_func_extra_args = { + "": ("vtype",), + "6to4": ("vtype", "value"), + "cidr_lookup": ("iplist", "value"), + "first_usable": ("vtype",), + "int": ("vtype",), + "ipv4": ("value",), + "ipv6": ("value",), + "last_usable": ("vtype",), + "link-local": ("value",), + "loopback": ("value",), + "lo": ("value",), + "multicast": ("value",), + "next_usable": ("vtype",), + "peer": ("vtype",), + "previous_usable": ("vtype",), + "private": ("value",), + "public": ("value",), + "unicast": ("value",), + "range_usable": ("vtype",), + "wrap": ("vtype", "value"), + } + + query_func_map = { + "": _empty_ipaddr_query, + "6to4": _6to4_query, + "address": _ip_query, + "address/prefix": _address_prefix_query, # deprecate + "bool": _bool_ipaddr_query, + "broadcast": _broadcast_query, + "cidr": 
_cidr_query, + "cidr_lookup": _cidr_lookup_query, + "first_usable": _first_usable_query, + "gateway": _gateway_query, # deprecate + "gw": _gateway_query, # deprecate + "host": _host_query, + "host/prefix": _address_prefix_query, # deprecate + "hostmask": _hostmask_query, + "hostnet": _gateway_query, # deprecate + "int": _int_query, + "ip": _ip_query, + "ip/prefix": _ip_prefix_query, + "ip_netmask": _ip_netmask_query, + # 'ip_wildcard': _ip_wildcard_query, built then could not think of use case + "ipv4": _ipv4_query, + "ipv6": _ipv6_query, + "last_usable": _last_usable_query, + "link-local": _link_local_query, + "lo": _loopback_query, + "loopback": _loopback_query, + "multicast": _multicast_query, + "net": _net_query, + "next_usable": _next_usable_query, + "netmask": _netmask_query, + "network": _network_query, + "network_id": _network_id_query, + "network/prefix": _subnet_query, + "network_netmask": _network_netmask_query, + "network_wildcard": _network_wildcard_query, + "peer": _peer_query, + "prefix": _prefix_query, + "previous_usable": _previous_usable_query, + "private": _private_query, + "public": _public_query, + "range_usable": _range_usable_query, + "revdns": _revdns_query, + "router": _gateway_query, # deprecate + "size": _size_query, + "size_usable": _size_usable_query, + "subnet": _subnet_query, + "type": _type_query, + "unicast": _unicast_query, + "v4": _ipv4_query, + "v6": _ipv6_query, + "version": _version_query, + "wildcard": _hostmask_query, + "wrap": _wrap_query, + } + + vtype = None + + if not value: + return False + + elif value is True: + return False + + # Check if value is a list and parse each element + elif isinstance(value, (list, tuple, types.GeneratorType)): + + _ret = [] + for element in value: + if ipaddr(element, str(query), version): + _ret.append(ipaddr(element, str(query), version)) + + if _ret: + return _ret + else: + return list() + + # Check if value is a number and convert it to an IP address + elif str(value).isdigit(): + + # We don't know what IP version to assume, so let's check IPv4 first, + # then IPv6 + try: + if (not version) or (version and version == 4): + v = netaddr.IPNetwork("0.0.0.0/0") + v.value = int(value) + v.prefixlen = 32 + elif version and version == 6: + v = netaddr.IPNetwork("::/0") + v.value = int(value) + v.prefixlen = 128 + + # IPv4 didn't work the first time, so it definitely has to be IPv6 + except Exception: + try: + v = netaddr.IPNetwork("::/0") + v.value = int(value) + v.prefixlen = 128 + + # The value is too big for IPv6. Are you a nanobot? + except Exception: + return False + + # We got an IP address, let's mark it as such + value = str(v) + vtype = "address" + + # value has not been recognized, check if it's a valid IP string + else: + try: + v = netaddr.IPNetwork(value) + + # value is a valid IP string, check if user specified + # CIDR prefix or just an IP address, this will indicate default + # output format + try: + address, prefix = value.split("/") + vtype = "network" + except Exception: + vtype = "address" + + # value hasn't been recognized, maybe it's a numerical CIDR? 
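+        # (a "numerical CIDR" is a value such as "3221225985/24", where both
+        # halves are plain integers -- hypothetical illustration)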
+ except Exception: + try: + address, prefix = value.split("/") + address.isdigit() + address = int(address) + prefix.isdigit() + prefix = int(prefix) + + # It's not numerical CIDR, give up + except Exception: + return False + + # It is something, so let's try and build a CIDR from the parts + try: + v = netaddr.IPNetwork("0.0.0.0/0") + v.value = address + v.prefixlen = prefix + + # It's not a valid IPv4 CIDR + except Exception: + try: + v = netaddr.IPNetwork("::/0") + v.value = address + v.prefixlen = prefix + + # It's not a valid IPv6 CIDR. Give up. + except Exception: + return False + + # We have a valid CIDR, so let's write it in correct format + value = str(v) + vtype = "network" + + # We have a query string but it's not in the known query types. Check if + # that string is a valid subnet, if so, we can check later if given IP + # address/network is inside that specific subnet + try: + # ?? 6to4 and link-local were True here before. Should they still? + if ( + query + and (query not in query_func_map or query == "cidr_lookup") + and not str(query).isdigit() + and ipaddr(query, "network") + ): + iplist = netaddr.IPSet([netaddr.IPNetwork(query)]) + query = "cidr_lookup" + except Exception: + pass + + # This code checks if value maches the IP version the user wants, ie. if + # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()") + # If version does not match, return False + if version and v.version != version: + return False + + extras = [] + for arg in query_func_extra_args.get(query, tuple()): + extras.append(locals()[arg]) + try: + return query_func_map[query](v, *extras) + except KeyError: + try: + float(query) + if v.size == 1: + if vtype == "address": + return str(v.ip) + elif vtype == "network": + return str(v) + + elif v.size > 1: + try: + return str(v[query]) + "/" + str(v.prefixlen) + except Exception: + return False + + else: + return value + + except Exception: + raise errors.AnsibleFilterError( + alias + ": unknown filter type: %s" % query + ) + + return False + + +def ipmath(value, amount): + try: + if "/" in value: + ip = netaddr.IPNetwork(value).ip + else: + ip = netaddr.IPAddress(value) + except (netaddr.AddrFormatError, ValueError): + msg = "You must pass a valid IP address; {0} is invalid".format(value) + raise errors.AnsibleFilterError(msg) + + if not isinstance(amount, int): + msg = ( + "You must pass an integer for arithmetic; " + "{0} is not a valid integer" + ).format(amount) + raise errors.AnsibleFilterError(msg) + + return str(ip + amount) + + +def ipwrap(value, query=""): + try: + if isinstance(value, (list, tuple, types.GeneratorType)): + _ret = [] + for element in value: + if ipaddr(element, query, version=False, alias="ipwrap"): + _ret.append(ipaddr(element, "wrap")) + else: + _ret.append(element) + + return _ret + else: + _ret = ipaddr(value, query, version=False, alias="ipwrap") + if _ret: + return ipaddr(_ret, "wrap") + else: + return value + + except Exception: + return value + + +def ipv4(value, query=""): + return ipaddr(value, query, version=4, alias="ipv4") + + +def ipv6(value, query=""): + return ipaddr(value, query, version=6, alias="ipv6") + + +# Split given subnet into smaller subnets or find out the biggest subnet of +# a given IP address with given CIDR prefix +# Usage: +# +# - address or address/prefix | ipsubnet +# returns CIDR subnet of a given input +# +# - address/prefix | ipsubnet(cidr) +# returns number of possible subnets for given CIDR prefix +# +# - address/prefix | ipsubnet(cidr, index) +# returns new subnet with given 
CIDR prefix +# +# - address | ipsubnet(cidr) +# returns biggest subnet with given CIDR prefix that address belongs to +# +# - address | ipsubnet(cidr, index) +# returns next indexed subnet which contains given address +# +# - address/prefix | ipsubnet(subnet/prefix) +# return the index of the subnet in the subnet +def ipsubnet(value, query="", index="x"): + """ Manipulate IPv4/IPv6 subnets """ + + try: + vtype = ipaddr(value, "type") + if vtype == "address": + v = ipaddr(value, "cidr") + elif vtype == "network": + v = ipaddr(value, "subnet") + + value = netaddr.IPNetwork(v) + except Exception: + return False + query_string = str(query) + if not query: + return str(value) + + elif query_string.isdigit(): + vsize = ipaddr(v, "size") + query = int(query) + + try: + float(index) + index = int(index) + + if vsize > 1: + try: + return str(list(value.subnet(query))[index]) + except Exception: + return False + + elif vsize == 1: + try: + return str(value.supernet(query)[index]) + except Exception: + return False + + except Exception: + if vsize > 1: + try: + return str(len(list(value.subnet(query)))) + except Exception: + return False + + elif vsize == 1: + try: + return str(value.supernet(query)[0]) + except Exception: + return False + + elif query_string: + vtype = ipaddr(query, "type") + if vtype == "address": + v = ipaddr(query, "cidr") + elif vtype == "network": + v = ipaddr(query, "subnet") + else: + msg = "You must pass a valid subnet or IP address; {0} is invalid".format( + query_string + ) + raise errors.AnsibleFilterError(msg) + query = netaddr.IPNetwork(v) + for i, subnet in enumerate(query.subnet(value.prefixlen), 1): + if subnet == value: + return str(i) + msg = "{0} is not in the subnet {1}".format(value.cidr, query.cidr) + raise errors.AnsibleFilterError(msg) + return False + + +# Returns the nth host within a network described by value. +# Usage: +# +# - address or address/prefix | nthhost(nth) +# returns the nth host within the given network +def nthhost(value, query=""): + """ Get the nth host within a given network """ + try: + vtype = ipaddr(value, "type") + if vtype == "address": + v = ipaddr(value, "cidr") + elif vtype == "network": + v = ipaddr(value, "subnet") + + value = netaddr.IPNetwork(v) + except Exception: + return False + + if not query: + return False + + try: + nth = int(query) + if value.size > nth: + return value[nth] + + except ValueError: + return False + + return False + + +# Returns the next nth usable ip within a network described by value. +def next_nth_usable(value, offset): + try: + vtype = ipaddr(value, "type") + if vtype == "address": + v = ipaddr(value, "cidr") + elif vtype == "network": + v = ipaddr(value, "subnet") + + v = netaddr.IPNetwork(v) + except Exception: + return False + + if type(offset) != int: + raise errors.AnsibleFilterError("Must pass in an integer") + if v.size > 1: + first_usable, last_usable = _first_last(v) + nth_ip = int(netaddr.IPAddress(int(v.ip) + offset)) + if nth_ip >= first_usable and nth_ip <= last_usable: + return str(netaddr.IPAddress(int(v.ip) + offset)) + + +# Returns the previous nth usable ip within a network described by value. 
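+# Example (hypothetical values):
+#   "192.168.122.1/24" | next_nth_usable(2)      -> "192.168.122.3"
+#   "192.168.122.10/24" | previous_nth_usable(2) -> "192.168.122.8"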
+def previous_nth_usable(value, offset):
+    try:
+        vtype = ipaddr(value, "type")
+        if vtype == "address":
+            v = ipaddr(value, "cidr")
+        elif vtype == "network":
+            v = ipaddr(value, "subnet")
+
+        v = netaddr.IPNetwork(v)
+    except Exception:
+        return False
+
+    if type(offset) != int:
+        raise errors.AnsibleFilterError("Must pass in an integer")
+    if v.size > 1:
+        first_usable, last_usable = _first_last(v)
+        nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
+        if nth_ip >= first_usable and nth_ip <= last_usable:
+            return str(netaddr.IPAddress(int(v.ip) - offset))
+
+
+def _range_checker(ip_check, first, last):
+    """
+    Tests whether an IP address is within the bounds of the first and last address.
+
+    :param ip_check: The ip to test if it is within first and last.
+    :param first: The first IP in the range to test against.
+    :param last: The last IP in the range to test against.
+
+    :return: bool
+    """
+    if ip_check >= first and ip_check <= last:
+        return True
+    else:
+        return False
+
+
+def _address_normalizer(value):
+    """
+    Used to validate an address or network type and return it in a consistent format.
+    It is also intended for future use cases that are not currently available, such as an address range.
+
+    :param value: The string representation of an address or network.
+
+    :return: The address or network in the normalized form.
+    """
+    try:
+        vtype = ipaddr(value, "type")
+        if vtype == "address" or vtype == "network":
+            v = ipaddr(value, "subnet")
+    except Exception:
+        return False
+
+    return v
+
+
+def network_in_usable(value, test):
+    """
+    Checks whether 'test' is a usable address or addresses in 'value'.
+
+    :param value: The string representation of an address or network to test against.
+    :param test: The string representation of an address or network to validate if it is within the range of 'value'.
+
+    :return: bool
+    """
+    # normalize value and test variables into an ipaddr
+    v = _address_normalizer(value)
+    w = _address_normalizer(test)
+
+    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
+    v_first = ipaddr(ipaddr(v, "first_usable") or ipaddr(v, "address"), "int")
+    v_last = ipaddr(ipaddr(v, "last_usable") or ipaddr(v, "address"), "int")
+    w_first = ipaddr(ipaddr(w, "network") or ipaddr(w, "address"), "int")
+    w_last = ipaddr(ipaddr(w, "broadcast") or ipaddr(w, "address"), "int")
+
+    if _range_checker(w_first, v_first, v_last) and _range_checker(
+        w_last, v_first, v_last
+    ):
+        return True
+    else:
+        return False
+
+
+def network_in_network(value, test):
+    """
+    Checks whether the 'test' address or addresses are in 'value', including the broadcast and network addresses.
+
+    :param value: The network address or range to test against.
+    :param test: The address or network to validate if it is within the range of 'value'.
+
+    :return: bool
+    """
+    # normalize value and test variables into an ipaddr
+    v = _address_normalizer(value)
+    w = _address_normalizer(test)
+
+    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
+    v_first = ipaddr(ipaddr(v, "network") or ipaddr(v, "address"), "int")
+    v_last = ipaddr(ipaddr(v, "broadcast") or ipaddr(v, "address"), "int")
+    w_first = ipaddr(ipaddr(w, "network") or ipaddr(w, "address"), "int")
+    w_last = ipaddr(ipaddr(w, "broadcast") or ipaddr(w, "address"), "int")
+
+    if _range_checker(w_first, v_first, v_last) and _range_checker(
+        w_last, v_first, v_last
+    ):
+        return True
+    else:
+        return False
+
+
+def reduce_on_network(value, network):
+    """
+    Reduces a list of addresses to only the addresses that match a given network.
+
+    :param value: The list of addresses to filter on.
+    :param network: The network to validate against.
+
+    :return: The reduced list of addresses.
+    """
+    # normalize network variable into an ipaddr
+    n = _address_normalizer(network)
+
+    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
+    n_first = ipaddr(ipaddr(n, "network") or ipaddr(n, "address"), "int")
+    n_last = ipaddr(ipaddr(n, "broadcast") or ipaddr(n, "address"), "int")
+
+    # create an empty list to fill and return
+    r = []
+
+    for address in value:
+        # normalize address variables into an ipaddr
+        a = _address_normalizer(address)
+
+        # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
+        a_first = ipaddr(ipaddr(a, "network") or ipaddr(a, "address"), "int")
+        a_last = ipaddr(ipaddr(a, "broadcast") or ipaddr(a, "address"), "int")
+
+        if _range_checker(a_first, n_first, n_last) and _range_checker(
+            a_last, n_first, n_last
+        ):
+            r.append(address)
+
+    return r
+
+
+# Returns the SLAAC address within a network for a given HW/MAC address.
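+# Example (hypothetical values):
+#   "fdcf:1894:23b5:d38c::/64" | slaac("c2:31:b3:83:bf:2b")
+#   -> "fdcf:1894:23b5:d38c:c031:b3ff:fe83:bf2b"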
+# Usage: +# +# - prefix | slaac(mac) +def slaac(value, query=""): + """ Get the SLAAC address within given network """ + try: + vtype = ipaddr(value, "type") + if vtype == "address": + v = ipaddr(value, "cidr") + elif vtype == "network": + v = ipaddr(value, "subnet") + + if ipaddr(value, "version") != 6: + return False + + value = netaddr.IPNetwork(v) + except Exception: + return False + + if not query: + return False + + try: + mac = hwaddr(query, alias="slaac") + + eui = netaddr.EUI(mac) + except Exception: + return False + + return eui.ipv6(value.network) + + +# ---- HWaddr / MAC address filters ---- +def hwaddr(value, query="", alias="hwaddr"): + """ Check if string is a HW/MAC address and filter it """ + + query_func_extra_args = {"": ("value",)} + + query_func_map = { + "": _empty_hwaddr_query, + "bare": _bare_query, + "bool": _bool_hwaddr_query, + "int": _int_hwaddr_query, + "cisco": _cisco_query, + "eui48": _win_query, + "linux": _linux_query, + "pgsql": _postgresql_query, + "postgresql": _postgresql_query, + "psql": _postgresql_query, + "unix": _unix_query, + "win": _win_query, + } + + try: + v = netaddr.EUI(value) + except Exception: + if query and query != "bool": + raise errors.AnsibleFilterError( + alias + ": not a hardware address: %s" % value + ) + + extras = [] + for arg in query_func_extra_args.get(query, tuple()): + extras.append(locals()[arg]) + try: + return query_func_map[query](v, *extras) + except KeyError: + raise errors.AnsibleFilterError( + alias + ": unknown filter type: %s" % query + ) + + return False + + +def macaddr(value, query=""): + return hwaddr(value, query, alias="macaddr") + + +def _need_netaddr(f_name, *args, **kwargs): + raise errors.AnsibleFilterError( + "The %s filter requires python's netaddr be " + "installed on the ansible controller" % f_name + ) + + +def ip4_hex(arg, delimiter=""): + """ Convert an IPv4 address to Hexadecimal notation """ + numbers = list(map(int, arg.split("."))) + return "{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}".format( + *numbers, sep=delimiter + ) + + +# ---- Ansible filters ---- +class FilterModule(object): + """ IP address and network manipulation filters """ + + filter_map = { + # IP addresses and networks + "cidr_merge": cidr_merge, + "ipaddr": ipaddr, + "ipmath": ipmath, + "ipwrap": ipwrap, + "ip4_hex": ip4_hex, + "ipv4": ipv4, + "ipv6": ipv6, + "ipsubnet": ipsubnet, + "next_nth_usable": next_nth_usable, + "network_in_network": network_in_network, + "network_in_usable": network_in_usable, + "reduce_on_network": reduce_on_network, + "nthhost": nthhost, + "previous_nth_usable": previous_nth_usable, + "slaac": slaac, + # MAC / HW addresses + "hwaddr": hwaddr, + "macaddr": macaddr, + } + + def filters(self): + if netaddr: + return self.filter_map + else: + # Need to install python's netaddr for these filters to work + return dict( + (f, partial(_need_netaddr, f)) for f in self.filter_map + ) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py new file mode 100644 index 00000000..f99e6e76 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py @@ -0,0 +1,531 @@ +# +# {c) 2017 Red Hat, Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +import os +import traceback +import string + +from xml.etree.ElementTree import fromstring + +from ansible.module_utils._text import to_native, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + Template, +) +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils.common._collections_compat import Mapping +from ansible.errors import AnsibleError, AnsibleFilterError +from ansible.utils.display import Display +from ansible.utils.encrypt import passlib_or_crypt, random_password + +try: + import yaml + + HAS_YAML = True +except ImportError: + HAS_YAML = False + +try: + import textfsm + + HAS_TEXTFSM = True +except ImportError: + HAS_TEXTFSM = False + +display = Display() + + +def re_matchall(regex, value): + objects = list() + for match in re.findall(regex.pattern, value, re.M): + obj = {} + if regex.groupindex: + for name, index in iteritems(regex.groupindex): + if len(regex.groupindex) == 1: + obj[name] = match + else: + obj[name] = match[index - 1] + objects.append(obj) + return objects + + +def re_search(regex, value): + obj = {} + match = regex.search(value, re.M) + if match: + items = list(match.groups()) + if regex.groupindex: + for name, index in iteritems(regex.groupindex): + obj[name] = items[index - 1] + return obj + + +def parse_cli(output, tmpl): + if not isinstance(output, string_types): + raise AnsibleError( + "parse_cli input should be a string, but was given a input of %s" + % (type(output)) + ) + + if not os.path.exists(tmpl): + raise AnsibleError("unable to locate parse_cli template: %s" % tmpl) + + try: + template = Template() + except ImportError as exc: + raise AnsibleError(to_native(exc)) + + with open(tmpl) as tmpl_fh: + tmpl_content = tmpl_fh.read() + + spec = yaml.safe_load(tmpl_content) + obj = {} + + for name, attrs in iteritems(spec["keys"]): + value = attrs["value"] + + try: + variables = spec.get("vars", {}) + value = template(value, variables) + except Exception: + pass + + if "start_block" in attrs and "end_block" in attrs: + start_block = re.compile(attrs["start_block"]) + end_block = re.compile(attrs["end_block"]) + + blocks = list() + lines = None + block_started = False + + for line in output.split("\n"): + match_start = start_block.match(line) + match_end = end_block.match(line) + + if match_start: + lines = list() + lines.append(line) + block_started = True + + elif match_end: + if lines: + lines.append(line) + blocks.append("\n".join(lines)) + block_started = False + + elif block_started: + if lines: + lines.append(line) + + regex_items = [re.compile(r) for r in attrs["items"]] + objects = list() + + for block in blocks: + if isinstance(value, Mapping) and "key" not in value: + items = list() + for regex in 
regex_items: + match = regex.search(block) + if match: + item_values = match.groupdict() + item_values["match"] = list(match.groups()) + items.append(item_values) + else: + items.append(None) + + obj = {} + for k, v in iteritems(value): + try: + obj[k] = template( + v, {"item": items}, fail_on_undefined=False + ) + except Exception: + obj[k] = None + objects.append(obj) + + elif isinstance(value, Mapping): + items = list() + for regex in regex_items: + match = regex.search(block) + if match: + item_values = match.groupdict() + item_values["match"] = list(match.groups()) + items.append(item_values) + else: + items.append(None) + + key = template(value["key"], {"item": items}) + values = dict( + [ + (k, template(v, {"item": items})) + for k, v in iteritems(value["values"]) + ] + ) + objects.append({key: values}) + + return objects + + elif "items" in attrs: + regexp = re.compile(attrs["items"]) + when = attrs.get("when") + conditional = ( + "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when + ) + + if isinstance(value, Mapping) and "key" not in value: + values = list() + + for item in re_matchall(regexp, output): + entry = {} + + for item_key, item_value in iteritems(value): + entry[item_key] = template(item_value, {"item": item}) + + if when: + if template(conditional, {"item": entry}): + values.append(entry) + else: + values.append(entry) + + obj[name] = values + + elif isinstance(value, Mapping): + values = dict() + + for item in re_matchall(regexp, output): + entry = {} + + for item_key, item_value in iteritems(value["values"]): + entry[item_key] = template(item_value, {"item": item}) + + key = template(value["key"], {"item": item}) + + if when: + if template( + conditional, {"item": {"key": key, "value": entry}} + ): + values[key] = entry + else: + values[key] = entry + + obj[name] = values + + else: + item = re_search(regexp, output) + obj[name] = template(value, {"item": item}) + + else: + obj[name] = value + + return obj + + +def parse_cli_textfsm(value, template): + if not HAS_TEXTFSM: + raise AnsibleError( + "parse_cli_textfsm filter requires TextFSM library to be installed" + ) + + if not isinstance(value, string_types): + raise AnsibleError( + "parse_cli_textfsm input should be a string, but was given a input of %s" + % (type(value)) + ) + + if not os.path.exists(template): + raise AnsibleError( + "unable to locate parse_cli_textfsm template: %s" % template + ) + + try: + template = open(template) + except IOError as exc: + raise AnsibleError(to_native(exc)) + + re_table = textfsm.TextFSM(template) + fsm_results = re_table.ParseText(value) + + results = list() + for item in fsm_results: + results.append(dict(zip(re_table.header, item))) + + return results + + +def _extract_param(template, root, attrs, value): + + key = None + when = attrs.get("when") + conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when + param_to_xpath_map = attrs["items"] + + if isinstance(value, Mapping): + key = value.get("key", None) + if key: + value = value["values"] + + entries = dict() if key else list() + + for element in root.findall(attrs["top"]): + entry = dict() + item_dict = dict() + for param, param_xpath in iteritems(param_to_xpath_map): + fields = None + try: + fields = element.findall(param_xpath) + except Exception: + display.warning( + "Failed to evaluate value of '%s' with XPath '%s'.\nUnexpected error: %s." + % (param, param_xpath, traceback.format_exc()) + ) + + tags = param_xpath.split("/") + + # check if xpath ends with attribute. 
+            # If it does, set the attribute key/value dict as the param value
+            # when the attribute matches; otherwise, for a normal xpath, assign
+            # the matched element's text value.
+            if len(tags) and tags[-1].endswith("]"):
+                if fields:
+                    if len(fields) > 1:
+                        item_dict[param] = [field.attrib for field in fields]
+                    else:
+                        item_dict[param] = fields[0].attrib
+                else:
+                    item_dict[param] = {}
+            else:
+                if fields:
+                    if len(fields) > 1:
+                        item_dict[param] = [field.text for field in fields]
+                    else:
+                        item_dict[param] = fields[0].text
+                else:
+                    item_dict[param] = None
+
+        if isinstance(value, Mapping):
+            for item_key, item_value in iteritems(value):
+                entry[item_key] = template(item_value, {"item": item_dict})
+        else:
+            entry = template(value, {"item": item_dict})
+
+        if key:
+            expanded_key = template(key, {"item": item_dict})
+            if when:
+                if template(
+                    conditional,
+                    {"item": {"key": expanded_key, "value": entry}},
+                ):
+                    entries[expanded_key] = entry
+            else:
+                entries[expanded_key] = entry
+        else:
+            if when:
+                if template(conditional, {"item": entry}):
+                    entries.append(entry)
+            else:
+                entries.append(entry)
+
+    return entries
+
+
+def parse_xml(output, tmpl):
+    if not os.path.exists(tmpl):
+        raise AnsibleError("unable to locate parse_xml template: %s" % tmpl)
+
+    if not isinstance(output, string_types):
+        raise AnsibleError(
+            "parse_xml works on string input, but was given an input of %s"
+            % type(output)
+        )
+
+    root = fromstring(output)
+    try:
+        template = Template()
+    except ImportError as exc:
+        raise AnsibleError(to_native(exc))
+
+    with open(tmpl) as tmpl_fh:
+        tmpl_content = tmpl_fh.read()
+
+    spec = yaml.safe_load(tmpl_content)
+    obj = {}
+
+    for name, attrs in iteritems(spec["keys"]):
+        value = attrs["value"]
+
+        try:
+            variables = spec.get("vars", {})
+            value = template(value, variables)
+        except Exception:
+            pass
+
+        if "items" in attrs:
+            obj[name] = _extract_param(template, root, attrs, value)
+        else:
+            obj[name] = value
+
+    return obj
+
+
+def type5_pw(password, salt=None):
+    if not isinstance(password, string_types):
+        raise AnsibleFilterError(
+            "type5_pw password input should be a string, but was given an input of %s"
+            % (type(password).__name__)
+        )
+
+    salt_chars = u"".join(
+        (to_text(string.ascii_letters), to_text(string.digits), u"./")
+    )
+    if salt is not None and not isinstance(salt, string_types):
+        raise AnsibleFilterError(
+            "type5_pw salt input should be a string, but was given an input of %s"
+            % (type(salt).__name__)
+        )
+    elif not salt:
+        salt = random_password(length=4, chars=salt_chars)
+    elif not set(salt) <= set(salt_chars):
+        raise AnsibleFilterError(
+            "type5_pw salt used improper characters, must be one of %s"
+            % (salt_chars)
+        )
+
+    encrypted_password = passlib_or_crypt(password, "md5_crypt", salt=salt)
+
+    return encrypted_password
+
+
+def hash_salt(password):
+
+    split_password = password.split("$")
+    if len(split_password) != 4:
+        raise AnsibleFilterError(
+            "Could not parse salt out of password correctly from {0}".format(
+                password
+            )
+        )
+    else:
+        return split_password[2]
+
+
+def comp_type5(
+    unencrypted_password, encrypted_password, return_original=False
+):
+
+    salt = hash_salt(encrypted_password)
+    if type5_pw(unencrypted_password, salt) == encrypted_password:
+        if return_original is True:
+            return encrypted_password
+        else:
+            return True
+    return False
+
+
+def vlan_parser(vlan_list, first_line_len=48, other_line_len=44):
+
+    """
+    Input: Unsorted list of vlan integers
+    Output: Sorted string list of integers according to IOS-like vlan list rules
+
+    1.
Vlans are listed in ascending order + 2. Runs of 3 or more consecutive vlans are listed with a dash + 3. The first line of the list can be first_line_len characters long + 4. Subsequent list lines can be other_line_len characters + """ + + # Sort and remove duplicates + sorted_list = sorted(set(vlan_list)) + + if sorted_list[0] < 1 or sorted_list[-1] > 4094: + raise AnsibleFilterError("Valid VLAN range is 1-4094") + + parse_list = [] + idx = 0 + while idx < len(sorted_list): + start = idx + end = start + while end < len(sorted_list) - 1: + if sorted_list[end + 1] - sorted_list[end] == 1: + end += 1 + else: + break + + if start == end: + # Single VLAN + parse_list.append(str(sorted_list[idx])) + elif start + 1 == end: + # Run of 2 VLANs + parse_list.append(str(sorted_list[start])) + parse_list.append(str(sorted_list[end])) + else: + # Run of 3 or more VLANs + parse_list.append( + str(sorted_list[start]) + "-" + str(sorted_list[end]) + ) + idx = end + 1 + + line_count = 0 + result = [""] + for vlans in parse_list: + # First line (" switchport trunk allowed vlan ") + if line_count == 0: + if len(result[line_count] + vlans) > first_line_len: + result.append("") + line_count += 1 + result[line_count] += vlans + "," + else: + result[line_count] += vlans + "," + + # Subsequent lines (" switchport trunk allowed vlan add ") + else: + if len(result[line_count] + vlans) > other_line_len: + result.append("") + line_count += 1 + result[line_count] += vlans + "," + else: + result[line_count] += vlans + "," + + # Remove trailing orphan commas + for idx in range(0, len(result)): + result[idx] = result[idx].rstrip(",") + + # Sometimes text wraps to next line, but there are no remaining VLANs + if "" in result: + result.remove("") + + return result + + +class FilterModule(object): + """Filters for working with output from network devices""" + + filter_map = { + "parse_cli": parse_cli, + "parse_cli_textfsm": parse_cli_textfsm, + "parse_xml": parse_xml, + "type5_pw": type5_pw, + "hash_salt": hash_salt, + "comp_type5": comp_type5, + "vlan_parser": vlan_parser, + } + + def filters(self): + return self.filter_map diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py new file mode 100644 index 00000000..8afb3e5e --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py @@ -0,0 +1,91 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Networking Team +httpapi: restconf +short_description: HttpApi Plugin for devices supporting Restconf API +description: +- This HttpApi plugin provides methods to connect to Restconf API endpoints. +options: + root_path: + type: str + description: + - Specifies the location of the Restconf root. + default: /restconf + vars: + - name: ansible_httpapi_restconf_root +""" + +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import ConnectionError +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.plugins.httpapi import HttpApiBase + + +CONTENT_TYPE = "application/yang-data+json" + + +class HttpApi(HttpApiBase): + def send_request(self, data, **message_kwargs): + if data: + data = json.dumps(data) + + path = "/".join( + [ + self.get_option("root_path").rstrip("/"), + message_kwargs.get("path", "").lstrip("/"), + ] + ) + + headers = { + "Content-Type": message_kwargs.get("content_type") or CONTENT_TYPE, + "Accept": message_kwargs.get("accept") or CONTENT_TYPE, + } + response, response_data = self.connection.send( + path, data, headers=headers, method=message_kwargs.get("method") + ) + + return handle_response(response, response_data) + + +def handle_response(response, response_data): + try: + response_data = json.loads(response_data.read()) + except ValueError: + response_data = response_data.read() + + if isinstance(response, HTTPError): + if response_data: + if "errors" in response_data: + errors = response_data["errors"]["error"] + error_text = "\n".join( + (error["error-message"] for error in errors) + ) + else: + error_text = response_data + + raise ConnectionError(error_text, code=response.code) + raise ConnectionError(to_text(response), code=response.code) + + return response_data diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py new file mode 100644 index 00000000..dc0a19f7 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py @@ -0,0 +1,2578 @@ +# -*- coding: utf-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file, and this file only, is based on +# Lib/ipaddress.py of cpython +# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# +# 1. This LICENSE AGREEMENT is between the Python Software Foundation +# ("PSF"), and the Individual or Organization ("Licensee") accessing and +# otherwise using this software ("Python") in source or binary form and +# its associated documentation. +# +# 2. Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +# analyze, test, perform and/or display publicly, prepare derivative works, +# distribute, and otherwise use Python alone or in any derivative version, +# provided, however, that PSF's License Agreement and PSF's notice of copyright, +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +# are retained in Python alone or in any derivative version prepared by Licensee. 
+# +# 3. In the event Licensee prepares a derivative work that is based on +# or incorporates Python or any part thereof, and wants to make +# the derivative work available to others as provided herein, then +# Licensee hereby agrees to include in any such work a brief summary of +# the changes made to Python. +# +# 4. PSF is making Python available to Licensee on an "AS IS" +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +# INFRINGE ANY THIRD PARTY RIGHTS. +# +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. +# +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. +# +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote +# products or services of Licensee, or any third party. +# +# 8. By copying, installing or otherwise using Python, Licensee +# agrees to be bound by the terms and conditions of this License +# Agreement. + +# Copyright 2007 Google Inc. +# Licensed to PSF under a Contributor Agreement. + +"""A fast, lightweight IPv4/IPv6 manipulation library in Python. + +This library is used to create/poke/manipulate IPv4 and IPv6 addresses +and networks. 
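+
+A small illustrative example (editor-chosen values):
+
+    >>> str(ip_address(u"192.0.2.1"))
+    '192.0.2.1'
+    >>> ip_address(u"192.0.2.1").version
+    4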
+ +""" + +from __future__ import unicode_literals + + +import itertools +import struct + + +# The following makes it easier for us to script updates of the bundled code and is not part of +# upstream +_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"} + +__version__ = "1.0.22" + +# Compatibility functions +_compat_int_types = (int,) +try: + _compat_int_types = (int, long) +except NameError: + pass +try: + _compat_str = unicode +except NameError: + _compat_str = str + assert bytes != str +if b"\0"[0] == 0: # Python 3 semantics + + def _compat_bytes_to_byte_vals(byt): + return byt + + +else: + + def _compat_bytes_to_byte_vals(byt): + return [struct.unpack(b"!B", b)[0] for b in byt] + + +try: + _compat_int_from_byte_vals = int.from_bytes +except AttributeError: + + def _compat_int_from_byte_vals(bytvals, endianess): + assert endianess == "big" + res = 0 + for bv in bytvals: + assert isinstance(bv, _compat_int_types) + res = (res << 8) + bv + return res + + +def _compat_to_bytes(intval, length, endianess): + assert isinstance(intval, _compat_int_types) + assert endianess == "big" + if length == 4: + if intval < 0 or intval >= 2 ** 32: + raise struct.error("integer out of range for 'I' format code") + return struct.pack(b"!I", intval) + elif length == 16: + if intval < 0 or intval >= 2 ** 128: + raise struct.error("integer out of range for 'QQ' format code") + return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF) + else: + raise NotImplementedError() + + +if hasattr(int, "bit_length"): + # Not int.bit_length , since that won't work in 2.7 where long exists + def _compat_bit_length(i): + return i.bit_length() + + +else: + + def _compat_bit_length(i): + for res in itertools.count(): + if i >> res == 0: + return res + + +def _compat_range(start, end, step=1): + assert step > 0 + i = start + while i < end: + yield i + i += step + + +class _TotalOrderingMixin(object): + __slots__ = () + + # Helper that derives the other comparison operations from + # __lt__ and __eq__ + # We avoid functools.total_ordering because it doesn't handle + # NotImplemented correctly yet (http://bugs.python.org/issue10042) + def __eq__(self, other): + raise NotImplementedError + + def __ne__(self, other): + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not equal + + def __lt__(self, other): + raise NotImplementedError + + def __le__(self, other): + less = self.__lt__(other) + if less is NotImplemented or not less: + return self.__eq__(other) + return less + + def __gt__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not (less or equal) + + def __ge__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + return not less + + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. 
+ + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + "%r does not appear to be an IPv4 or IPv6 address. " + "Did you pass in a bytes (str in Python 2) instead of" + " a unicode object?" % address + ) + + raise ValueError( + "%r does not appear to be an IPv4 or IPv6 address" % address + ) + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + "%r does not appear to be an IPv4 or IPv6 network. " + "Did you pass in a bytes (str in Python 2) instead of" + " a unicode object?" % address + ) + + raise ValueError( + "%r does not appear to be an IPv4 or IPv6 network" % address + ) + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. + + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError( + "%r does not appear to be an IPv4 or IPv6 interface" % address + ) + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return _compat_to_bytes(address, 4, "big") + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. 
+ + """ + try: + return _compat_to_bytes(address, 16, "big") + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = _compat_str(address).split("/") + if len(addr) > 2: + raise AddressValueError("Only one '/' permitted in %r" % address) + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) # pylint: disable=stop-iteration-return + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, _compat_bit_length(~number & (number - 1))) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. + + """ + if not ( + isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress) + ): + raise TypeError("first and last must be IP addresses, not networks") + if first.version != last.version: + raise TypeError( + "%s and %s are not of the same version" % (first, last) + ) + if first > last: + raise ValueError("last IP address must be greater than first") + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError("unknown IP version") + + ip_bits = first._max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min( + _count_righthand_zero_bits(first_int, ip_bits), + _compat_bit_length(last_int - first_int + 1) - 1, + ) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. 
+ + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, + # last.network_address <= net.network_address is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterator of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. + + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1]._version != ip._version: + raise TypeError( + "%s and %s are not of the same version" % (ip, ips[-1]) + ) + ips.append(ip) + elif ip._prefixlen == ip._max_prefixlen: + if ips and ips[-1]._version != ip._version: + raise TypeError( + "%s and %s are not of the same version" % (ip, ips[-1]) + ) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1]._version != ip._version: + raise TypeError( + "%s and %s are not of the same version" % (ip, nets[-1]) + ) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. 
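+
+    For illustration:
+
+        >>> sorted([IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24')],
+        ...        key=get_mixed_type_key)
+        [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]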
+ + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase(_TotalOrderingMixin): + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return _compat_str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + @property + def version(self): + msg = "%200s has no version specified" % (type(self),) + raise NotImplementedError(msg) + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError( + msg % (address, self._max_prefixlen, self._version) + ) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = ( + "%r (len %d != %d) is not permitted as an IPv%d address. " + "Did you pass in a bytes (str in Python 2) instead of" + " a unicode object?" + ) + raise AddressValueError( + msg % (address, address_len, expected_len, self._version) + ) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. + + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits( + ip_int, cls._max_prefixlen + ) + prefixlen = cls._max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls._max_prefixlen // 8 + details = _compat_to_bytes(ip_int, byteslen, "big") + msg = "Netmask pattern %r mixes zeroes & ones" + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = "%r is not a valid netmask" % netmask_str + raise NetmaskValueError(msg) + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. 
+ + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls._max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + def __reduce__(self): + return self.__class__, (_compat_str(self),) + + +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return self._ip == other._ip and self._version == other._version + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseAddress): + raise TypeError( + "%s and %s are not of the same type" % (self, other) + ) + if self._version != other._version: + raise TypeError( + "%s and %s are not of the same version" % (self, other) + ) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. This is not + # meant to ever support addition/subtraction of addresses. + def __add__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return _compat_str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self._version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + +class _BaseNetwork(_IPAddressBase): + + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + + """ + + def __init__(self, address): + self._cache = {} + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return "%s/%d" % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. 
+ + This is like __iter__ except it doesn't return the network + or broadcast addresses. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError("address out of range") + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError("address out of range") + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseNetwork): + raise TypeError( + "%s and %s are not of the same type" % (self, other) + ) + if self._version != other._version: + raise TypeError( + "%s and %s are not of the same version" % (self, other) + ) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return ( + self._version == other._version + and self.network_address == other.network_address + and int(self.netmask) == int(other.netmask) + ) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash(int(self.network_address) ^ int(self.netmask)) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self._version != other._version: + return False + # dealing with another network. + if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return ( + int(self.network_address) + <= int(other._ip) + <= int(self.broadcast_address) + ) + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other + or ( + other.network_address in self + or (other.broadcast_address in self) + ) + ) + + @property + def broadcast_address(self): + x = self._cache.get("broadcast_address") + if x is None: + x = self._address_class( + int(self.network_address) | int(self.hostmask) + ) + self._cache["broadcast_address"] = x + return x + + @property + def hostmask(self): + x = self._cache.get("hostmask") + if x is None: + x = self._address_class(int(self.netmask) ^ self._ALL_ONES) + self._cache["hostmask"] = x + return x + + @property + def with_prefixlen(self): + return "%s/%d" % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return "%s/%s" % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return "%s/%s" % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. 
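+        # Concrete subclasses override this (IPv4Network uses IPv4Address,
+        # IPv6Network uses IPv6Address); the base class itself has no
+        # sensible default, hence the error below.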
+ msg = "%200s has no associated address class" % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. + + For example: + + addr1 = ip_network('192.0.2.0/28') + addr2 = ip_network('192.0.2.1/32') + list(addr1.address_exclude(addr2)) = + [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), + IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] + + or IPv6: + + addr1 = ip_network('2001:db8::1/32') + addr2 = ip_network('2001:db8::1/128') + list(addr1.address_exclude(addr2)) = + [ip_network('2001:db8::1/128'), + ip_network('2001:db8::2/127'), + ip_network('2001:db8::4/126'), + ip_network('2001:db8::8/125'), + ... + ip_network('2001:db8:8000::/33')] + + Args: + other: An IPv4Network or IPv6Network object of the same type. + + Returns: + An iterator of the IPv(4|6)Network objects which is self + minus other. + + Raises: + TypeError: If self and other are of differing address + versions, or if other is not a network object. + ValueError: If other is not completely contained by self. + + """ + if not self._version == other._version: + raise TypeError( + "%s and %s are not of the same version" % (self, other) + ) + + if not isinstance(other, _BaseNetwork): + raise TypeError("%s is not a network object" % other) + + if not other.subnet_of(self): + raise ValueError("%s not contained in %s" % (other, self)) + if other == self: + return + + # Make sure we're comparing the network of other. + other = other.__class__( + "%s/%s" % (other.network_address, other.prefixlen) + ) + + s1, s2 = self.subnets() + while s1 != other and s2 != other: + if other.subnet_of(s1): + yield s2 + s1, s2 = s1.subnets() + elif other.subnet_of(s2): + yield s1 + s1, s2 = s2.subnets() + else: + # If we got here, there's a bug somewhere. + raise AssertionError( + "Error performing exclusion: " + "s1: %s s2: %s other: %s" % (s1, s2, other) + ) + if s1 == other: + yield s2 + elif s2 == other: + yield s1 + else: + # If we got here, there's a bug somewhere. + raise AssertionError( + "Error performing exclusion: " + "s1: %s s2: %s other: %s" % (s1, s2, other) + ) + + def compare_networks(self, other): + """Compare two IP objects. + + This is only concerned about the comparison of the integer + representation of the network addresses. This means that the + host bits aren't considered at all in this method. If you want + to compare host bits, you can easily enough do a + 'HostA._ip < HostB._ip' + + Args: + other: An IP object. + + Returns: + If the IP versions of self and other are the same, returns: + + -1 if self < other: + eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') + IPv6Network('2001:db8::1000/124') < + IPv6Network('2001:db8::2000/124') + 0 if self == other + eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') + IPv6Network('2001:db8::1000/124') == + IPv6Network('2001:db8::1000/124') + 1 if self > other + eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') + IPv6Network('2001:db8::2000/124') > + IPv6Network('2001:db8::1000/124') + + Raises: + TypeError if the IP versions are different. + + """ + # does this need to raise a ValueError? 
+ if self._version != other._version: + raise TypeError( + "%s and %s are not of the same type" % (self, other) + ) + # self._version == other._version below here: + if self.network_address < other.network_address: + return -1 + if self.network_address > other.network_address: + return 1 + # self.network_address == other.network_address below here: + if self.netmask < other.netmask: + return -1 + if self.netmask > other.netmask: + return 1 + return 0 + + def _get_networks_key(self): + """Network-only key function. + + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self._version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. + OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self._max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError("new prefix must be longer") + if prefixlen_diff != 1: + raise ValueError("cannot set prefixlen_diff and new_prefix") + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError("prefix length diff must be > 0") + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self._max_prefixlen: + raise ValueError( + "prefix length diff %d is invalid for netblock %s" + % (new_prefixlen, self) + ) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in _compat_range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. 
+ OR + If prefixlen_diff and new_prefix are both set or new_prefix is a + larger number than the current prefix (larger number means a + smaller network) + + """ + if self._prefixlen == 0: + return self + + if new_prefix is not None: + if new_prefix > self._prefixlen: + raise ValueError("new prefix must be shorter") + if prefixlen_diff != 1: + raise ValueError("cannot set prefixlen_diff and new_prefix") + prefixlen_diff = self._prefixlen - new_prefix + + new_prefixlen = self.prefixlen - prefixlen_diff + if new_prefixlen < 0: + raise ValueError( + "current prefixlen is %d, cannot have a prefixlen_diff of %d" + % (self.prefixlen, prefixlen_diff) + ) + return self.__class__( + ( + int(self.network_address) + & (int(self.netmask) << prefixlen_diff), + new_prefixlen, + ) + ) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return ( + self.network_address.is_multicast + and self.broadcast_address.is_multicast + ) + + @staticmethod + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a._version != b._version: + raise TypeError( + "%s and %s are not of the same version" % (a, b) + ) + return ( + b.network_address <= a.network_address + and b.broadcast_address >= a.broadcast_address + ) + except AttributeError: + raise TypeError( + "Unable to test subnet containment " + "between %s and %s" % (a, b) + ) + + def subnet_of(self, other): + """Return True if this network is a subnet of other.""" + return self._is_subnet_of(self, other) + + def supernet_of(self, other): + """Return True if this network is a supernet of other.""" + return self._is_subnet_of(other, self) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return ( + self.network_address.is_reserved + and self.broadcast_address.is_reserved + ) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return ( + self.network_address.is_link_local + and self.broadcast_address.is_link_local + ) + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return ( + self.network_address.is_private + and self.broadcast_address.is_private + ) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry or iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return ( + self.network_address.is_unspecified + and self.broadcast_address.is_unspecified + ) + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return ( + self.network_address.is_loopback + and self.broadcast_address.is_loopback + ) + + +class _BaseV4(object): + + """Base IPv4 object. 
+ + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2 ** IPV4LENGTH) - 1 + _DECIMAL_DIGITS = frozenset("0123456789") + + # the valid octets for host and netmasks. only useful for IPv4. + _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) + + _max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return _compat_str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. + + """ + if not ip_str: + raise AddressValueError("Address cannot be empty") + + octets = ip_str.split(".") + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return _compat_int_from_byte_vals( + map(cls._parse_octet, octets), "big" + ) + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._DECIMAL_DIGITS.issuperset(octet_str): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + # Any octets that look like they *might* be written in octal, + # and which don't look exactly the same in both octal and + # decimal are rejected as ambiguous + if octet_int > 7 and octet_str[0] == "0": + msg = "Ambiguous (octal/decimal) value in %r not permitted" + raise ValueError(msg % octet_str) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. 
+ + Returns: + The IP address as a string in dotted decimal notation. + + """ + return ".".join( + _compat_str( + struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b + ) + for b in _compat_to_bytes(ip_int, 4, "big") + ) + + def _is_hostmask(self, ip_str): + """Test if the IP string is a hostmask (rather than a netmask). + + Args: + ip_str: A string, the potential hostmask. + + Returns: + A boolean, True if the IP string is a hostmask. + + """ + bits = ip_str.split(".") + try: + parts = [x for x in map(int, bits) if x in self._valid_mask_octets] + except ValueError: + return False + if len(parts) != len(bits): + return False + if parts[0] < parts[-1]: + return True + return False + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = _compat_str(self).split(".")[::-1] + return ".".join(reverse_octets) + ".in-addr.arpa" + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ("_ip", "__weakref__") + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. + if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, "big") + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if "/" in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + return ( + self not in self._constants._public_network and not self.is_private + ) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. 
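+
+        For illustration, only IPv4Address('0.0.0.0') reports True here.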
+
+        """
+        return self == self._constants._unspecified_address
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback per RFC 3330.
+
+        """
+        return self in self._constants._loopback_network
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is link-local per RFC 3927.
+
+        """
+        return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+    def __init__(self, address):
+        if isinstance(address, (bytes, _compat_int_types)):
+            IPv4Address.__init__(self, address)
+            self.network = IPv4Network(self._ip)
+            self._prefixlen = self._max_prefixlen
+            return
+
+        if isinstance(address, tuple):
+            IPv4Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+
+            self.network = IPv4Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
+
+        addr = _split_optional_netmask(address)
+        IPv4Address.__init__(self, addr[0])
+
+        self.network = IPv4Network(address, strict=False)
+        self._prefixlen = self.network._prefixlen
+
+        self.netmask = self.network.netmask
+        self.hostmask = self.network.hostmask
+
+    def __str__(self):
+        return "%s/%d" % (
+            self._string_from_ip_int(self._ip),
+            self.network.prefixlen,
+        )
+
+    def __eq__(self, other):
+        address_equal = IPv4Address.__eq__(self, other)
+        if not address_equal or address_equal is NotImplemented:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv4Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (
+                self.network < other.network
+                or self.network == other.network
+                and address_less
+            )
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv4Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+    """This class represents and manipulates 32-bit IPv4 network
+    addresses.
+
+    Attributes: [examples for IPv4Network('192.0.2.0/27')]
+        .network_address: IPv4Address('192.0.2.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+
+    # Class to use when creating address objects
+    _address_class = IPv4Address
+
+    def __init__(self, address, strict=True):
+
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+                '192.0.2.0/24'
+                '192.0.2.0/255.255.255.0'
+                '192.0.0.2/0.0.0.255'
+                are all functionally the same in IPv4.
Similarly, + '192.0.2.1' + '192.0.2.1/255.255.255.255' + '192.0.2.1/32' + are also functionally equivalent. That is to say, failing to + provide a subnetmask will create an object with a mask of /32. + + If the mask (portion after the / in the argument) is given in + dotted quad form, it is treated as a netmask if it starts with a + non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it + starts with a zero field (e.g. 0.255.255.255 == /8), with the + single exception of an all-zero mask which is treated as a + netmask == /0. If no mask is given, a default of /32 is used. + + Additionally, an integer can be passed, so + IPv4Network('192.0.2.1') == IPv4Network(3221225985) + or, more generally + IPv4Interface(int(IPv4Interface('192.0.2.1'))) == + IPv4Interface('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + NetmaskValueError: If the netmask isn't valid for + an IPv4 address. + ValueError: If strict is True and a network address is not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Constructing from a packed address or integer + if isinstance(address, (_compat_int_types, bytes)): + self.network_address = IPv4Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen + ) + # fixme: address/network test here. + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + # We weren't given an address[1] + arg = self._max_prefixlen + self.network_address = IPv4Address(address[0]) + self.netmask, self._prefixlen = self._make_netmask(arg) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError("%s has host bits set" % self) + else: + self.network_address = IPv4Address( + packed & int(self.netmask) + ) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if ( + IPv4Address(int(self.network_address) & int(self.netmask)) + != self.network_address + ): + raise ValueError("%s has host bits set" % self) + self.network_address = IPv4Address( + int(self.network_address) & int(self.netmask) + ) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry. 
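+
+        For illustration, IPv4Network('8.8.8.0/24').is_global is True,
+        while IPv4Network('10.0.0.0/8').is_global is False.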
+ + """ + return ( + not ( + self.network_address in IPv4Network("100.64.0.0/10") + and self.broadcast_address in IPv4Network("100.64.0.0/10") + ) + and not self.is_private + ) + + +class _IPv4Constants(object): + + _linklocal_network = IPv4Network("169.254.0.0/16") + + _loopback_network = IPv4Network("127.0.0.0/8") + + _multicast_network = IPv4Network("224.0.0.0/4") + + _public_network = IPv4Network("100.64.0.0/10") + + _private_networks = [ + IPv4Network("0.0.0.0/8"), + IPv4Network("10.0.0.0/8"), + IPv4Network("127.0.0.0/8"), + IPv4Network("169.254.0.0/16"), + IPv4Network("172.16.0.0/12"), + IPv4Network("192.0.0.0/29"), + IPv4Network("192.0.0.170/31"), + IPv4Network("192.0.2.0/24"), + IPv4Network("192.168.0.0/16"), + IPv4Network("198.18.0.0/15"), + IPv4Network("198.51.100.0/24"), + IPv4Network("203.0.113.0/24"), + IPv4Network("240.0.0.0/4"), + IPv4Network("255.255.255.255/32"), + ] + + _reserved_network = IPv4Network("240.0.0.0/4") + + _unspecified_address = IPv4Address("0.0.0.0") + + +IPv4Address._constants = _IPv4Constants + + +class _BaseV6(object): + + """Base IPv6 object. + + The following methods are used by IPv6 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 6 + _ALL_ONES = (2 ** IPV6LENGTH) - 1 + _HEXTET_COUNT = 8 + _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef") + _max_prefixlen = IPV6LENGTH + + # There are only a bunch of valid v6 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + prefixlen = cls._prefix_from_prefix_string(arg) + netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn an IPv6 ip_str into an integer. + + Args: + ip_str: A string, the IPv6 ip_str. + + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError("Address cannot be empty") + + parts = ip_str.split(":") + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if "." in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF)) + parts.append("%x" % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + _max_parts = cls._HEXTET_COUNT + 1 + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % ( + _max_parts - 1, + ip_str, + ) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. 
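+        # For example, "2001:db8::1".split(":") yields
+        # ['2001', 'db8', '', '1']; the empty string at index 2 marks the
+        # compressed run of zero hextets.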
+ skip_index = None + for i in _compat_range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. + if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. + ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. 
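+
+        For illustration, ['2001', 'db8', '0', '0', '0', '0', '0', '1']
+        compresses to ['2001', 'db8', '', '1'], which ":".join() renders
+        as '2001:db8::1'.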
+ + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == "0": + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = ( + best_doublecolon_start + best_doublecolon_len + ) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [""] + hextets[best_doublecolon_start:best_doublecolon_end] = [""] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [""] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. + + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError("IPv6 address is too large") + + hex_str = "%032x" % ip_int + hextets = ["%x" % int(hex_str[x : x + 4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ":".join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + A string, the expanded IPv6 address. + + """ + if isinstance(self, IPv6Network): + ip_str = _compat_str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = _compat_str(self.ip) + else: + ip_str = _compat_str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = "%032x" % ip_int + parts = [hex_str[x : x + 4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return "%s/%d" % (":".join(parts), self._prefixlen) + return ":".join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(":", "") + return ".".join(reverse_chars) + ".ip6.arpa" + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ("_ip", "__weakref__") + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. 
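+        # (e.g. IPv6Address(1) == IPv6Address('::1'))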
+ if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, "big") + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if "/" in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return self in self._constants._sitelocal_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv6-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, true if the address is not reserved per + iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. 
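+
+        The server address is taken from bits 64-95, and the client
+        address is stored bit-inverted in the low 32 bits, as the
+        extraction below shows.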
+ + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return ( + IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF), + ) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. + + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + def __init__(self, address): + if isinstance(address, (bytes, _compat_int_types)): + IPv6Address.__init__(self, address) + self.network = IPv6Network(self._ip) + self._prefixlen = self._max_prefixlen + return + if isinstance(address, tuple): + IPv6Address.__init__(self, address[0]) + if len(address) > 1: + self._prefixlen = int(address[1]) + else: + self._prefixlen = self._max_prefixlen + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + return + + addr = _split_optional_netmask(address) + IPv6Address.__init__(self, addr[0]) + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + self.hostmask = self.network.hostmask + + def __str__(self): + return "%s/%d" % ( + self._string_from_ip_int(self._ip), + self.network.prefixlen, + ) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if not address_equal or address_equal is NotImplemented: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return NotImplemented + try: + return ( + self.network < other.network + or self.network == other.network + and address_less + ) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return self._ip ^ self._prefixlen ^ int(self.network.network_address) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen) + + @property + def with_netmask(self): + return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask) + + @property + def with_hostmask(self): + return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return self._ip == 1 and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. 
+ + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. + ValueError: If strict was True and a network address was not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Efficient constructor from integer or packed address + if isinstance(address, (bytes, _compat_int_types)): + self.network_address = IPv6Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen + ) + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + self.network_address = IPv6Address(address[0]) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError("%s has host bits set" % self) + else: + self.network_address = IPv6Address( + packed & int(self.netmask) + ) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + + self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if ( + IPv6Address(int(self.network_address) & int(self.netmask)) + != self.network_address + ): + raise ValueError("%s has host bits set" % self) + self.network_address = IPv6Address( + int(self.network_address) & int(self.netmask) + ) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the + Subnet-Router anycast address. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast + 1): + yield self._address_class(x) + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. 
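+
+        For illustration, IPv6Network('fec0::/10').is_site_local is True.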
+ + """ + return ( + self.network_address.is_site_local + and self.broadcast_address.is_site_local + ) + + +class _IPv6Constants(object): + + _linklocal_network = IPv6Network("fe80::/10") + + _multicast_network = IPv6Network("ff00::/8") + + _private_networks = [ + IPv6Network("::1/128"), + IPv6Network("::/128"), + IPv6Network("::ffff:0:0/96"), + IPv6Network("100::/64"), + IPv6Network("2001::/23"), + IPv6Network("2001:2::/48"), + IPv6Network("2001:db8::/32"), + IPv6Network("2001:10::/28"), + IPv6Network("fc00::/7"), + IPv6Network("fe80::/10"), + ] + + _reserved_networks = [ + IPv6Network("::/8"), + IPv6Network("100::/8"), + IPv6Network("200::/7"), + IPv6Network("400::/6"), + IPv6Network("800::/5"), + IPv6Network("1000::/4"), + IPv6Network("4000::/3"), + IPv6Network("6000::/3"), + IPv6Network("8000::/3"), + IPv6Network("A000::/3"), + IPv6Network("C000::/3"), + IPv6Network("E000::/4"), + IPv6Network("F000::/5"), + IPv6Network("F800::/6"), + IPv6Network("FE00::/9"), + ] + + _sitelocal_network = IPv6Network("fec0::/10") + + +IPv6Address._constants = _IPv6Constants diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py new file mode 100644 index 00000000..68608d1b --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py @@ -0,0 +1,27 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The base class for all resource modules +""" + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import ( + get_resource_connection, +) + + +class ConfigBase(object): + """ The base class for all resource modules + """ + + ACTION_STATES = ["merged", "replaced", "overridden", "deleted"] + + def __init__(self, module): + self._module = module + self.state = module.params["state"] + self._connection = None + + if self.state not in ["rendered", "parsed"]: + self._connection = get_resource_connection(module) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py new file mode 100644 index 00000000..bc458eb5 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py @@ -0,0 +1,473 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import re +import hashlib + +from ansible.module_utils.six.moves import zip +from ansible.module_utils._text import to_bytes, to_native + +DEFAULT_COMMENT_TOKENS = ["#", "!", "/*", "*/", "echo"] + +DEFAULT_IGNORE_LINES_RE = set( + [ + re.compile(r"Using \d+ out of \d+ bytes"), + re.compile(r"Building configuration"), + re.compile(r"Current configuration : \d+ bytes"), + ] +) + + +try: + Pattern = re._pattern_type +except AttributeError: + Pattern = re.Pattern + + +class ConfigLine(object): + def __init__(self, raw): + self.text = str(raw).strip() + self.raw = raw + self._children = list() + self._parents = list() + + def __str__(self): + return self.raw + + def __eq__(self, other): + return self.line == other.line + + def __ne__(self, other): + return not self.__eq__(other) + + def __getitem__(self, key): + for item in self._children: + if item.text == key: + return item + raise KeyError(key) + + @property + def line(self): + line = self.parents + line.append(self.text) + return " ".join(line) + + @property + def children(self): + return _obj_to_text(self._children) + + @property + def child_objs(self): + return self._children + + @property + def parents(self): + return _obj_to_text(self._parents) + + @property + def path(self): + config = _obj_to_raw(self._parents) + config.append(self.raw) + return "\n".join(config) + + @property + def has_children(self): + return len(self._children) > 0 + + @property + def has_parents(self): + return len(self._parents) > 0 + + def add_child(self, obj): + if not isinstance(obj, ConfigLine): + raise AssertionError("child must be of type `ConfigLine`") + self._children.append(obj) + + +def ignore_line(text, tokens=None): + for item in tokens or DEFAULT_COMMENT_TOKENS: + if text.startswith(item): + return True + for regex in DEFAULT_IGNORE_LINES_RE: + if regex.match(text): + return True + + +def _obj_to_text(x): + return [o.text for o in x] + + +def _obj_to_raw(x): + return [o.raw for o in x] + + +def _obj_to_block(objects, visited=None): + items = list() + for o in objects: + if o not in items: + items.append(o) + for child in o._children: + if child not in items: + items.append(child) + return _obj_to_raw(items) + + +def dumps(objects, output="block", comments=False): + if output == "block": + items = _obj_to_block(objects) + elif output == "commands": + items = _obj_to_text(objects) + elif output == "raw": + items = _obj_to_raw(objects) + else: + raise TypeError("unknown value supplied for keyword output") + + if output == "block": + if comments: + for index, item in 
enumerate(items): + nextitem = index + 1 + if ( + nextitem < len(items) + and not item.startswith(" ") + and items[nextitem].startswith(" ") + ): + item = "!\n%s" % item + items[index] = item + items.append("!") + items.append("end") + + return "\n".join(items) + + +class NetworkConfig(object): + def __init__(self, indent=1, contents=None, ignore_lines=None): + self._indent = indent + self._items = list() + self._config_text = None + + if ignore_lines: + for item in ignore_lines: + if not isinstance(item, Pattern): + item = re.compile(item) + DEFAULT_IGNORE_LINES_RE.add(item) + + if contents: + self.load(contents) + + @property + def items(self): + return self._items + + @property + def config_text(self): + return self._config_text + + @property + def sha1(self): + sha1 = hashlib.sha1() + sha1.update(to_bytes(str(self), errors="surrogate_or_strict")) + return sha1.digest() + + def __getitem__(self, key): + for line in self: + if line.text == key: + return line + raise KeyError(key) + + def __iter__(self): + return iter(self._items) + + def __str__(self): + return "\n".join([c.raw for c in self.items]) + + def __len__(self): + return len(self._items) + + def load(self, s): + self._config_text = s + self._items = self.parse(s) + + def loadfp(self, fp): + with open(fp) as f: + return self.load(f.read()) + + def parse(self, lines, comment_tokens=None): + toplevel = re.compile(r"\S") + childline = re.compile(r"^\s*(.+)$") + entry_reg = re.compile(r"([{};])") + + ancestors = list() + config = list() + + indents = [0] + + for linenum, line in enumerate( + to_native(lines, errors="surrogate_or_strict").split("\n") + ): + text = entry_reg.sub("", line).strip() + + cfg = ConfigLine(line) + + if not text or ignore_line(text, comment_tokens): + continue + + # handle top level commands + if toplevel.match(line): + ancestors = [cfg] + indents = [0] + + # handle sub level commands + else: + match = childline.match(line) + line_indent = match.start(1) + + if line_indent < indents[-1]: + while indents[-1] > line_indent: + indents.pop() + + if line_indent > indents[-1]: + indents.append(line_indent) + + curlevel = len(indents) - 1 + parent_level = curlevel - 1 + + cfg._parents = ancestors[:curlevel] + + if curlevel > len(ancestors): + config.append(cfg) + continue + + for i in range(curlevel, len(ancestors)): + ancestors.pop() + + ancestors.append(cfg) + ancestors[parent_level].add_child(cfg) + + config.append(cfg) + + return config + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + if item.parents == path[:-1]: + return item + + def get_block(self, path): + if not isinstance(path, list): + raise AssertionError("path argument must be a list object") + obj = self.get_object(path) + if not obj: + raise ValueError("path does not exist in config") + return self._expand_block(obj) + + def get_block_config(self, path): + block = self.get_block(path) + return dumps(block, "block") + + def _expand_block(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj._children: + if child in S: + continue + self._expand_block(child, S) + return S + + def _diff_line(self, other): + updates = list() + for item in self.items: + if item not in other: + updates.append(item) + return updates + + def _diff_strict(self, other): + updates = list() + # block extracted from other does not have all parents + # but the last one. In case of multiple parents we need + # to add additional parents. 
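+ # e.g. (illustrative, not from the upstream file): for a block fetched + # via get_block(["a", "b"]), `other` starts at the "b" line, so the + # outer parent "a" is re-inserted first and the positional line-by-line + # comparison below then starts at the same depth.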
+ if other and isinstance(other, list) and len(other) > 0: + start_other = other[0] + if start_other.parents: + for parent in start_other.parents: + other.insert(0, ConfigLine(parent)) + for index, line in enumerate(self.items): + try: + if str(line).strip() != str(other[index]).strip(): + updates.append(line) + except (AttributeError, IndexError): + updates.append(line) + return updates + + def _diff_exact(self, other): + updates = list() + if len(other) != len(self.items): + updates.extend(self.items) + else: + for ours, theirs in zip(self.items, other): + if ours != theirs: + updates.extend(self.items) + break + return updates + + def difference(self, other, match="line", path=None, replace=None): + """Perform a config diff against another network config + + :param other: instance of NetworkConfig to diff against + :param match: type of diff to perform. valid values are 'line', + 'strict', 'exact' + :param path: context in the network config to filter the diff + :param replace: the method used to generate the replacement lines. + valid values are 'block', 'line' + + :returns: a list of lines that are different + """ + if path and match != "line": + try: + other = other.get_block(path) + except ValueError: + other = list() + else: + other = other.items + + # generate a list of ConfigLines that aren't in other + meth = getattr(self, "_diff_%s" % match) + updates = meth(other) + + if replace == "block": + parents = list() + for item in updates: + if not item.has_parents: + parents.append(item) + else: + for p in item._parents: + if p not in parents: + parents.append(p) + + updates = list() + for item in parents: + updates.extend(self._expand_block(item)) + + visited = set() + expanded = list() + + for item in updates: + for p in item._parents: + if p.line not in visited: + visited.add(p.line) + expanded.append(p) + expanded.append(item) + visited.add(item.line) + + return expanded + + def add(self, lines, parents=None): + ancestors = list() + offset = 0 + obj = None + + # global config command + if not parents: + for line in lines: + # handle ignore lines + if ignore_line(line): + continue + + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_block(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self._indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj._parents = list(ancestors) + ancestors[-1]._children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in lines: + # handle ignore lines + if ignore_line(line): + continue + + # check if child already exists + for child in ancestors[-1]._children: + if child.text == line: + break + else: + offset = len(parents) * self._indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item._parents = ancestors + ancestors[-1]._children.append(item) + self.items.append(item) + + +class CustomNetworkConfig(NetworkConfig): + def items_text(self): + return [item.text for item in self.items] + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.child_objs: + if child in S: + continue + self.expand_section(child, S) + return S + + def to_block(self, section): + return "\n".join([item.raw for item in section]) + + def get_section(self, path): + try: + section =
self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError("path does not exist in config") + return self.expand_section(obj) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py new file mode 100644 index 00000000..477d3184 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py @@ -0,0 +1,162 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The facts base class +this contains methods common to all facts subsets +""" +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import ( + get_resource_connection, +) +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + """ + The facts base class + """ + + def __init__(self, module): + self._module = module + self._warnings = [] + self._gather_subset = module.params.get("gather_subset") + self._gather_network_resources = module.params.get( + "gather_network_resources" + ) + self._connection = None + if module.params.get("state") not in ["rendered", "parsed"]: + self._connection = get_resource_connection(module) + + self.ansible_facts = {"ansible_network_resources": {}} + self.ansible_facts["ansible_net_gather_network_resources"] = list() + self.ansible_facts["ansible_net_gather_subset"] = list() + + if not self._gather_subset: + self._gather_subset = ["!config"] + if not self._gather_network_resources: + self._gather_network_resources = ["!all"] + + def gen_runable(self, subsets, valid_subsets, resource_facts=False): + """ Generate the runable subset + + :param module: The module instance + :param subsets: The provided subsets + :param valid_subsets: The valid subsets + :param resource_facts: A boolean flag + :rtype: list + :returns: The runable subsets + """ + runable_subsets = set() + exclude_subsets = set() + minimal_gather_subset = set() + if not resource_facts: + minimal_gather_subset = frozenset(["default"]) + + for subset in subsets: + if subset == "all": + runable_subsets.update(valid_subsets) + continue + if subset == "min" and minimal_gather_subset: + runable_subsets.update(minimal_gather_subset) + continue + if subset.startswith("!"): + subset = subset[1:] + if subset == "min": + exclude_subsets.update(minimal_gather_subset) + continue + if subset == "all": + exclude_subsets.update( + valid_subsets - minimal_gather_subset + ) + continue + exclude = True + else: + exclude = False + + if subset not in valid_subsets: + self._module.fail_json( + msg="Subset must be one of [%s], got %s" + % ( + ", ".join(sorted([item for item in valid_subsets])), + subset, + ) + ) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(valid_subsets) + runable_subsets.difference_update(exclude_subsets) + return runable_subsets + + def get_network_resources_facts( + self, facts_resource_obj_map, resource_facts_type=None, data=None + ): + """ + :param fact_resource_subsets: + :param data: previously 
collected configuration + :return: + """ + if not resource_facts_type: + resource_facts_type = self._gather_network_resources + + restorun_subsets = self.gen_runable( + resource_facts_type, + frozenset(facts_resource_obj_map.keys()), + resource_facts=True, + ) + if restorun_subsets: + self.ansible_facts["ansible_net_gather_network_resources"] = list( + restorun_subsets + ) + instances = list() + for key in restorun_subsets: + fact_cls_obj = facts_resource_obj_map.get(key) + if fact_cls_obj: + instances.append(fact_cls_obj(self._module)) + else: + self._warnings.extend( + [ + "network resource fact gathering for '%s' is not supported" + % key + ] + ) + + for inst in instances: + inst.populate_facts(self._connection, self.ansible_facts, data) + + def get_network_legacy_facts( + self, fact_legacy_obj_map, legacy_facts_type=None + ): + if not legacy_facts_type: + legacy_facts_type = self._gather_subset + + runable_subsets = self.gen_runable( + legacy_facts_type, frozenset(fact_legacy_obj_map.keys()) + ) + if runable_subsets: + facts = dict() + # the default subset should always be returned with legacy facts subsets + if "default" not in runable_subsets: + runable_subsets.add("default") + self.ansible_facts["ansible_net_gather_subset"] = list( + runable_subsets + ) + + instances = list() + for key in runable_subsets: + instances.append(fact_legacy_obj_map[key](self._module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + self._warnings.extend(inst.warnings) + + for key, value in iteritems(facts): + key = "ansible_net_%s" % key + self.ansible_facts[key] = value diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py new file mode 100644 index 00000000..53a91e8c --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py @@ -0,0 +1,179 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2017 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +import sys + +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.connection import Connection, ConnectionError + +try: + from ncclient.xml_ import NCElement, new_ele, sub_ele + + HAS_NCCLIENT = True +except (ImportError, AttributeError): + HAS_NCCLIENT = False + +try: + from lxml.etree import Element, fromstring, XMLSyntaxError +except ImportError: + from xml.etree.ElementTree import Element, fromstring + + if sys.version_info < (2, 7): + from xml.parsers.expat import ExpatError as XMLSyntaxError + else: + from xml.etree.ElementTree import ParseError as XMLSyntaxError + +NS_MAP = {"nc": "urn:ietf:params:xml:ns:netconf:base:1.0"} + + +def exec_rpc(module, *args, **kwargs): + connection = NetconfConnection(module._socket_path) + return connection.execute_rpc(*args, **kwargs) + + +class NetconfConnection(Connection): + def __init__(self, socket_path): + super(NetconfConnection, self).__init__(socket_path) + + def __rpc__(self, name, *args, **kwargs): + """Executes the json-rpc and returns the output received + from remote device. + :name: rpc method to be executed over connection plugin that implements jsonrpc 2.0 + :args: Ordered list of params passed as arguments to rpc method + :kwargs: Dict of valid key, value pairs passed as arguments to rpc method + + For usage, refer to the respective connection plugin docs. + """ + self.check_rc = kwargs.pop("check_rc", True) + self.ignore_warning = kwargs.pop("ignore_warning", True) + + response = self._exec_jsonrpc(name, *args, **kwargs) + if "error" in response: + rpc_error = response["error"].get("data") + return self.parse_rpc_error( + to_bytes(rpc_error, errors="surrogate_then_replace") + ) + + return fromstring( + to_bytes(response["result"], errors="surrogate_then_replace") + ) + + def parse_rpc_error(self, rpc_error): + if self.check_rc: + try: + error_root = fromstring(rpc_error) + root = Element("root") + root.append(error_root) + + error_list = root.findall(".//nc:rpc-error", NS_MAP) + if not error_list: + raise ConnectionError( + to_text(rpc_error, errors="surrogate_then_replace") + ) + + warnings = [] + for error in error_list: + message_ele = error.find("./nc:error-message", NS_MAP) + + if message_ele is None: + message_ele = error.find("./nc:error-info", NS_MAP) + + message = ( + message_ele.text if message_ele is not None else None + ) + + severity = error.find("./nc:error-severity", NS_MAP).text + + if ( + severity == "warning" + and self.ignore_warning + and message is not None + ): + warnings.append(message) + else: + raise ConnectionError( + to_text(rpc_error, errors="surrogate_then_replace") + ) + return warnings + except XMLSyntaxError: + raise ConnectionError(rpc_error) + + +def transform_reply(): + return b"""<?xml version="1.0" encoding="UTF-8"?> + <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> + <xsl:output method="xml" indent="no"/> + + <xsl:template match="/|comment()|processing-instruction()"> + <xsl:copy> + <xsl:apply-templates/> + </xsl:copy> + </xsl:template> + + <xsl:template match="*"> + <xsl:element name="{local-name()}"> + <xsl:apply-templates select="@*|node()"/> + </xsl:element> + </xsl:template> + + <xsl:template match="@*"> + <xsl:attribute name="{local-name()}"> + <xsl:value-of select="."/> + </xsl:attribute> + </xsl:template> + </xsl:stylesheet> + """ + + +# Note: Workaround for ncclient 0.5.3 +def remove_namespaces(data): + if not HAS_NCCLIENT: + raise ImportError( + "ncclient is required but does not appear to be installed.
" + "It can be installed using `pip install ncclient`" + ) + return NCElement(data, transform_reply()).data_xml + + +def build_root_xml_node(tag): + return new_ele(tag) + + +def build_child_xml_node(parent, tag, text=None, attrib=None): + element = sub_ele(parent, tag) + if text: + element.text = to_text(text) + if attrib: + element.attrib.update(attrib) + return element + + +def build_subtree(parent, path): + element = parent + for field in path.split("/"): + sub_element = build_child_xml_node(element, field) + element = sub_element + return element diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py new file mode 100644 index 00000000..555fc713 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py @@ -0,0 +1,275 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2015 Peter Sprygada, +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import traceback +import json + +from ansible.module_utils._text import to_text, to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.connection import Connection, ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import ( + NetconfConnection, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import ( + Cli, +) +from ansible.module_utils.six import iteritems + + +NET_TRANSPORT_ARGS = dict( + host=dict(required=True), + port=dict(type="int"), + username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])), + password=dict( + no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]) + ), + ssh_keyfile=dict( + fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path" + ), + authorize=dict( + default=False, + fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]), + type="bool", + ), + auth_pass=dict( + no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"]) + ), + provider=dict(type="dict", no_log=True), + transport=dict(choices=list()), + timeout=dict(default=10, type="int"), +) + +NET_CONNECTION_ARGS = dict() + +NET_CONNECTIONS = dict() + + +def _transitional_argument_spec(): + argument_spec = {} + for key, value in iteritems(NET_TRANSPORT_ARGS): + value["required"] = False + argument_spec[key] = value + return argument_spec + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class ModuleStub(object): + def __init__(self, argument_spec, fail_json): + self.params = dict() + for key, value in argument_spec.items(): + self.params[key] = value.get("default") + self.fail_json = fail_json + + +class NetworkError(Exception): + def __init__(self, msg, **kwargs): + super(NetworkError, self).__init__(msg) + self.kwargs = kwargs + + +class Config(object): + def __init__(self, connection): + self.connection = connection + + def __call__(self, commands, **kwargs): + lines = to_list(commands) + return self.connection.configure(lines, **kwargs) + + def load_config(self, commands, **kwargs): + commands = to_list(commands) + return self.connection.load_config(commands, **kwargs) + + def get_config(self, **kwargs): + return self.connection.get_config(**kwargs) + + def save_config(self): + return self.connection.save_config() + + +class NetworkModule(AnsibleModule): + def __init__(self, *args, **kwargs): + connect_on_load = kwargs.pop("connect_on_load", True) + + argument_spec = NET_TRANSPORT_ARGS.copy() + argument_spec["transport"]["choices"] = NET_CONNECTIONS.keys() + argument_spec.update(NET_CONNECTION_ARGS.copy()) + + if kwargs.get("argument_spec"): + argument_spec.update(kwargs["argument_spec"]) + kwargs["argument_spec"] = argument_spec + + super(NetworkModule, self).__init__(*args, **kwargs) + + self.connection = None + self._cli = None + self._config = None + + try: + transport = self.params["transport"] or "__default__" + cls = NET_CONNECTIONS[transport] + self.connection = cls() + except KeyError: + self.fail_json( + msg="Unknown transport or no default transport specified" + ) + except (TypeError, NetworkError) as exc: + self.fail_json( + msg=to_native(exc), exception=traceback.format_exc() + ) + + if connect_on_load: + self.connect() + + @property + def cli(self): + if not self.connected: + self.connect() + if self._cli: + return self._cli + self._cli = Cli(self.connection) + return self._cli + + 
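+ # Both `cli` and `config` (below) are created lazily: first access + # triggers connect() when no connection exists yet, so a call such as + # module.config.get_config() (illustrative) opens the transport on demand.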
@property + def config(self): + if not self.connected: + self.connect() + if self._config: + return self._config + self._config = Config(self.connection) + return self._config + + @property + def connected(self): + return self.connection._connected + + def _load_params(self): + super(NetworkModule, self)._load_params() + provider = self.params.get("provider") or dict() + for key, value in provider.items(): + for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]: + if key in args: + if self.params.get(key) is None and value is not None: + self.params[key] = value + + def connect(self): + try: + if not self.connected: + self.connection.connect(self.params) + if self.params["authorize"]: + self.connection.authorize(self.params) + self.log( + "connected to %s:%s using %s" + % ( + self.params["host"], + self.params["port"], + self.params["transport"], + ) + ) + except NetworkError as exc: + self.fail_json( + msg=to_native(exc), exception=traceback.format_exc() + ) + + def disconnect(self): + try: + if self.connected: + self.connection.disconnect() + self.log("disconnected from %s" % self.params["host"]) + except NetworkError as exc: + self.fail_json( + msg=to_native(exc), exception=traceback.format_exc() + ) + + +def register_transport(transport, default=False): + def register(cls): + NET_CONNECTIONS[transport] = cls + if default: + NET_CONNECTIONS["__default__"] = cls + return cls + + return register + + +def add_argument(key, value): + NET_CONNECTION_ARGS[key] = value + + +def get_resource_connection(module): + if hasattr(module, "_connection"): + return module._connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api in ("cliconf", "nxapi", "eapi", "exosapi"): + module._connection = Connection(module._socket_path) + elif network_api == "netconf": + module._connection = NetconfConnection(module._socket_path) + elif network_api == "local": + # This isn't supported, but we shouldn't fail here. + # Set the connection to a fake connection so it fails sensibly. + module._connection = LocalResourceConnection(module) + else: + module.fail_json( + msg="Invalid connection type {0!s}".format(network_api) + ) + + return module._connection + + +def get_capabilities(module): + if hasattr(module, "_capabilities"): + return module._capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + except AssertionError: + # No socket_path, connection most likely local. + return dict(network_api="local") + module._capabilities = json.loads(capabilities) + + return module._capabilities + + +class LocalResourceConnection: + def __init__(self, module): + self.module = module + + def get(self, *args, **kwargs): + self.module.fail_json( + msg="Network resource modules not supported over local connection." + ) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py new file mode 100644 index 00000000..2dd1de9e --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py @@ -0,0 +1,316 @@ +# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2015 Peter Sprygada, +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import re +import shlex +import time + +from ansible.module_utils.parsing.convert_bool import ( + BOOLEANS_TRUE, + BOOLEANS_FALSE, +) +from ansible.module_utils.six import string_types, text_type +from ansible.module_utils.six.moves import zip + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class FailedConditionsError(Exception): + def __init__(self, msg, failed_conditions): + super(FailedConditionsError, self).__init__(msg) + self.failed_conditions = failed_conditions + + +class FailedConditionalError(Exception): + def __init__(self, msg, failed_conditional): + super(FailedConditionalError, self).__init__(msg) + self.failed_conditional = failed_conditional + + +class AddCommandError(Exception): + def __init__(self, msg, command): + super(AddCommandError, self).__init__(msg) + self.command = command + + +class AddConditionError(Exception): + def __init__(self, msg, condition): + super(AddConditionError, self).__init__(msg) + self.condition = condition + + +class Cli(object): + def __init__(self, connection): + self.connection = connection + self.default_output = connection.default_output or "text" + self._commands = list() + + @property + def commands(self): + return [str(c) for c in self._commands] + + def __call__(self, commands, output=None): + objects = list() + for cmd in to_list(commands): + objects.append(self.to_command(cmd, output)) + return self.connection.run_commands(objects) + + def to_command( + self, command, output=None, prompt=None, response=None, **kwargs + ): + output = output or self.default_output + if isinstance(command, Command): + return command + if isinstance(prompt, string_types): + prompt = re.compile(re.escape(prompt)) + return Command( + command, output, prompt=prompt, response=response, **kwargs + ) + + def add_commands(self, commands, output=None, **kwargs): + for cmd in commands: + 
self._commands.append(self.to_command(cmd, output, **kwargs)) + + def run_commands(self): + responses = self.connection.run_commands(self._commands) + for resp, cmd in zip(responses, self._commands): + cmd.response = resp + + # wipe out the commands list to avoid issues if additional + # commands are executed later + self._commands = list() + + return responses + + +class Command(object): + def __init__( + self, command, output=None, prompt=None, response=None, **kwargs + ): + + self.command = command + self.output = output + self.command_string = command + + self.prompt = prompt + self.response = response + + self.args = kwargs + + def __str__(self): + return self.command_string + + +class CommandRunner(object): + def __init__(self, module): + self.module = module + + self.items = list() + self.conditionals = set() + + self.commands = list() + + self.retries = 10 + self.interval = 1 + + self.match = "all" + + self._default_output = module.connection.default_output + + def add_command( + self, command, output=None, prompt=None, response=None, **kwargs + ): + if command in [str(c) for c in self.commands]: + raise AddCommandError( + "duplicated command detected", command=command + ) + cmd = self.module.cli.to_command( + command, output=output, prompt=prompt, response=response, **kwargs + ) + self.commands.append(cmd) + + def get_command(self, command, output=None): + for cmd in self.commands: + if cmd.command == command: + return cmd.response + raise ValueError("command '%s' not found" % command) + + def get_responses(self): + return [cmd.response for cmd in self.commands] + + def add_conditional(self, condition): + try: + self.conditionals.add(Conditional(condition)) + except AttributeError as exc: + raise AddConditionError(msg=str(exc), condition=condition) + + def run(self): + while self.retries > 0: + self.module.cli.add_commands(self.commands) + responses = self.module.cli.run_commands() + + for item in list(self.conditionals): + if item(responses): + if self.match == "any": + return item + self.conditionals.remove(item) + + if not self.conditionals: + break + + time.sleep(self.interval) + self.retries -= 1 + else: + failed_conditions = [item.raw for item in self.conditionals] + errmsg = ( + "One or more conditional statements have not been satisfied" + ) + raise FailedConditionsError(errmsg, failed_conditions) + + +class Conditional(object): + """Used in command modules to evaluate waitfor conditions + """ + + OPERATORS = { + "eq": ["eq", "=="], + "neq": ["neq", "ne", "!="], + "gt": ["gt", ">"], + "ge": ["ge", ">="], + "lt": ["lt", "<"], + "le": ["le", "<="], + "contains": ["contains"], + "matches": ["matches"], + } + + def __init__(self, conditional, encoding=None): + self.raw = conditional + self.negate = False + try: + components = shlex.split(conditional) + key, val = components[0], components[-1] + op_components = components[1:-1] + if "not" in op_components: + self.negate = True + op_components.pop(op_components.index("not")) + op = op_components[0] + + except ValueError: + raise ValueError("failed to parse conditional") + + self.key = key + self.func = self._func(op) + self.value = self._cast_value(val) + + def __call__(self, data): + value = self.get_value(dict(result=data)) + if not self.negate: + return self.func(value) + else: + return not self.func(value) + + def _cast_value(self, value): + if value in BOOLEANS_TRUE: + return True + elif value in BOOLEANS_FALSE: + return False + elif re.match(r"^\d+\.\d+$", value): + return float(value) + elif re.match(r"^\d+$", value): +
return int(value) + else: + return text_type(value) + + def _func(self, oper): + for func, operators in self.OPERATORS.items(): + if oper in operators: + return getattr(self, func) + raise AttributeError("unknown operator: %s" % oper) + + def get_value(self, result): + try: + return self.get_json(result) + except (IndexError, TypeError, AttributeError): + msg = "unable to apply conditional to result" + raise FailedConditionalError(msg, self.raw) + + def get_json(self, result): + string = re.sub(r"\[[\'|\"]", ".", self.key) + string = re.sub(r"[\'|\"]\]", ".", string) + parts = re.split(r"\.(?=[^\]]*(?:\[|$))", string) + for part in parts: + match = re.findall(r"\[(\S+?)\]", part) + if match: + key = part[: part.find("[")] + result = result[key] + for m in match: + try: + m = int(m) + except ValueError: + m = str(m) + result = result[m] + else: + result = result.get(part) + return result + + def number(self, value): + if "." in str(value): + return float(value) + else: + return int(value) + + def eq(self, value): + return value == self.value + + def neq(self, value): + return value != self.value + + def gt(self, value): + return self.number(value) > self.value + + def ge(self, value): + return self.number(value) >= self.value + + def lt(self, value): + return self.number(value) < self.value + + def le(self, value): + return self.number(value) <= self.value + + def contains(self, value): + return str(self.value) in value + + def matches(self, value): + match = re.search(self.value, value, re.M) + return match is not None diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py new file mode 100644 index 00000000..64eca157 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py @@ -0,0 +1,686 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Networking tools for network modules only + +import re +import ast +import operator +import socket +import json + +from itertools import chain + +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils import basic +from ansible.module_utils.parsing.convert_bool import boolean + +# Backwards compatibility for 3rd party modules +# TODO(pabelanger): With move to ansible.netcommon, we should clean this code +# up and have modules import directly themselves. +from ansible.module_utils.common.network import ( # noqa: F401 + to_bits, + is_netmask, + is_masklen, + to_netmask, + to_masklen, + to_subnet, + to_ipv6_network, + VALID_MASKS, +) + +try: + from jinja2 import Environment, StrictUndefined + from jinja2.exceptions import UndefinedError + + HAS_JINJA2 = True +except ImportError: + HAS_JINJA2 = False + + +OPERATORS = frozenset(["ge", "gt", "eq", "neq", "lt", "le"]) +ALIASES = frozenset( + [("min", "ge"), ("max", "le"), ("exactly", "eq"), ("neq", "ne")] +) + + +def to_list(val): + if isinstance(val, (list, tuple, set)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = to_text(item).split("\n") + yield item + + +def transform_commands(module): + transform = ComplexList( + dict( + command=dict(key=True), + output=dict(), + prompt=dict(type="list"), + answer=dict(type="list"), + newline=dict(type="bool", default=True), + sendonly=dict(type="bool", default=False), + check_all=dict(type="bool", default=False), + ), + module, + ) + + return transform(module.params["commands"]) + + +def sort_list(val): + if isinstance(val, list): + return sorted(val) + return val + + +class Entity(object): + """Transforms a dict to match an argument spec + + This class will take a dict and apply an Ansible argument spec to the + values. The resulting dict will contain all of the keys in the param + with appropriate values set.
+ + Example:: + + argument_spec = dict( + command=dict(key=True), + display=dict(default='text', choices=['text', 'json']), + validate=dict(type='bool') + ) + transform = Entity(module, argument_spec) + value = dict(command='foo') + result = transform(value) + print result + {'command': 'foo', 'display': 'text', 'validate': None} + + Supported argument spec: + * key - specifies how to map a single value to a dict + * read_from - read and apply the argument_spec from the module + * required - a value is required + * type - type of value (uses AnsibleModule type checker) + * fallback - implements fallback function + * choices - set of valid options + * default - default value + """ + + def __init__( + self, module, attrs=None, args=None, keys=None, from_argspec=False + ): + args = [] if args is None else args + + self._attributes = attrs or {} + self._module = module + + for arg in args: + self._attributes[arg] = dict() + if from_argspec: + self._attributes[arg]["read_from"] = arg + if keys and arg in keys: + self._attributes[arg]["key"] = True + + self.attr_names = frozenset(self._attributes.keys()) + + _has_key = False + + for name, attr in iteritems(self._attributes): + if attr.get("read_from"): + if attr["read_from"] not in self._module.argument_spec: + module.fail_json( + msg="argument %s does not exist" % attr["read_from"] + ) + spec = self._module.argument_spec.get(attr["read_from"]) + for key, value in iteritems(spec): + if key not in attr: + attr[key] = value + + if attr.get("key"): + if _has_key: + module.fail_json(msg="only one key value can be specified") + _has_key = True + attr["required"] = True + + def serialize(self): + return self._attributes + + def to_dict(self, value): + obj = {} + for name, attr in iteritems(self._attributes): + if attr.get("key"): + obj[name] = value + else: + obj[name] = attr.get("default") + return obj + + def __call__(self, value, strict=True): + if not isinstance(value, dict): + value = self.to_dict(value) + + if strict: + unknown = set(value).difference(self.attr_names) + if unknown: + self._module.fail_json( + msg="invalid keys: %s" % ",".join(unknown) + ) + + for name, attr in iteritems(self._attributes): + if value.get(name) is None: + value[name] = attr.get("default") + + if attr.get("fallback") and not value.get(name): + fallback = attr.get("fallback", (None,)) + fallback_strategy = fallback[0] + fallback_args = [] + fallback_kwargs = {} + if fallback_strategy is not None: + for item in fallback[1:]: + if isinstance(item, dict): + fallback_kwargs = item + else: + fallback_args = item + try: + value[name] = fallback_strategy( + *fallback_args, **fallback_kwargs + ) + except basic.AnsibleFallbackNotFound: + continue + + if attr.get("required") and value.get(name) is None: + self._module.fail_json( + msg="missing required attribute %s" % name + ) + + if "choices" in attr: + if value[name] not in attr["choices"]: + self._module.fail_json( + msg="%s must be one of %s, got %s" + % (name, ", ".join(attr["choices"]), value[name]) + ) + + if value[name] is not None: + value_type = attr.get("type", "str") + type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[ + value_type + ] + type_checker(value[name]) + elif value.get(name): + value[name] = self._module.params[name] + + return value + + +class EntityCollection(Entity): + """Extends ```Entity``` to handle a list of dicts """ + + def __call__(self, iterable, strict=True): + if iterable is None: + iterable = [ + super(EntityCollection, self).__call__( + self._module.params, strict + ) + ] + + 
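+ # e.g. (illustrative): ComplexList(dict(command=dict(key=True)), module) + # called with ["show version"] expands each scalar through the key + # attribute and returns [{"command": "show version"}].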
if not isinstance(iterable, (list, tuple)): + self._module.fail_json(msg="value must be an iterable") + + return [ + (super(EntityCollection, self).__call__(i, strict)) + for i in iterable + ] + + +# these two are for backwards compatibility and can be removed once all of the +# modules that use them are updated +class ComplexDict(Entity): + def __init__(self, attrs, module, *args, **kwargs): + super(ComplexDict, self).__init__(module, attrs, *args, **kwargs) + + +class ComplexList(EntityCollection): + def __init__(self, attrs, module, *args, **kwargs): + super(ComplexList, self).__init__(module, attrs, *args, **kwargs) + + +def dict_diff(base, comparable): + """ Generate a dict object of differences + + This function will compare two dict objects and return the difference + between them as a dict object. For scalar values, the key will reflect + the updated value. If the key does not exist in `comparable`, then no + key will be returned. For lists, the value in comparable will wholly replace + the value in base for the key. For dicts, the returned value will only + return keys that are different. + + :param base: dict object to base the diff on + :param comparable: dict object to compare against base + + :returns: new dict object with differences + """ + if not isinstance(base, dict): + raise AssertionError("`base` must be of type <dict>") + if not isinstance(comparable, dict): + if comparable is None: + comparable = dict() + else: + raise AssertionError("`comparable` must be of type <dict>") + + updates = dict() + + for key, value in iteritems(base): + if isinstance(value, dict): + item = comparable.get(key) + if item is not None: + sub_diff = dict_diff(value, comparable[key]) + if sub_diff: + updates[key] = sub_diff + else: + comparable_value = comparable.get(key) + if comparable_value is not None: + if sort_list(base[key]) != sort_list(comparable_value): + updates[key] = comparable_value + + for key in set(comparable.keys()).difference(base.keys()): + updates[key] = comparable.get(key) + + return updates + + +def dict_merge(base, other): + """ Return a new dict object that combines base and other + + This will create a new dict object that is a combination of the key/value + pairs from base and other. When both keys exist, the value will be + selected from other. If the value is a list object, the two lists will + be combined and duplicate entries removed.
+ + :param base: dict object to serve as base + :param other: dict object to combine with base + + :returns: new combined dict object + """ + if not isinstance(base, dict): + raise AssertionError("`base` must be of type <dict>") + if not isinstance(other, dict): + raise AssertionError("`other` must be of type <dict>") + + combined = dict() + + for key, value in iteritems(base): + if isinstance(value, dict): + if key in other: + item = other.get(key) + if item is not None: + if isinstance(other[key], Mapping): + combined[key] = dict_merge(value, other[key]) + else: + combined[key] = other[key] + else: + combined[key] = item + else: + combined[key] = value + elif isinstance(value, list): + if key in other: + item = other.get(key) + if item is not None: + try: + combined[key] = list(set(chain(value, item))) + except TypeError: + value.extend([i for i in item if i not in value]) + combined[key] = value + else: + combined[key] = item + else: + combined[key] = value + else: + if key in other: + other_value = other.get(key) + if other_value is not None: + if sort_list(base[key]) != sort_list(other_value): + combined[key] = other_value + else: + combined[key] = value + else: + combined[key] = other_value + else: + combined[key] = value + + for key in set(other.keys()).difference(base.keys()): + combined[key] = other.get(key) + + return combined + + +def param_list_to_dict(param_list, unique_key="name", remove_key=True): + """Rotates a list of dictionaries to be a dictionary of dictionaries. + + :param param_list: The aforementioned list of dictionaries + :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value + behind this key will be the key each dictionary can be found at in the new root dictionary + :param remove_key: If True, remove unique_key from the individual dictionaries before returning. + """ + param_dict = {} + for params in param_list: + params = params.copy() + if remove_key: + name = params.pop(unique_key) + else: + name = params.get(unique_key) + param_dict[name] = params + + return param_dict + + +def conditional(expr, val, cast=None): + match = re.match(r"^(.+)\((.+)\)$", str(expr), re.I) + if match: + op, arg = match.groups() + else: + op = "eq" + if " " in str(expr): + raise AssertionError("invalid expression: cannot contain spaces") + arg = expr + + if cast is None and val is not None: + arg = type(val)(arg) + elif callable(cast): + arg = cast(arg) + val = cast(val) + + op = next((oper for alias, oper in ALIASES if op == alias), op) + + if not hasattr(operator, op) and op not in OPERATORS: + raise ValueError("unknown operator: %s" % op) + + func = getattr(operator, op) + return func(val, arg) + + +def ternary(value, true_val, false_val): + """ value ?
true_val : false_val """ + if value: + return true_val + else: + return false_val + + +def remove_default_spec(spec): + for item in spec: + if "default" in spec[item]: + del spec[item]["default"] + + +def validate_ip_address(address): + try: + socket.inet_aton(address) + except socket.error: + return False + return address.count(".") == 3 + + +def validate_ip_v6_address(address): + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + return False + return True + + +def validate_prefix(prefix): + if prefix and not 0 <= int(prefix) <= 32: + return False + return True + + +def load_provider(spec, args): + provider = args.get("provider") or {} + for key, value in iteritems(spec): + if key not in provider: + if "fallback" in value: + provider[key] = _fallback(value["fallback"]) + elif "default" in value: + provider[key] = value["default"] + else: + provider[key] = None + if "authorize" in provider: + # Coerce authorize to provider if a string has somehow snuck in. + provider["authorize"] = boolean(provider["authorize"] or False) + args["provider"] = provider + return provider + + +def _fallback(fallback): + strategy = fallback[0] + args = [] + kwargs = {} + + for item in fallback[1:]: + if isinstance(item, dict): + kwargs = item + else: + args = item + try: + return strategy(*args, **kwargs) + except basic.AnsibleFallbackNotFound: + pass + + +def generate_dict(spec): + """ + Generate dictionary which is in sync with argspec + + :param spec: A dictionary that is the argspec of the module + :rtype: A dictionary + :returns: A dictionary in sync with argspec with default value + """ + obj = {} + if not spec: + return obj + + for key, val in iteritems(spec): + if "default" in val: + dct = {key: val["default"]} + elif "type" in val and val["type"] == "dict": + dct = {key: generate_dict(val["options"])} + else: + dct = {key: None} + obj.update(dct) + return obj + + +def parse_conf_arg(cfg, arg): + """ + Parse config based on argument + + :param cfg: A text string which is a line of configuration. + :param arg: A text string which is to be matched. + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r"%s (.+)(\n|$)" % arg, cfg, re.M) + if match: + result = match.group(1).strip() + else: + result = None + return result + + +def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str="no"): + """ + Parse config based on command + + :param cfg: A text string which is a line of configuration. + :param cmd: A text string which is the command to be matched + :param res1: A text string to be returned if the command is present + :param res2: A text string to be returned if the negate command + is present + :param delete_str: A text string to identify the start of the + negate command + :rtype: A text string + :returns: A text string if match is found + """ + match = re.search(r"\n\s+%s(\n|$)" % cmd, cfg) + if match: + return res1 + if res2 is not None: + match = re.search(r"\n\s+%s %s(\n|$)" % (delete_str, cmd), cfg) + if match: + return res2 + return None + + +def get_xml_conf_arg(cfg, path, data="text"): + """ + :param cfg: The top level configuration lxml Element tree object + :param path: The relative xpath w.r.t to top level element (cfg) + to be searched in the xml hierarchy + :param data: The type of data to be returned for the matched xml node. + Valid values are text, tag, attrib, with default as text. 
+ :return: Returns the required type for the matched xml node or else None + """ + match = cfg.xpath(path) + if len(match): + if data == "tag": + result = getattr(match[0], "tag") + elif data == "attrib": + result = getattr(match[0], "attrib") + else: + result = getattr(match[0], "text") + else: + result = None + return result + + +def remove_empties(cfg_dict): + """ + Generate final config dictionary + + :param cfg_dict: A dictionary parsed in the facts system + :rtype: A dictionary + :returns: A dictionary by eliminating keys that have null values + """ + final_cfg = {} + if not cfg_dict: + return final_cfg + + for key, val in iteritems(cfg_dict): + dct = None + if isinstance(val, dict): + child_val = remove_empties(val) + if child_val: + dct = {key: child_val} + elif ( + isinstance(val, list) + and val + and all([isinstance(x, dict) for x in val]) + ): + child_val = [remove_empties(x) for x in val] + if child_val: + dct = {key: child_val} + elif val not in [None, [], {}, (), ""]: + dct = {key: val} + if dct: + final_cfg.update(dct) + return final_cfg + + +def validate_config(spec, data): + """ + Validate if the input data against the AnsibleModule spec format + :param spec: Ansible argument spec + :param data: Data to be validated + :return: + """ + params = basic._ANSIBLE_ARGS + basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": data})) + validated_data = basic.AnsibleModule(spec).params + basic._ANSIBLE_ARGS = params + return validated_data + + +def search_obj_in_list(name, lst, key="name"): + if not lst: + return None + else: + for item in lst: + if item.get(key) == name: + return item + + +class Template: + def __init__(self): + if not HAS_JINJA2: + raise ImportError( + "jinja2 is required but does not appear to be installed. " + "It can be installed using `pip install jinja2`" + ) + + self.env = Environment(undefined=StrictUndefined) + self.env.filters.update({"ternary": ternary}) + + def __call__(self, value, variables=None, fail_on_undefined=True): + variables = variables or {} + + if not self.contains_vars(value): + return value + + try: + value = self.env.from_string(value).render(variables) + except UndefinedError: + if not fail_on_undefined: + return None + raise + + if value: + try: + return ast.literal_eval(value) + except Exception: + return str(value) + else: + return None + + def contains_vars(self, data): + if isinstance(data, string_types): + for marker in ( + self.env.block_start_string, + self.env.variable_start_string, + self.env.comment_start_string, + ): + if marker in data: + return True + return False diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py new file mode 100644 index 00000000..1f03299b --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py @@ -0,0 +1,147 @@ +# +# (c) 2018 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +import json + +from copy import deepcopy +from contextlib import contextmanager + +try: + from lxml.etree import fromstring, tostring +except ImportError: + from xml.etree.ElementTree import fromstring, tostring + +from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.connection import Connection, ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import ( + NetconfConnection, +) + + +IGNORE_XML_ATTRIBUTE = () + + +def get_connection(module): + if hasattr(module, "_netconf_connection"): + return module._netconf_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api == "netconf": + module._netconf_connection = NetconfConnection(module._socket_path) + else: + module.fail_json(msg="Invalid connection type %s" % network_api) + + return module._netconf_connection + + +def get_capabilities(module): + if hasattr(module, "_netconf_capabilities"): + return module._netconf_capabilities + + capabilities = Connection(module._socket_path).get_capabilities() + module._netconf_capabilities = json.loads(capabilities) + return module._netconf_capabilities + + +def lock_configuration(module, target=None): + conn = get_connection(module) + return conn.lock(target=target) + + +def unlock_configuration(module, target=None): + conn = get_connection(module) + return conn.unlock(target=target) + + +@contextmanager +def locked_config(module, target=None): + try: + lock_configuration(module, target=target) + yield + finally: + unlock_configuration(module, target=target) + + +def get_config(module, source, filter=None, lock=False): + conn = get_connection(module) + try: + locked = False + if lock: + conn.lock(target=source) + locked = True + response = conn.get_config(source=source, filter=filter) + + except ConnectionError as e: + module.fail_json( + msg=to_text(e, errors="surrogate_then_replace").strip() + ) + + finally: + if locked: + conn.unlock(target=source) + + return response + + +def get(module, filter, lock=False): + conn = get_connection(module) + try: + locked = False + if lock: + conn.lock(target="running") + locked = True + + response = conn.get(filter=filter) + + except ConnectionError as e: + module.fail_json( + msg=to_text(e, errors="surrogate_then_replace").strip() + ) + + finally: + if locked: + conn.unlock(target="running") + + return response + + +def dispatch(module, request): + conn = get_connection(module) + try: + response = conn.dispatch(request) + except ConnectionError as e: + module.fail_json( + msg=to_text(e, errors="surrogate_then_replace").strip() + ) + + return response + + +def sanitize_xml(data): + tree = fromstring( + to_bytes(deepcopy(data), errors="surrogate_then_replace") + ) + for element in tree.getiterator(): + # remove attributes + attribute = element.attrib + if attribute: + for key in list(attribute): + if key not in IGNORE_XML_ATTRIBUTE: + attribute.pop(key) + return to_text(tostring(tree), errors="surrogate_then_replace").strip() diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py 
b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py new file mode 100644 index 00000000..fba46be0 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py @@ -0,0 +1,61 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2018 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +from ansible.module_utils.connection import Connection + + +def get(module, path=None, content=None, fields=None, output="json"): + if path is None: + raise ValueError("path value must be provided") + if content: + path += "?" + "content=%s" % content + if fields: + path += ("&" if "?" in path else "?") + "fields=%s" % fields
+ "field=%s" % fields + + accept = None + if output == "xml": + accept = "application/yang-data+xml" + + connection = Connection(module._socket_path) + return connection.send_request( + None, path=path, method="GET", accept=accept + ) + + +def edit_config(module, path=None, content=None, method="GET", format="json"): + if path is None: + raise ValueError("path value must be provided") + + content_type = None + if format == "xml": + content_type = "application/yang-data+xml" + + connection = Connection(module._socket_path) + return connection.send_request( + content, path=path, method=method, content_type=content_type + ) diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py new file mode 100644 index 00000000..c1384c1d --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py @@ -0,0 +1,444 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Ansible by Red Hat, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: cli_config +author: Trishna Guha (@trishnaguha) +notes: +- The commands will be returned only for platforms that do not support onbox diff. + The C(--diff) option with the playbook will return the difference in configuration + for devices that has support for onbox diff +short_description: Push text based configuration to network devices over network_cli +description: +- This module provides platform agnostic way of pushing text based configuration to + network devices over network_cli connection plugin. +extends_documentation_fragment: +- ansible.netcommon.network_agnostic +options: + config: + description: + - The config to be pushed to the network device. This argument is mutually exclusive + with C(rollback) and either one of the option should be given as input. The + config should have indentation that the device uses. + type: str + commit: + description: + - The C(commit) argument instructs the module to push the configuration to the + device. This is mapped to module check mode. + type: bool + replace: + description: + - If the C(replace) argument is set to C(yes), it will replace the entire running-config + of the device with the C(config) argument value. For devices that support replacing + running configuration from file on device like NXOS/JUNOS, the C(replace) argument + takes path to the file on the device that will be used for replacing the entire + running-config. The value of C(config) option should be I(None) for such devices. + Nexus 9K devices only support replace. Use I(net_put) or I(nxos_file_copy) in + case of NXOS module to copy the flat file to remote device and then use set + the fullpath to this argument. + type: str + backup: + description: + - This argument will cause the module to create a full backup of the current running + config from the remote device before any changes are made. If the C(backup_options) + value is not given, the backup file is written to the C(backup) folder in the + playbook root directory or role root directory, if playbook is part of an ansible + role. 
If the directory does not exist, it is created. + type: bool + default: 'no' + rollback: + description: + - The C(rollback) argument instructs the module to rollback the current configuration + to the identifier specified in the argument. If the specified rollback identifier + does not exist on the remote device, the module will fail. To rollback to the + most recent commit, set the C(rollback) argument to 0. This option is mutually + exclusive with C(config). + type: int + commit_comment: + description: + - The C(commit_comment) argument specifies a text string to be used when committing + the configuration. If the C(commit) argument is set to False, this argument + is silently ignored. This argument is only valid for the platforms that support + commit operation with comment. + type: str + defaults: + description: + - The I(defaults) argument will influence how the running-config is collected + from the device. When the value is set to true, the command used to collect + the running-config is appended with the all keyword. When the value is set to + false, the command is issued without the all keyword. + default: 'no' + type: bool + multiline_delimiter: + description: + - This argument is used when pushing a multiline configuration element to the + device. It specifies the character to use as the delimiting character. This + only applies to the configuration action. + type: str + diff_replace: + description: + - Instructs the module on the way to perform the configuration on the device. + If the C(diff_replace) argument is set to I(line) then the modified lines are + pushed to the device in configuration mode. If the argument is set to I(block) + then the entire command block is pushed to the device in configuration mode + if any line is not correct. Note that this parameter will be ignored if the + platform has onbox diff support. + choices: + - line + - block + - config + diff_match: + description: + - Instructs the module on the way to perform the matching of the set of commands + against the current device config. If C(diff_match) is set to I(line), commands + are matched line by line. If C(diff_match) is set to I(strict), command lines + are matched with respect to position. If C(diff_match) is set to I(exact), command + lines must be an equal match. Finally, if C(diff_match) is set to I(none), the + module will not attempt to compare the source configuration with the running + configuration on the remote device. Note that this parameter will be ignored + if the platform has onbox diff support. + choices: + - line + - strict + - exact + - none + diff_ignore_lines: + description: + - Use this argument to specify one or more lines that should be ignored during + the diff. This is used for lines in the configuration that are automatically + updated by the system. This argument takes a list of regular expressions or + exact line matches. Note that this parameter will be ignored if the platform + has onbox diff support. + backup_options: + description: + - This is a dict object containing configurable options related to backup file + path. The value of this option is read only when C(backup) is set to I(yes); + if C(backup) is set to I(no), this option will be silently ignored. + suboptions: + filename: + description: + - The filename to be used to store the backup configuration.
If the filename + is not given it will be generated based on the hostname, current time and + date, in the format <hostname>_config.<current-date>@<current-time> + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will + be first created, and the filename is either the value of C(filename) or + the default filename as described in the C(filename) option's description. If the + path value is not given, a I(backup) directory will be created + in the current working directory and the backup configuration will be copied + to C(filename) within that I(backup) directory. + type: path + type: dict +""" + +EXAMPLES = """ +- name: configure device with config + cli_config: + config: "{{ lookup('template', 'basic/config.j2') }}" + +- name: multiline config + cli_config: + config: | + hostname foo + feature nxapi + +- name: configure device with config with defaults enabled + cli_config: + config: "{{ lookup('template', 'basic/config.j2') }}" + defaults: yes + +- name: Use diff_match + cli_config: + config: "{{ lookup('file', 'interface_config') }}" + diff_match: none + +- name: nxos replace config + cli_config: + replace: 'bootflash:nxoscfg' + +- name: junos replace config + cli_config: + replace: '/var/home/ansible/junos01.cfg' + +- name: commit with comment + cli_config: + config: set system host-name foo + commit_comment: this is a test + +- name: configurable backup path + cli_config: + config: "{{ lookup('template', 'basic/config.j2') }}" + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['interface Loopback999', 'no shutdown'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/hostname_config.2016-07-16@22:28:34 +""" + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import Connection +from ansible.module_utils._text import to_text + + +def validate_args(module, device_operations): + """Validate that each supplied parameter is supported on the platform. + """ + feature_list = [ + "replace", + "rollback", + "commit_comment", + "defaults", + "multiline_delimiter", + "diff_replace", + "diff_match", + "diff_ignore_lines", + ] + + for feature in feature_list: + if module.params[feature]: + supports_feature = device_operations.get("supports_%s" % feature) + if supports_feature is None: + module.fail_json( + msg="This platform does not specify whether %s is supported or not. " + "Please report an issue against this platform's cliconf plugin."
% feature + ) + elif not supports_feature: + module.fail_json( + msg="Option %s is not supported on this platform" % feature + ) + + +def run( + module, device_operations, connection, candidate, running, rollback_id +): + result = {} + resp = {} + config_diff = [] + banner_diff = {} + + replace = module.params["replace"] + commit_comment = module.params["commit_comment"] + multiline_delimiter = module.params["multiline_delimiter"] + diff_replace = module.params["diff_replace"] + diff_match = module.params["diff_match"] + diff_ignore_lines = module.params["diff_ignore_lines"] + + commit = not module.check_mode + + if replace in ("yes", "true", "True"): + replace = True + elif replace in ("no", "false", "False"): + replace = False + + if ( + replace is not None + and replace not in [True, False] + and candidate is not None + ): + module.fail_json( + msg="Replace value '%s' is a configuration file path already" + " present on the device. Hence 'replace' and 'config' options" + " are mutually exclusive" % replace + ) + + if rollback_id is not None: + resp = connection.rollback(rollback_id, commit) + if "diff" in resp: + result["changed"] = True + + elif device_operations.get("supports_onbox_diff"): + if diff_replace: + module.warn( + "diff_replace is ignored as the device supports onbox diff" + ) + if diff_match: + module.warn( + "diff_match is ignored as the device supports onbox diff" + ) + if diff_ignore_lines: + module.warn( + "diff_ignore_lines is ignored as the device supports onbox diff" + ) + + if candidate and not isinstance(candidate, list): + candidate = candidate.strip("\n").splitlines() + + kwargs = { + "candidate": candidate, + "commit": commit, + "replace": replace, + "comment": commit_comment, + } + resp = connection.edit_config(**kwargs) + + if "diff" in resp: + result["changed"] = True + + elif device_operations.get("supports_generate_diff"): + kwargs = {"candidate": candidate, "running": running} + if diff_match: + kwargs.update({"diff_match": diff_match}) + if diff_replace: + kwargs.update({"diff_replace": diff_replace}) + if diff_ignore_lines: + kwargs.update({"diff_ignore_lines": diff_ignore_lines}) + + diff_response = connection.get_diff(**kwargs) + + config_diff = diff_response.get("config_diff") + banner_diff = diff_response.get("banner_diff") + + if config_diff: + if isinstance(config_diff, list): + candidate = config_diff + else: + candidate = config_diff.splitlines() + + kwargs = { + "candidate": candidate, + "commit": commit, + "replace": replace, + "comment": commit_comment, + } + if commit: + connection.edit_config(**kwargs) + result["changed"] = True + result["commands"] = config_diff.split("\n") + + if banner_diff: + candidate = json.dumps(banner_diff) + + kwargs = {"candidate": candidate, "commit": commit} + if multiline_delimiter: + kwargs.update({"multiline_delimiter": multiline_delimiter}) + if commit: + connection.edit_banner(**kwargs) + result["changed"] = True + + if module._diff: + if "diff" in resp: + result["diff"] = {"prepared": resp["diff"]} + else: + diff = "" + if config_diff: + if isinstance(config_diff, list): + diff += "\n".join(config_diff) + else: + diff += config_diff + if banner_diff: + diff += json.dumps(banner_diff) + result["diff"] = {"prepared": diff} + + return result + + +def main(): + """main entry point for execution + """ + backup_spec = dict(filename=dict(), dir_path=dict(type="path")) + argument_spec = dict( + backup=dict(default=False, type="bool"), + backup_options=dict(type="dict", options=backup_spec), +
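+        # NOTE: 'replace' is a str rather than a bool because some platforms (e.g. NXOS/JUNOS) accept a path to a file on the device here.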
config=dict(type="str"), + commit=dict(type="bool"), + replace=dict(type="str"), + rollback=dict(type="int"), + commit_comment=dict(type="str"), + defaults=dict(default=False, type="bool"), + multiline_delimiter=dict(type="str"), + diff_replace=dict(choices=["line", "block", "config"]), + diff_match=dict(choices=["line", "strict", "exact", "none"]), + diff_ignore_lines=dict(type="list"), + ) + + mutually_exclusive = [("config", "rollback")] + required_one_of = [["backup", "config", "rollback"]] + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + supports_check_mode=True, + ) + + result = {"changed": False} + + connection = Connection(module._socket_path) + capabilities = module.from_json(connection.get_capabilities()) + + if capabilities: + device_operations = capabilities.get("device_operations", dict()) + validate_args(module, device_operations) + else: + device_operations = dict() + + if module.params["defaults"]: + if "get_default_flag" in capabilities.get("rpc"): + flags = connection.get_default_flag() + else: + flags = "all" + else: + flags = [] + + candidate = module.params["config"] + candidate = ( + to_text(candidate, errors="surrogate_then_replace") + if candidate + else None + ) + running = connection.get_config(flags=flags) + rollback_id = module.params["rollback"] + + if module.params["backup"]: + result["__backup__"] = running + + if candidate or rollback_id or module.params["replace"]: + try: + result.update( + run( + module, + device_operations, + connection, + candidate, + running, + rollback_id, + ) + ) + except Exception as exc: + module.fail_json(msg=to_text(exc)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py new file mode 100644 index 00000000..f0910f52 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py @@ -0,0 +1,71 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Ansible by Red Hat, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: net_get +author: Deepak Agrawal (@dagrawal) +short_description: Copy a file from a network device to Ansible Controller +description: +- This module provides functionality to copy file from network device to ansible controller. +extends_documentation_fragment: +- ansible.netcommon.network_agnostic +options: + src: + description: + - Specifies the source file. The path to the source file can either be the full + path on the network device or a relative path as per path supported by destination + network device. + required: true + protocol: + description: + - Protocol used to transfer file. + default: scp + choices: + - scp + - sftp + dest: + description: + - Specifies the destination file. The path to the destination file can either + be the full path on the Ansible control host or a relative path from the playbook + or role root directory. + default: + - Same filename as specified in I(src). 
The path will be the playbook root or role + root directory if the playbook is part of a role. +requirements: +- scp +notes: +- Some devices need specific configurations to be enabled before scp can work. These + configurations should be pre-configured before using this module, e.g. ios - C(ip scp + server enable). +- User privilege to do scp on the network device should be pre-configured, e.g. ios - + user privilege 15 is needed by default for allowing scp. +- By default the destination file takes the same name as the source file. +""" + +EXAMPLES = """ +- name: copy file from the network device to Ansible controller + net_get: + src: running_cfg_ios1.txt + +- name: copy file from ios to common location at /tmp + net_get: + src: running_cfg_sw1.txt + dest: /tmp/ios1.txt +""" + +RETURN = """ +""" diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py new file mode 100644 index 00000000..2fc4a98c --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2018, Ansible by Red Hat, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: net_put +author: Deepak Agrawal (@dagrawal) +short_description: Copy a file from the Ansible controller to a network device +description: +- This module provides functionality to copy a file from the Ansible controller to network + devices. +extends_documentation_fragment: +- ansible.netcommon.network_agnostic +options: + src: + description: + - Specifies the source file. The path to the source file can either be the full + path on the Ansible control host or a relative path from the playbook or role + root directory. + required: true + protocol: + description: + - Protocol used to transfer the file. + default: scp + choices: + - scp + - sftp + dest: + description: + - Specifies the destination file. The path to the destination file can either be the + full path or a relative path as supported by the network_os. + default: + - The filename from I(src), placed in the default directory of the user shell on the network_os. + required: false + mode: + description: + - Set the file transfer mode. If mode is set to I(text) then the I(src) file will + go through the Jinja2 template engine to replace any vars present in the src + file. If mode is set to I(binary) then the file will be copied as-is to the destination + device. + default: binary + choices: + - binary + - text +requirements: +- scp +notes: +- Some devices need specific configurations to be enabled before scp can work. These + configurations should be pre-configured before using this module, e.g. ios - C(ip scp + server enable). +- User privilege to do scp on the network device should be pre-configured, e.g. ios - + user privilege 15 is needed by default for allowing scp. +- By default the destination file takes the same name as the source file.
+""" + +EXAMPLES = """ +- name: copy file from ansible controller to a network device + net_put: + src: running_cfg_ios1.txt + +- name: copy file at root dir of flash in slot 3 of sw1(ios) + net_put: + src: running_cfg_sw1.txt + protocol: sftp + dest : flash3:/running_cfg_sw1.txt +""" + +RETURN = """ +""" diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py new file mode 100644 index 00000000..e9332f26 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py @@ -0,0 +1,70 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """author: Ansible Networking Team +netconf: default +short_description: Use default netconf plugin to run standard netconf commands as + per RFC +description: +- This default plugin provides low level abstraction apis for sending and receiving + netconf commands as per Netconf RFC specification. +options: + ncclient_device_handler: + type: str + default: default + description: + - Specifies the ncclient device handler name for network os that support default + netconf implementation as per Netconf RFC specification. To identify the ncclient + device handler name refer ncclient library documentation. +""" +import json + +from ansible.module_utils._text import to_text +from ansible.plugins.netconf import NetconfBase + + +class Netconf(NetconfBase): + def get_text(self, ele, tag): + try: + return to_text( + ele.find(tag).text, errors="surrogate_then_replace" + ).strip() + except AttributeError: + pass + + def get_device_info(self): + device_info = dict() + device_info["network_os"] = "default" + return device_info + + def get_capabilities(self): + result = dict() + result["rpc"] = self.get_base_rpc() + result["network_api"] = "netconf" + result["device_info"] = self.get_device_info() + result["server_capabilities"] = [c for c in self.m.server_capabilities] + result["client_capabilities"] = [c for c in self.m.client_capabilities] + result["session_id"] = self.m.session_id + result["device_operations"] = self.get_device_operations( + result["server_capabilities"] + ) + return json.dumps(result) diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py new file mode 100644 index 00000000..e5ac2cd1 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py @@ -0,0 +1,133 @@ +# +# (c) 2016 Red Hat Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import sys +import copy + +from ansible_collections.ansible.netcommon.plugins.action.network import ( + ActionModule as ActionNetworkModule, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + load_provider, +) +from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import ( + ios_provider_spec, +) +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split(".")[-1] + self._config_module = True if module_name == "ios_config" else False + persistent_connection = self._play_context.connection.split(".")[-1] + warnings = [] + + if persistent_connection == "network_cli": + provider = self._task.args.get("provider", {}) + if any(provider.values()): + display.warning( + "provider is unnecessary when using network_cli and will be ignored" + ) + del self._task.args["provider"] + elif self._play_context.connection == "local": + provider = load_provider(ios_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = "ansible.netcommon.network_cli" + pc.network_os = "cisco.ios.ios" + pc.remote_addr = provider["host"] or self._play_context.remote_addr + pc.port = int(provider["port"] or self._play_context.port or 22) + pc.remote_user = ( + provider["username"] or self._play_context.connection_user + ) + pc.password = provider["password"] or self._play_context.password + pc.private_key_file = ( + provider["ssh_keyfile"] or self._play_context.private_key_file + ) + pc.become = provider["authorize"] or False + if pc.become: + pc.become_method = "enable" + pc.become_pass = provider["auth_pass"] + + connection = self._shared_loader_obj.connection_loader.get( + "ansible.netcommon.persistent", + pc, + sys.stdin, + task_uuid=self._task._uuid, + ) + + # TODO: Remove below code after ansible minimal is cut out + if connection is None: + pc.connection = "network_cli" + pc.network_os = "ios" + connection = self._shared_loader_obj.connection_loader.get( + "persistent", pc, sys.stdin, task_uuid=self._task._uuid + ) + + display.vvv( + "using connection plugin %s (was local)" % pc.connection, + pc.remote_addr, + ) + + command_timeout = ( + int(provider["timeout"]) + if provider["timeout"] + else connection.get_option("persistent_command_timeout") + ) + connection.set_options( + direct={"persistent_command_timeout": command_timeout} + ) + + socket_path = connection.run() + display.vvvv("socket_path: %s" % socket_path, pc.remote_addr) + if not socket_path: + return { + "failed": True, + "msg": "unable to open shell. 
Please see: " + + "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell", + } + + task_vars["ansible_socket"] = socket_path + warnings.append( + [ + "connection local support for this module is deprecated and will be removed in version 2.14, use connection %s" + % pc.connection + ] + ) + else: + return { + "failed": True, + "msg": "Connection type %s is not valid for this module" + % self._play_context.connection, + } + + result = super(ActionModule, self).run(task_vars=task_vars) + if warnings: + if "warnings" in result: + result["warnings"].extend(warnings) + else: + result["warnings"] = warnings + return result diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py new file mode 100644 index 00000000..8a390034 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py @@ -0,0 +1,465 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +author: Ansible Networking Team +cliconf: ios +short_description: Use ios cliconf to run command on Cisco IOS platform +description: + - This ios plugin provides low level abstraction apis for + sending and receiving CLI commands from Cisco IOS network devices. +version_added: "2.4" +""" + +import re +import time +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import Mapping +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import ( + NetworkConfig, + dumps, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + @enable_mode + def get_config(self, source="running", flags=None, format=None): + if source not in ("running", "startup"): + raise ValueError( + "fetching configuration from %s is not supported" % source + ) + + if format: + raise ValueError( + "'format' value %s is not supported for get_config" % format + ) + + if not flags: + flags = [] + if source == "running": + cmd = "show running-config " + else: + cmd = "show startup-config " + + cmd += " ".join(to_list(flags)) + cmd = cmd.strip() + + return self.send_command(cmd) + + def get_diff( + self, + candidate=None, + running=None, + diff_match="line", + diff_ignore_lines=None, + path=None, + diff_replace="line", + ): + """ + Generate diff between candidate and running configuration. If the + remote host supports onbox diff capabilities ie. 
supports_onbox_diff, the + candidate and running configurations are not required to be passed as arguments. + If the onbox diff capability is not supported, the candidate argument is mandatory + and the running argument is optional. + :param candidate: The configuration which is expected to be present on remote host. + :param running: The base configuration which is used to generate diff. + :param diff_match: Instructs how to match the candidate configuration with current device configuration + Valid values are 'line', 'strict', 'exact', 'none'. + 'line' - commands are matched line by line + 'strict' - command lines are matched with respect to position + 'exact' - command lines must be an equal match + 'none' - will not compare the candidate configuration with the running configuration + :param diff_ignore_lines: Use this argument to specify one or more lines that should be + ignored during the diff. This is used for lines in the configuration + that are automatically updated by the system. This argument takes + a list of regular expressions or exact line matches. + :param path: The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + :param diff_replace: Instructs on the way to perform the configuration on the device. + If the replace argument is set to I(line) then the modified lines are + pushed to the device in configuration mode. If the replace argument is + set to I(block) then the entire command block is pushed to the device in + configuration mode if any line is not correct. + :return: Configuration diff in json format. + { + 'config_diff': '', + 'banner_diff': {} + } + + """ + diff = {} + device_operations = self.get_device_operations() + option_values = self.get_option_values() + + if candidate is None and device_operations["supports_generate_diff"]: + raise ValueError( + "candidate configuration is required to generate diff" + ) + + if diff_match not in option_values["diff_match"]: + raise ValueError( + "'match' value %s is invalid, valid values are %s" + % (diff_match, ", ".join(option_values["diff_match"])) + ) + + if diff_replace not in option_values["diff_replace"]: + raise ValueError( + "'replace' value %s is invalid, valid values are %s" + % (diff_replace, ", ".join(option_values["diff_replace"])) + ) + + # prepare candidate configuration + candidate_obj = NetworkConfig(indent=1) + want_src, want_banners = self._extract_banners(candidate) + candidate_obj.load(want_src) + + if running and diff_match != "none": + # running configuration + have_src, have_banners = self._extract_banners(running) + running_obj = NetworkConfig( + indent=1, contents=have_src, ignore_lines=diff_ignore_lines + ) + configdiffobjs = candidate_obj.difference( + running_obj, path=path, match=diff_match, replace=diff_replace + ) + + else: + configdiffobjs = candidate_obj.items + have_banners = {} + + diff["config_diff"] = ( + dumps(configdiffobjs, "commands") if configdiffobjs else "" + ) + banners = self._diff_banners(want_banners, have_banners) + diff["banner_diff"] = banners if banners else {} + return diff + + @enable_mode + def edit_config( + self, candidate=None, commit=True, replace=None, comment=None + ): + resp = {} + operations = self.get_device_operations() + self.check_edit_config_capability( + operations, candidate, commit, replace, comment + ) + + results = [] + requests = [] + if commit: +
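+            # Enter configuration mode and replay the candidate line by line, skipping "end" and comment ("!") lines; check mode is rejected in the else branch below.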
self.send_command("configure terminal") + for line in to_list(candidate): + if not isinstance(line, Mapping): + line = {"command": line} + + cmd = line["command"] + if cmd != "end" and cmd[0] != "!": + results.append(self.send_command(**line)) + requests.append(cmd) + + self.send_command("end") + else: + raise ValueError("check mode is not supported") + + resp["request"] = requests + resp["response"] = results + return resp + + def edit_macro( + self, candidate=None, commit=True, replace=None, comment=None + ): + """ + ios_config: + lines: "{{ macro_lines }}" + parents: "macro name {{ macro_name }}" + after: '@' + match: line + replace: block + """ + resp = {} + operations = self.get_device_operations() + self.check_edit_config_capability( + operations, candidate, commit, replace, comment + ) + + results = [] + requests = [] + if commit: + commands = "" + self.send_command("config terminal") + time.sleep(0.1) + # first item: macro command + commands += candidate.pop(0) + "\n" + multiline_delimiter = candidate.pop(-1) + for line in candidate: + commands += " " + line + "\n" + commands += multiline_delimiter + "\n" + obj = {"command": commands, "sendonly": True} + results.append(self.send_command(**obj)) + requests.append(commands) + + time.sleep(0.1) + self.send_command("end", sendonly=True) + time.sleep(0.1) + results.append(self.send_command("\n")) + requests.append("\n") + + resp["request"] = requests + resp["response"] = results + return resp + + def get( + self, + command=None, + prompt=None, + answer=None, + sendonly=False, + output=None, + newline=True, + check_all=False, + ): + if not command: + raise ValueError("must provide value of command to execute") + if output: + raise ValueError( + "'output' value %s is not supported for get" % output + ) + + return self.send_command( + command=command, + prompt=prompt, + answer=answer, + sendonly=sendonly, + newline=newline, + check_all=check_all, + ) + + def get_device_info(self): + device_info = {} + + device_info["network_os"] = "ios" + reply = self.get(command="show version") + data = to_text(reply, errors="surrogate_or_strict").strip() + + match = re.search(r"Version (\S+)", data) + if match: + device_info["network_os_version"] = match.group(1).strip(",") + + model_search_strs = [ + r"^[Cc]isco (.+) \(revision", + r"^[Cc]isco (\S+).+bytes of .*memory", + ] + for item in model_search_strs: + match = re.search(item, data, re.M) + if match: + version = match.group(1).split(" ") + device_info["network_os_model"] = version[0] + break + + match = re.search(r"^(.+) uptime", data, re.M) + if match: + device_info["network_os_hostname"] = match.group(1) + + match = re.search(r'image file is "(.+)"', data) + if match: + device_info["network_os_image"] = match.group(1) + + return device_info + + def get_device_operations(self): + return { + "supports_diff_replace": True, + "supports_commit": False, + "supports_rollback": False, + "supports_defaults": True, + "supports_onbox_diff": False, + "supports_commit_comment": False, + "supports_multiline_delimiter": True, + "supports_diff_match": True, + "supports_diff_ignore_lines": True, + "supports_generate_diff": True, + "supports_replace": False, + } + + def get_option_values(self): + return { + "format": ["text"], + "diff_match": ["line", "strict", "exact", "none"], + "diff_replace": ["line", "block"], + "output": [], + } + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + result["rpc"] += [ + "edit_banner", + "get_diff", + "run_commands", + "get_defaults_flag", + ] + 
result["device_operations"] = self.get_device_operations() + result.update(self.get_option_values()) + return json.dumps(result) + + def edit_banner( + self, candidate=None, multiline_delimiter="@", commit=True + ): + """ + Edit banner on remote device + :param banners: Banners to be loaded in json format + :param multiline_delimiter: Line delimiter for banner + :param commit: Boolean value that indicates if the device candidate + configuration should be pushed in the running configuration or discarded. + :param diff: Boolean flag to indicate if configuration that is applied on remote host should + generated and returned in response or not + :return: Returns response of executing the configuration command received + from remote host + """ + resp = {} + banners_obj = json.loads(candidate) + results = [] + requests = [] + if commit: + for key, value in iteritems(banners_obj): + key += " %s" % multiline_delimiter + self.send_command("config terminal", sendonly=True) + for cmd in [key, value, multiline_delimiter]: + obj = {"command": cmd, "sendonly": True} + results.append(self.send_command(**obj)) + requests.append(cmd) + + self.send_command("end", sendonly=True) + time.sleep(0.1) + results.append(self.send_command("\n")) + requests.append("\n") + + resp["request"] = requests + resp["response"] = results + + return resp + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {"command": cmd} + + output = cmd.pop("output", None) + if output: + raise ValueError( + "'output' value %s is not supported for run_commands" + % output + ) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, "err", to_text(e)) + + responses.append(out) + + return responses + + def get_defaults_flag(self): + """ + The method identifies the filter that should be used to fetch running-configuration + with defaults. + :return: valid default filter + """ + out = self.get("show running-config ?") + out = to_text(out, errors="surrogate_then_replace") + + commands = set() + for line in out.splitlines(): + if line.strip(): + commands.add(line.strip().split()[0]) + + if "all" in commands: + return "all" + else: + return "full" + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + out = self._connection.get_prompt() + + if out is None: + raise AnsibleConnectionFailure( + message=u"cli prompt is not identified from the last received" + u" response window: %s" + % self._connection._last_recv_window + ) + + if re.search( + r"config.*\)#", + to_text(out, errors="surrogate_then_replace").strip(), + ): + self._connection.queue_message( + "vvvv", "wrong context, sending end to device" + ) + self._connection.send_command("end") + + def _extract_banners(self, config): + banners = {} + banner_cmds = re.findall(r"^banner (\w+)", config, re.M) + for cmd in banner_cmds: + regex = r"banner %s \^C(.+?)(?=\^C)" % cmd + match = re.search(regex, config, re.S) + if match: + key = "banner %s" % cmd + banners[key] = match.group(1).strip() + + for cmd in banner_cmds: + regex = r"banner %s \^C(.+?)(?=\^C)" % cmd + match = re.search(regex, config, re.S) + if match: + config = config.replace(str(match.group(1)), "") + + config = re.sub(r"banner \w+ \^C\^C", "!! 
banner removed", config) + return config, banners + + def _diff_banners(self, want, have): + candidate = {} + for key, value in iteritems(want): + if value != have.get(key): + candidate[key] = value + return candidate diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py new file mode 100644 index 00000000..ff22d27c --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r"""options: + provider: + description: + - B(Deprecated) + - 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).' + - For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html). + - HORIZONTALLINE + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote device + over the specified transport. The value of host is used as the destination + address for the transport. + type: str + required: true + port: + description: + - Specifies the port to use when building the connection to the remote device. + type: int + default: 22 + username: + description: + - Configures the username to use to authenticate the connection to the remote + device. This value is used to authenticate the SSH session. If the value + is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) + will be used instead. + type: str + password: + description: + - Specifies the password to use to authenticate the connection to the remote + device. This value is used to authenticate the SSH session. If the value + is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) + will be used instead. + type: str + timeout: + description: + - Specifies the timeout in seconds for communicating with the network device + for either connecting or sending commands. If the timeout is exceeded before + the operation is completed, the module will error. + type: int + default: 10 + ssh_keyfile: + description: + - Specifies the SSH key to use to authenticate the connection to the remote + device. This value is the path to the key used to authenticate the SSH + session. If the value is not specified in the task, the value of environment + variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path + authorize: + description: + - Instructs the module to enter privileged mode on the remote device before + sending any commands. If not specified, the device will attempt to execute + all commands in non-privileged mode. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be + used instead. + type: bool + default: false + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode on the + remote device. If I(authorize) is false, then this argument does nothing. + If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTH_PASS) will be used instead. 
+ type: str +notes: +- For more information on using Ansible to manage network devices see the :ref:`Ansible + Network Guide <network_guide>` +- For more information on using Ansible to manage Cisco devices see the `Cisco integration + page <https://www.ansible.com/integrations/networks/cisco>`_. +""" diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py new file mode 100644 index 00000000..6818a0ce --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py @@ -0,0 +1,197 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
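A minimal sketch, assuming the netcommon collection is importable, of how load_provider() from common/utils.py (earlier in this patch) consumes a spec shaped like the ios_provider_spec defined below; missing keys are filled from fallbacks, then defaults, then None:

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    load_provider,
)

spec = {
    "host": {},
    "port": {"type": "int", "default": 22},
    "timeout": {"type": "int"},
}
task_args = {"provider": {"host": "192.0.2.1"}}
print(load_provider(spec, task_args))
# {'host': '192.0.2.1', 'port': 22, 'timeout': None}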
+# +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + +ios_provider_spec = { + "host": dict(), + "port": dict(type="int"), + "username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])), + "password": dict( + fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True + ), + "ssh_keyfile": dict( + fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path" + ), + "authorize": dict( + fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]), type="bool" + ), + "auth_pass": dict( + fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"]), no_log=True + ), + "timeout": dict(type="int"), +} +ios_argument_spec = { + "provider": dict( + type="dict", options=ios_provider_spec, removed_in_version=2.14 + ) +} + + +def get_provider_argspec(): + return ios_provider_spec + + +def get_connection(module): + if hasattr(module, "_ios_connection"): + return module._ios_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api == "cliconf": + module._ios_connection = Connection(module._socket_path) + else: + module.fail_json(msg="Invalid connection type %s" % network_api) + + return module._ios_connection + + +def get_capabilities(module): + if hasattr(module, "_ios_capabilities"): + return module._ios_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + module._ios_capabilities = json.loads(capabilities) + return module._ios_capabilities + + +def get_defaults_flag(module): + connection = get_connection(module) + try: + out = connection.get_defaults_flag() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + return to_text(out, errors="surrogate_then_replace").strip() + + +def get_config(module, flags=None): + flags = to_list(flags) + + section_filter = False + if flags and "section" in flags[-1]: + section_filter = True + + flag_str = " ".join(flags) + + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + try: + out = connection.get_config(flags=flags) + except ConnectionError as exc: + if section_filter: + # Some ios devices don't understand `| section foo` + out = get_config(module, flags=flags[:-1]) + else: + module.fail_json( + msg=to_text(exc, errors="surrogate_then_replace") + ) + cfg = to_text(out, errors="surrogate_then_replace").strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + return connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def load_config(module, commands): + connection = get_connection(module) + + try: + resp = connection.edit_config(commands) + return resp.get("response") + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def normalize_interface(name): + """Return the normalized interface name + """ + if not name: + return + + def _get_number(name): + digits = "" + for char in name: + if char.isdigit() or char in "/.": + digits += char + return digits + + if name.lower().startswith("gi"): + if_type = 
"GigabitEthernet" + elif name.lower().startswith("te"): + if_type = "TenGigabitEthernet" + elif name.lower().startswith("fa"): + if_type = "FastEthernet" + elif name.lower().startswith("fo"): + if_type = "FortyGigabitEthernet" + elif name.lower().startswith("et"): + if_type = "Ethernet" + elif name.lower().startswith("vl"): + if_type = "Vlan" + elif name.lower().startswith("lo"): + if_type = "loopback" + elif name.lower().startswith("po"): + if_type = "port-channel" + elif name.lower().startswith("nv"): + if_type = "nve" + elif name.lower().startswith("twe"): + if_type = "TwentyFiveGigE" + elif name.lower().startswith("hu"): + if_type = "HundredGigE" + else: + if_type = None + + number_list = name.split(" ") + if len(number_list) == 2: + if_number = number_list[-1].strip() + else: + if_number = _get_number(name) + + if if_type: + proper_interface = if_type + if_number + else: + proper_interface = name + + return proper_interface diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py new file mode 100644 index 00000000..ef383fcc --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: ios_command +author: Peter Sprygada (@privateip) +short_description: Run commands on remote devices running Cisco IOS +description: +- Sends arbitrary commands to an ios node and returns the results read from the device. + This module includes an argument that will cause the module to wait for a specific + condition before returning or timing out if the condition is not met. +- This module does not support running commands in configuration mode. Please use + M(ios_config) to configure IOS devices. +extends_documentation_fragment: +- cisco.ios.ios +notes: +- Tested against IOS 15.6 +options: + commands: + description: + - List of commands to send to the remote ios device over the configured provider. + The resulting output from the command is returned. If the I(wait_for) argument + is provided, the module is not returned until the condition is satisfied or + the number of retries has expired. If a command sent to the device requires + answering a prompt, it is possible to pass a dict containing I(command), I(answer) + and I(prompt). Common answers are 'y' or "\r" (carriage return, must be double + quotes). See examples. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the command. The task will + wait for each condition to be true before moving forward. 
If the conditional
+    is not true within the configured number of retries, the task fails. See examples.
+    aliases:
+    - waitfor
+  match:
+    description:
+    - The I(match) argument is used in conjunction with the I(wait_for) argument to
+      specify the match policy. Valid values are C(all) or C(any). If the value
+      is set to C(all) then all conditionals in the wait_for must be satisfied. If
+      the value is set to C(any) then only one of the values must be satisfied.
+    default: all
+    choices:
+    - any
+    - all
+  retries:
+    description:
+    - Specifies the number of retries a command should be tried before it is considered
+      failed. The command is run on the target device every retry and evaluated against
+      the I(wait_for) conditions.
+    default: 10
+  interval:
+    description:
+    - Configures the interval in seconds to wait between retries of the command. If
+      the command does not pass the specified conditions, the interval indicates how
+      long to wait before trying the command again.
+    default: 1
+"""
+
+EXAMPLES = r"""
+tasks:
+  - name: run show version on remote devices
+    ios_command:
+      commands: show version
+
+  - name: run show version and check to see if output contains IOS
+    ios_command:
+      commands: show version
+      wait_for: result[0] contains IOS
+
+  - name: run multiple commands on remote nodes
+    ios_command:
+      commands:
+        - show version
+        - show interfaces
+
+  - name: run multiple commands and evaluate the output
+    ios_command:
+      commands:
+        - show version
+        - show interfaces
+      wait_for:
+        - result[0] contains IOS
+        - result[1] contains Loopback0
+
+  - name: run commands that require answering a prompt
+    ios_command:
+      commands:
+        - command: 'clear counters GigabitEthernet0/1'
+          prompt: 'Clear "show interface" counters on this interface \[confirm\]'
+          answer: 'y'
+        - command: 'clear counters GigabitEthernet0/2'
+          prompt: '[confirm]'
+          answer: "\r"
+"""
+
+RETURN = """
+stdout:
+  description: The set of responses from the commands
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: ['...', '...']
+stdout_lines:
+  description: The value of stdout split into a list
+  returned: always apart from low level errors (such as action plugin)
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+  description: The list of conditionals that have failed
+  returned: failed
+  type: list
+  sample: ['...', '...']
+"""
+import time
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
+    Conditional,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+    transform_commands,
+    to_lines,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+    run_commands,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+    ios_argument_spec,
+)
+
+
+def parse_commands(module, warnings):
+    commands = transform_commands(module)
+
+    if module.check_mode:
+        for item in list(commands):
+            if not item["command"].startswith("show"):
+                warnings.append(
+                    "Only show commands are supported when using check mode, not "
+                    "executing %s" % item["command"]
+                )
+                commands.remove(item)
+
+    return commands
+
+
+def main():
+    """main entry point for module execution
+    """
+    argument_spec = dict(
+        commands=dict(type="list", required=True),
+        wait_for=dict(type="list", aliases=["waitfor"]),
+        match=dict(default="all", choices=["all",
"any"]), + retries=dict(default=10, type="int"), + interval=dict(default=1, type="int"), + ) + + argument_spec.update(ios_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) + + warnings = list() + result = {"changed": False, "warnings": warnings} + commands = parse_commands(module, warnings) + wait_for = module.params["wait_for"] or list() + + try: + conditionals = [Conditional(c) for c in wait_for] + except AttributeError as exc: + module.fail_json(msg=to_text(exc)) + + retries = module.params["retries"] + interval = module.params["interval"] + match = module.params["match"] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == "any": + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = "One or more conditional statements have not been satisfied" + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update( + {"stdout": responses, "stdout_lines": list(to_lines(responses))} + ) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py new file mode 100644 index 00000000..beec5b8d --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py @@ -0,0 +1,596 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: ios_config +author: Peter Sprygada (@privateip) +short_description: Manage Cisco IOS configuration sections +description: +- Cisco IOS configurations use a simple block indent file syntax for segmenting configuration + into sections. This module provides an implementation for working with IOS configuration + sections in a deterministic way. +extends_documentation_fragment: +- cisco.ios.ios +notes: +- Tested against IOS 15.6 +- Abbreviated commands are NOT idempotent, see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands). +options: + lines: + description: + - The ordered set of commands that should be configured in the section. The commands + must be the exact same commands as found in the device running-config. Be sure + to note the configuration command syntax as some commands are automatically + modified by the device config parser. 
+ aliases: + - commands + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy the + commands should be checked against. If the parents argument is omitted, the + commands are checked against the set of top level or global commands. + src: + description: + - Specifies the source path to the file that contains the configuration or configuration + template to load. The path to the source file can either be the full path on + the Ansible control host or a relative path from the playbook or role root directory. This + argument is mutually exclusive with I(lines), I(parents). + before: + description: + - The ordered set of commands to push on to the command stack if a change needs + to be made. This allows the playbook designer the opportunity to perform configuration + commands prior to pushing any changes without affecting how the set of commands + are matched against the system. + after: + description: + - The ordered set of commands to append to the end of the command stack if a change + needs to be made. Just like with I(before) this allows the playbook designer + to append a set of commands to be executed after the command set. + match: + description: + - Instructs the module on the way to perform the matching of the set of commands + against the current device config. If match is set to I(line), commands are + matched line by line. If match is set to I(strict), command lines are matched + with respect to position. If match is set to I(exact), command lines must be + an equal match. Finally, if match is set to I(none), the module will not attempt + to compare the source configuration with the running configuration on the remote + device. + choices: + - line + - strict + - exact + - none + default: line + replace: + description: + - Instructs the module on the way to perform the configuration on the device. + If the replace argument is set to I(line) then the modified lines are pushed + to the device in configuration mode. If the replace argument is set to I(block) + then the entire command block is pushed to the device in configuration mode + if any line is not correct. + default: line + choices: + - line + - block + multiline_delimiter: + description: + - This argument is used when pushing a multiline configuration element to the + IOS device. It specifies the character to use as the delimiting character. This + only applies to the configuration action. + default: '@' + backup: + description: + - This argument will cause the module to create a full backup of the current C(running-config) + from the remote device before any changes are made. If the C(backup_options) + value is not given, the backup file is written to the C(backup) folder in the + playbook root directory or role root directory, if playbook is part of an ansible + role. If the directory does not exist, it is created. + type: bool + default: 'no' + running_config: + description: + - The module, by default, will connect to the remote device and retrieve the current + running-config to use as a base for comparing against the contents of source. + There are times when it is not desirable to have the task get the current running-config + for every task in a playbook. The I(running_config) argument allows the implementer + to pass in the configuration to use as the base config for comparison. + aliases: + - config + defaults: + description: + - This argument specifies whether or not to collect all defaults when getting + the remote device running config. 
When enabled, the module will get the current
+      config by issuing the command C(show running-config all).
+    type: bool
+    default: 'no'
+  save_when:
+    description:
+    - When changes are made to the device running-configuration, the changes are not
+      copied to non-volatile storage by default. Using this argument will change
+      that behavior. If the argument is set to I(always), then the running-config will
+      always be copied to the startup-config and the I(modified) flag will always
+      be set to True. If the argument is set to I(modified), then the running-config
+      will only be copied to the startup-config if it has changed since the last save
+      to startup-config. If the argument is set to I(never), the running-config will
+      never be copied to the startup-config. If the argument is set to I(changed),
+      then the running-config will only be copied to the startup-config if the task
+      has made a change. I(changed) was added in Ansible 2.5.
+    default: never
+    choices:
+    - always
+    - never
+    - modified
+    - changed
+  diff_against:
+    description:
+    - When using the C(ansible-playbook --diff) command line argument the module can
+      generate diffs against different sources.
+    - When this option is configured as I(startup), the module will return the diff
+      of the running-config against the startup-config.
+    - When this option is configured as I(intended), the module will return the diff
+      of the running-config against the configuration provided in the C(intended_config)
+      argument.
+    - When this option is configured as I(running), the module will return the before
+      and after diff of the running-config with respect to any changes made to the
+      device configuration.
+    choices:
+    - running
+    - startup
+    - intended
+  diff_ignore_lines:
+    description:
+    - Use this argument to specify one or more lines that should be ignored during
+      the diff. This is used for lines in the configuration that are automatically
+      updated by the system. This argument takes a list of regular expressions or
+      exact line matches.
+  intended_config:
+    description:
+    - The C(intended_config) provides the master configuration that the node should
+      conform to and is used to check the final running-config against. This argument
+      will not modify any settings on the remote device and is strictly used to check
+      the compliance of the current device's configuration against it. When specifying
+      this argument, the task should also modify the C(diff_against) value and set
+      it to I(intended).
+  backup_options:
+    description:
+    - This is a dict object containing configurable options related to backup file
+      path. The value of this option is read only when C(backup) is set to I(yes),
+      if C(backup) is set to I(no) this option will be silently ignored.
+    suboptions:
+      filename:
+        description:
+        - The filename to be used to store the backup configuration. If the filename
+          is not given it will be generated based on the hostname, current time and
+          date in format defined by <hostname>_config.<current-date>@<current-time>
+      dir_path:
+        description:
+        - This option provides the path ending with directory name in which the backup
+          configuration file will be stored. If the directory does not exist it will
+          be first created and the filename is either the value of C(filename) or
+          default filename as described in C(filename) options description. If the
+          path value is not given in that case a I(backup) directory will be created
+          in the current working directory and backup configuration will be copied
+          in C(filename) within I(backup) directory.
+ type: path + type: dict +""" + +EXAMPLES = """ +- name: configure top level configuration + ios_config: + lines: hostname {{ inventory_hostname }} + +- name: configure interface settings + ios_config: + lines: + - description test interface + - ip address 172.31.1.1 255.255.255.0 + parents: interface Ethernet1 + +- name: configure ip helpers on multiple interfaces + ios_config: + lines: + - ip helper-address 172.26.1.10 + - ip helper-address 172.26.3.8 + parents: "{{ item }}" + with_items: + - interface Ethernet1 + - interface Ethernet2 + - interface GigabitEthernet1 + +- name: configure policer in Scavenger class + ios_config: + lines: + - conform-action transmit + - exceed-action drop + parents: + - policy-map Foo + - class Scavenger + - police cir 64000 + +- name: load new acl into device + ios_config: + lines: + - 10 permit ip host 192.0.2.1 any log + - 20 permit ip host 192.0.2.2 any log + - 30 permit ip host 192.0.2.3 any log + - 40 permit ip host 192.0.2.4 any log + - 50 permit ip host 192.0.2.5 any log + parents: ip access-list extended test + before: no ip access-list extended test + match: exact + +- name: check the running-config against master config + ios_config: + diff_against: intended + intended_config: "{{ lookup('file', 'master.cfg') }}" + +- name: check the startup-config against the running-config + ios_config: + diff_against: startup + diff_ignore_lines: + - ntp clock .* + +- name: save running to startup when modified + ios_config: + save_when: modified + +- name: for idempotency, use full-form commands + ios_config: + lines: + # - shut + - shutdown + # parents: int gig1/0/11 + parents: interface GigabitEthernet1/0/11 + +# Set boot image based on comparison to a group_var (version) and the version +# that is returned from the `ios_facts` module +- name: SETTING BOOT IMAGE + ios_config: + lines: + - no boot system + - boot system flash bootflash:{{new_image}} + host: "{{ inventory_hostname }}" + when: ansible_net_version != version + +- name: render a Jinja2 template onto an IOS device + ios_config: + backup: yes + src: ios_template.j2 + +- name: configurable backup path + ios_config: + src: ios_template.j2 + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34 +filename: + description: The name of the backup file + returned: when backup is yes and filename is not specified in backup options + type: str + sample: ios_config.2016-07-16@22:28:34 +shortname: + description: The full path to the backup file excluding the timestamp + returned: when backup is yes and filename is not specified in backup options + type: str + sample: /playbooks/ansible/backup/ios_config +date: + description: The date extracted from the backup file name + returned: when backup is yes + type: str + sample: "2016-07-16" +time: + description: The time extracted from the backup file name + returned: when backup is yes + type: str + sample: "22:28:34" +""" +import json + +from ansible.module_utils._text import to_text 
+from ansible.module_utils.connection import ConnectionError +from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import ( + run_commands, + get_config, +) +from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import ( + get_defaults_flag, + get_connection, +) +from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import ( + ios_argument_spec, +) +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import ( + NetworkConfig, + dumps, +) + + +def check_args(module, warnings): + if module.params["multiline_delimiter"]: + if len(module.params["multiline_delimiter"]) != 1: + module.fail_json( + msg="multiline_delimiter value can only be a " + "single character" + ) + + +def edit_config_or_macro(connection, commands): + # only catch the macro configuration command, + # not negated 'no' variation. + if commands[0].startswith("macro name"): + connection.edit_macro(candidate=commands) + else: + connection.edit_config(candidate=commands) + + +def get_candidate_config(module): + candidate = "" + if module.params["src"]: + candidate = module.params["src"] + + elif module.params["lines"]: + candidate_obj = NetworkConfig(indent=1) + parents = module.params["parents"] or list() + candidate_obj.add(module.params["lines"], parents=parents) + candidate = dumps(candidate_obj, "raw") + + return candidate + + +def get_running_config(module, current_config=None, flags=None): + running = module.params["running_config"] + if not running: + if not module.params["defaults"] and current_config: + running = current_config + else: + running = get_config(module, flags=flags) + + return running + + +def save_config(module, result): + result["changed"] = True + if not module.check_mode: + run_commands(module, "copy running-config startup-config\r") + else: + module.warn( + "Skipping command `copy running-config startup-config` " + "due to check_mode. 
Configuration not copied to " + "non-volatile storage" + ) + + +def main(): + """ main entry point for module execution + """ + backup_spec = dict(filename=dict(), dir_path=dict(type="path")) + argument_spec = dict( + src=dict(type="path"), + lines=dict(aliases=["commands"], type="list"), + parents=dict(type="list"), + before=dict(type="list"), + after=dict(type="list"), + match=dict( + default="line", choices=["line", "strict", "exact", "none"] + ), + replace=dict(default="line", choices=["line", "block"]), + multiline_delimiter=dict(default="@"), + running_config=dict(aliases=["config"]), + intended_config=dict(), + defaults=dict(type="bool", default=False), + backup=dict(type="bool", default=False), + backup_options=dict(type="dict", options=backup_spec), + save_when=dict( + choices=["always", "never", "modified", "changed"], default="never" + ), + diff_against=dict(choices=["startup", "intended", "running"]), + diff_ignore_lines=dict(type="list"), + ) + + argument_spec.update(ios_argument_spec) + + mutually_exclusive = [("lines", "src"), ("parents", "src")] + + required_if = [ + ("match", "strict", ["lines"]), + ("match", "exact", ["lines"]), + ("replace", "block", ["lines"]), + ("diff_against", "intended", ["intended_config"]), + ] + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True, + ) + + result = {"changed": False} + + warnings = list() + check_args(module, warnings) + result["warnings"] = warnings + + diff_ignore_lines = module.params["diff_ignore_lines"] + config = None + contents = None + flags = get_defaults_flag(module) if module.params["defaults"] else [] + connection = get_connection(module) + + if module.params["backup"] or ( + module._diff and module.params["diff_against"] == "running" + ): + contents = get_config(module, flags=flags) + config = NetworkConfig(indent=1, contents=contents) + if module.params["backup"]: + result["__backup__"] = contents + + if any((module.params["lines"], module.params["src"])): + match = module.params["match"] + replace = module.params["replace"] + path = module.params["parents"] + + candidate = get_candidate_config(module) + running = get_running_config(module, contents, flags=flags) + try: + response = connection.get_diff( + candidate=candidate, + running=running, + diff_match=match, + diff_ignore_lines=diff_ignore_lines, + path=path, + diff_replace=replace, + ) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + + config_diff = response["config_diff"] + banner_diff = response["banner_diff"] + + if config_diff or banner_diff: + commands = config_diff.split("\n") + + if module.params["before"]: + commands[:0] = module.params["before"] + + if module.params["after"]: + commands.extend(module.params["after"]) + + result["commands"] = commands + result["updates"] = commands + result["banners"] = banner_diff + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + if commands: + edit_config_or_macro(connection, commands) + if banner_diff: + connection.edit_banner( + candidate=json.dumps(banner_diff), + multiline_delimiter=module.params[ + "multiline_delimiter" + ], + ) + + result["changed"] = True + + running_config = module.params["running_config"] + startup_config = None + + if module.params["save_when"] == "always": + save_config(module, result) + elif module.params["save_when"] == "modified": + output = 
run_commands( + module, ["show running-config", "show startup-config"] + ) + + running_config = NetworkConfig( + indent=1, contents=output[0], ignore_lines=diff_ignore_lines + ) + startup_config = NetworkConfig( + indent=1, contents=output[1], ignore_lines=diff_ignore_lines + ) + + if running_config.sha1 != startup_config.sha1: + save_config(module, result) + elif module.params["save_when"] == "changed" and result["changed"]: + save_config(module, result) + + if module._diff: + if not running_config: + output = run_commands(module, "show running-config") + contents = output[0] + else: + contents = running_config + + # recreate the object in order to process diff_ignore_lines + running_config = NetworkConfig( + indent=1, contents=contents, ignore_lines=diff_ignore_lines + ) + + if module.params["diff_against"] == "running": + if module.check_mode: + module.warn( + "unable to perform diff against running-config due to check mode" + ) + contents = None + else: + contents = config.config_text + + elif module.params["diff_against"] == "startup": + if not startup_config: + output = run_commands(module, "show startup-config") + contents = output[0] + else: + contents = startup_config.config_text + + elif module.params["diff_against"] == "intended": + contents = module.params["intended_config"] + + if contents is not None: + base_config = NetworkConfig( + indent=1, contents=contents, ignore_lines=diff_ignore_lines + ) + + if running_config.sha1 != base_config.sha1: + if module.params["diff_against"] == "intended": + before = running_config + after = base_config + elif module.params["diff_against"] in ("startup", "running"): + before = base_config + after = running_config + + result.update( + { + "changed": True, + "diff": {"before": str(before), "after": str(after)}, + } + ) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py new file mode 100644 index 00000000..29f31b0e --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py @@ -0,0 +1,115 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.utils.display import Display + +display = Display() + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + # re.compile(br"^% \w+", re.M), + re.compile(br"% ?Bad secret"), + re.compile(br"[\r\n%] Bad passwords"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Bad mask", re.I), + re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I), + re.compile(br"[%\S] ?Error: ?[\s]+", re.I), + re.compile(br"[%\S] ?Informational: ?[\s]+", re.I), + re.compile(br"Command authorization failed"), + ] + + def on_open_shell(self): + try: + self._exec_cli_command(b"terminal length 0") + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure("unable to set terminal parameters") + + try: + self._exec_cli_command(b"terminal width 512") + try: + self._exec_cli_command(b"terminal width 0") + except AnsibleConnectionFailure: + pass + except AnsibleConnectionFailure: + display.display( + "WARNING: Unable to set terminal width, command responses may be truncated" + ) + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b"#"): + return + + cmd = {u"command": u"enable"} + if passwd: + # Note: python-3.5 cannot combine u"" and r"" together. Thus make + # an r string and use to_text to ensure it's text on both py2 and py3. + cmd[u"prompt"] = to_text( + r"[\r\n]?(?:.*)?[Pp]assword: ?$", errors="surrogate_or_strict" + ) + cmd[u"answer"] = passwd + cmd[u"prompt_retry_check"] = True + try: + self._exec_cli_command( + to_bytes(json.dumps(cmd), errors="surrogate_or_strict") + ) + prompt = self._get_prompt() + if prompt is None or not prompt.endswith(b"#"): + raise AnsibleConnectionFailure( + "failed to elevate privilege to enable mode still at prompt [%s]" + % prompt + ) + except AnsibleConnectionFailure as e: + prompt = self._get_prompt() + raise AnsibleConnectionFailure( + "unable to elevate privilege to enable mode, at prompt [%s] with error: %s" + % (prompt, e.message) + ) + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if b"(config" in prompt: + self._exec_cli_command(b"end") + self._exec_cli_command(b"disable") + + elif prompt.endswith(b"#"): + self._exec_cli_command(b"disable") diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py new file mode 100644 index 00000000..cab2f3fd --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py @@ -0,0 +1,129 @@ +# +# (c) 2016 Red Hat Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import sys +import copy + +from ansible_collections.ansible.netcommon.plugins.action.network import ( + ActionModule as ActionNetworkModule, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + load_provider, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + vyos_provider_spec, +) +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split(".")[-1] + self._config_module = True if module_name == "vyos_config" else False + persistent_connection = self._play_context.connection.split(".")[-1] + warnings = [] + + if persistent_connection == "network_cli": + provider = self._task.args.get("provider", {}) + if any(provider.values()): + display.warning( + "provider is unnecessary when using network_cli and will be ignored" + ) + del self._task.args["provider"] + elif self._play_context.connection == "local": + provider = load_provider(vyos_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = "ansible.netcommon.network_cli" + pc.network_os = "vyos.vyos.vyos" + pc.remote_addr = provider["host"] or self._play_context.remote_addr + pc.port = int(provider["port"] or self._play_context.port or 22) + pc.remote_user = ( + provider["username"] or self._play_context.connection_user + ) + pc.password = provider["password"] or self._play_context.password + pc.private_key_file = ( + provider["ssh_keyfile"] or self._play_context.private_key_file + ) + + connection = self._shared_loader_obj.connection_loader.get( + "ansible.netcommon.persistent", + pc, + sys.stdin, + task_uuid=self._task._uuid, + ) + + # TODO: Remove below code after ansible minimal is cut out + if connection is None: + pc.connection = "network_cli" + pc.network_os = "vyos" + connection = self._shared_loader_obj.connection_loader.get( + "persistent", pc, sys.stdin, task_uuid=self._task._uuid + ) + + display.vvv( + "using connection plugin %s (was local)" % pc.connection, + pc.remote_addr, + ) + + command_timeout = ( + int(provider["timeout"]) + if provider["timeout"] + else connection.get_option("persistent_command_timeout") + ) + connection.set_options( + direct={"persistent_command_timeout": command_timeout} + ) + + socket_path = connection.run() + display.vvvv("socket_path: %s" % socket_path, pc.remote_addr) + if not socket_path: + return { + "failed": True, + "msg": "unable to open shell. 
Please see: " + + "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell", + } + + task_vars["ansible_socket"] = socket_path + warnings.append( + [ + "connection local support for this module is deprecated and will be removed in version 2.14, use connection %s" + % pc.connection + ] + ) + else: + return { + "failed": True, + "msg": "Connection type %s is not valid for this module" + % self._play_context.connection, + } + + result = super(ActionModule, self).run(task_vars=task_vars) + if warnings: + if "warnings" in result: + result["warnings"].extend(warnings) + else: + result["warnings"] = warnings + return result diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py new file mode 100644 index 00000000..30336031 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py @@ -0,0 +1,342 @@ +# +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +author: Ansible Networking Team +cliconf: vyos +short_description: Use vyos cliconf to run command on VyOS platform +description: + - This vyos plugin provides low level abstraction apis for + sending and receiving CLI commands from VyOS network devices. +version_added: "2.4" +""" + +import re +import json + +from ansible.errors import AnsibleConnectionFailure +from ansible.module_utils._text import to_text +from ansible.module_utils.common._collections_compat import Mapping +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import ( + NetworkConfig, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.plugins.cliconf import CliconfBase + + +class Cliconf(CliconfBase): + def get_device_info(self): + device_info = {} + + device_info["network_os"] = "vyos" + reply = self.get("show version") + data = to_text(reply, errors="surrogate_or_strict").strip() + + match = re.search(r"Version:\s*(.*)", data) + if match: + device_info["network_os_version"] = match.group(1) + + match = re.search(r"HW model:\s*(\S+)", data) + if match: + device_info["network_os_model"] = match.group(1) + + reply = self.get("show host name") + device_info["network_os_hostname"] = to_text( + reply, errors="surrogate_or_strict" + ).strip() + + return device_info + + def get_config(self, flags=None, format=None): + if format: + option_values = self.get_option_values() + if format not in option_values["format"]: + raise ValueError( + "'format' value %s is invalid. 
Valid values of format are %s" + % (format, ", ".join(option_values["format"])) + ) + + if not flags: + flags = [] + + if format == "text": + command = "show configuration" + else: + command = "show configuration commands" + + command += " ".join(to_list(flags)) + command = command.strip() + + out = self.send_command(command) + return out + + def edit_config( + self, candidate=None, commit=True, replace=None, comment=None + ): + resp = {} + operations = self.get_device_operations() + self.check_edit_config_capability( + operations, candidate, commit, replace, comment + ) + + results = [] + requests = [] + self.send_command("configure") + for cmd in to_list(candidate): + if not isinstance(cmd, Mapping): + cmd = {"command": cmd} + + results.append(self.send_command(**cmd)) + requests.append(cmd["command"]) + out = self.get("compare") + out = to_text(out, errors="surrogate_or_strict") + diff_config = out if not out.startswith("No changes") else None + + if diff_config: + if commit: + try: + self.commit(comment) + except AnsibleConnectionFailure as e: + msg = "commit failed: %s" % e.message + self.discard_changes() + raise AnsibleConnectionFailure(msg) + else: + self.send_command("exit") + else: + self.discard_changes() + else: + self.send_command("exit") + if ( + to_text( + self._connection.get_prompt(), errors="surrogate_or_strict" + ) + .strip() + .endswith("#") + ): + self.discard_changes() + + if diff_config: + resp["diff"] = diff_config + resp["response"] = results + resp["request"] = requests + return resp + + def get( + self, + command=None, + prompt=None, + answer=None, + sendonly=False, + output=None, + newline=True, + check_all=False, + ): + if not command: + raise ValueError("must provide value of command to execute") + if output: + raise ValueError( + "'output' value %s is not supported for get" % output + ) + + return self.send_command( + command=command, + prompt=prompt, + answer=answer, + sendonly=sendonly, + newline=newline, + check_all=check_all, + ) + + def commit(self, comment=None): + if comment: + command = 'commit comment "{0}"'.format(comment) + else: + command = "commit" + self.send_command(command) + + def discard_changes(self): + self.send_command("exit discard") + + def get_diff( + self, + candidate=None, + running=None, + diff_match="line", + diff_ignore_lines=None, + path=None, + diff_replace=None, + ): + diff = {} + device_operations = self.get_device_operations() + option_values = self.get_option_values() + + if candidate is None and device_operations["supports_generate_diff"]: + raise ValueError( + "candidate configuration is required to generate diff" + ) + + if diff_match not in option_values["diff_match"]: + raise ValueError( + "'match' value %s in invalid, valid values are %s" + % (diff_match, ", ".join(option_values["diff_match"])) + ) + + if diff_replace: + raise ValueError("'replace' in diff is not supported") + + if diff_ignore_lines: + raise ValueError("'diff_ignore_lines' in diff is not supported") + + if path: + raise ValueError("'path' in diff is not supported") + + set_format = candidate.startswith("set") or candidate.startswith( + "delete" + ) + candidate_obj = NetworkConfig(indent=4, contents=candidate) + if not set_format: + config = [c.line for c in candidate_obj.items] + commands = list() + # this filters out less specific lines + for item in config: + for index, entry in enumerate(commands): + if item.startswith(entry): + del commands[index] + break + commands.append(item) + + candidate_commands = [ + "set %s" % cmd.replace(" {", "") for cmd 
in commands + ] + + else: + candidate_commands = str(candidate).strip().split("\n") + + if diff_match == "none": + diff["config_diff"] = list(candidate_commands) + return diff + + running_commands = [ + str(c).replace("'", "") for c in running.splitlines() + ] + + updates = list() + visited = set() + + for line in candidate_commands: + item = str(line).replace("'", "") + + if not item.startswith("set") and not item.startswith("delete"): + raise ValueError( + "line must start with either `set` or `delete`" + ) + + elif item.startswith("set") and item not in running_commands: + updates.append(line) + + elif item.startswith("delete"): + if not running_commands: + updates.append(line) + else: + item = re.sub(r"delete", "set", item) + for entry in running_commands: + if entry.startswith(item) and line not in visited: + updates.append(line) + visited.add(line) + + diff["config_diff"] = list(updates) + return diff + + def run_commands(self, commands=None, check_rc=True): + if commands is None: + raise ValueError("'commands' value is required") + + responses = list() + for cmd in to_list(commands): + if not isinstance(cmd, Mapping): + cmd = {"command": cmd} + + output = cmd.pop("output", None) + if output: + raise ValueError( + "'output' value %s is not supported for run_commands" + % output + ) + + try: + out = self.send_command(**cmd) + except AnsibleConnectionFailure as e: + if check_rc: + raise + out = getattr(e, "err", e) + + responses.append(out) + + return responses + + def get_device_operations(self): + return { + "supports_diff_replace": False, + "supports_commit": True, + "supports_rollback": False, + "supports_defaults": False, + "supports_onbox_diff": True, + "supports_commit_comment": True, + "supports_multiline_delimiter": False, + "supports_diff_match": True, + "supports_diff_ignore_lines": False, + "supports_generate_diff": False, + "supports_replace": False, + } + + def get_option_values(self): + return { + "format": ["text", "set"], + "diff_match": ["line", "none"], + "diff_replace": [], + "output": [], + } + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + result["rpc"] += [ + "commit", + "discard_changes", + "get_diff", + "run_commands", + ] + result["device_operations"] = self.get_device_operations() + result.update(self.get_option_values()) + return json.dumps(result) + + def set_cli_prompt_context(self): + """ + Make sure we are in the operational cli mode + :return: None + """ + if self._connection.connected: + self._update_cli_prompt_context( + config_context="#", exit_command="exit discard" + ) diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py new file mode 100644 index 00000000..094963f1 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r"""options: + provider: + description: + - B(Deprecated) + - 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).' + - For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols). 
+    - HORIZONTALLINE
+    - A dict object containing connection details.
+    type: dict
+    suboptions:
+      host:
+        description:
+        - Specifies the DNS host name or address for connecting to the remote device
+          over the specified transport. The value of host is used as the destination
+          address for the transport.
+        type: str
+        required: true
+      port:
+        description:
+        - Specifies the port to use when building the connection to the remote device.
+        type: int
+        default: 22
+      username:
+        description:
+        - Configures the username to use to authenticate the connection to the remote
+          device. This value is used to authenticate the SSH session. If the value
+          is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
+          will be used instead.
+        type: str
+      password:
+        description:
+        - Specifies the password to use to authenticate the connection to the remote
+          device. This value is used to authenticate the SSH session. If the value
+          is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
+          will be used instead.
+        type: str
+      timeout:
+        description:
+        - Specifies the timeout in seconds for communicating with the network device
+          for either connecting or sending commands. If the timeout is exceeded before
+          the operation is completed, the module will error.
+        type: int
+        default: 10
+      ssh_keyfile:
+        description:
+        - Specifies the SSH key to use to authenticate the connection to the remote
+          device. This value is the path to the key used to authenticate the SSH
+          session. If the value is not specified in the task, the value of environment
+          variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+        type: path
+notes:
+- For more information on using Ansible to manage network devices see the :ref:`Ansible
+  Network Guide <network_guide>`
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py
new file mode 100644
index 00000000..46fabaa2
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py
@@ -0,0 +1,22 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The arg spec for the vyos facts module.
+""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class FactsArgs(object): # pylint: disable=R0903 + """ The arg spec for the vyos facts module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "gather_subset": dict(default=["!config"], type="list"), + "gather_network_resources": dict(type="list"), + } diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py new file mode 100644 index 00000000..a018cc0b --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py @@ -0,0 +1,263 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# +""" +The arg spec for the vyos_firewall_rules module +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class Firewall_rulesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_firewall_rules module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "afi": { + "choices": ["ipv4", "ipv6"], + "required": True, + "type": "str", + }, + "rule_sets": { + "elements": "dict", + "options": { + "default_action": { + "choices": ["drop", "reject", "accept"], + "type": "str", + }, + "description": {"type": "str"}, + "enable_default_log": {"type": "bool"}, + "name": {"type": "str"}, + "rules": { + "elements": "dict", + "options": { + "action": { + "choices": [ + "drop", + "reject", + "accept", + "inspect", + ], + "type": "str", + }, + "description": {"type": "str"}, + "destination": { + "options": { + "address": {"type": "str"}, + "group": { + "options": { + "address_group": { + "type": "str" + }, + "network_group": { + "type": "str" + }, + "port_group": {"type": "str"}, + }, + "type": "dict", + }, + "port": {"type": "str"}, + }, + "type": "dict", + }, + "disabled": {"type": "bool"}, + "fragment": { + "choices": [ + "match-frag", + "match-non-frag", + ], + "type": "str", + }, + "icmp": { + "options": { + "code": {"type": "int"}, + "type": {"type": "int"}, + "type_name": { + "choices": [ + "any", + "echo-reply", + "destination-unreachable", + "network-unreachable", + "host-unreachable", + "protocol-unreachable", + "port-unreachable", + "fragmentation-needed", + "source-route-failed", + "network-unknown", + "host-unknown", + "network-prohibited", + "host-prohibited", + "TOS-network-unreachable", + "TOS-host-unreachable", + "communication-prohibited", + "host-precedence-violation", + "precedence-cutoff", + "source-quench", + "redirect", + "network-redirect", + "host-redirect", + "TOS-network-redirect", + "TOS-host-redirect", + "echo-request", + 
"router-advertisement", + "router-solicitation", + "time-exceeded", + "ttl-zero-during-transit", + "ttl-zero-during-reassembly", + "parameter-problem", + "ip-header-bad", + "required-option-missing", + "timestamp-request", + "timestamp-reply", + "address-mask-request", + "address-mask-reply", + "ping", + "pong", + "ttl-exceeded", + ], + "type": "str", + }, + }, + "type": "dict", + }, + "ipsec": { + "choices": ["match-ipsec", "match-none"], + "type": "str", + }, + "limit": { + "options": { + "burst": {"type": "int"}, + "rate": { + "options": { + "number": {"type": "int"}, + "unit": {"type": "str"}, + }, + "type": "dict", + }, + }, + "type": "dict", + }, + "number": {"required": True, "type": "int"}, + "p2p": { + "elements": "dict", + "options": { + "application": { + "choices": [ + "all", + "applejuice", + "bittorrent", + "directconnect", + "edonkey", + "gnutella", + "kazaa", + ], + "type": "str", + } + }, + "type": "list", + }, + "protocol": {"type": "str"}, + "recent": { + "options": { + "count": {"type": "int"}, + "time": {"type": "int"}, + }, + "type": "dict", + }, + "source": { + "options": { + "address": {"type": "str"}, + "group": { + "options": { + "address_group": { + "type": "str" + }, + "network_group": { + "type": "str" + }, + "port_group": {"type": "str"}, + }, + "type": "dict", + }, + "mac_address": {"type": "str"}, + "port": {"type": "str"}, + }, + "type": "dict", + }, + "state": { + "options": { + "established": {"type": "bool"}, + "invalid": {"type": "bool"}, + "new": {"type": "bool"}, + "related": {"type": "bool"}, + }, + "type": "dict", + }, + "tcp": { + "options": {"flags": {"type": "str"}}, + "type": "dict", + }, + "time": { + "options": { + "monthdays": {"type": "str"}, + "startdate": {"type": "str"}, + "starttime": {"type": "str"}, + "stopdate": {"type": "str"}, + "stoptime": {"type": "str"}, + "utc": {"type": "bool"}, + "weekdays": {"type": "str"}, + }, + "type": "dict", + }, + }, + "type": "list", + }, + }, + "type": "list", + }, + }, + "type": "list", + }, + "running_config": {"type": "str"}, + "state": { + "choices": [ + "merged", + "replaced", + "overridden", + "deleted", + "gathered", + "rendered", + "parsed", + ], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py new file mode 100644 index 00000000..3542cb19 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py @@ -0,0 +1,69 @@ +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# +""" +The arg spec for the vyos_interfaces module +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class InterfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "description": {"type": "str"}, + "duplex": {"choices": ["full", "half", "auto"]}, + "enabled": {"default": True, "type": "bool"}, + "mtu": {"type": "int"}, + "name": {"required": True, "type": "str"}, + "speed": { + "choices": ["auto", "10", "100", "1000", "2500", "10000"], + "type": "str", + }, + "vifs": { + "elements": "dict", + "options": { + "vlan_id": {"type": "int"}, + "description": {"type": "str"}, + "enabled": {"default": True, "type": "bool"}, + "mtu": {"type": "int"}, + }, + "type": "list", + }, + }, + "type": "list", + }, + "state": { + "choices": ["merged", "replaced", "overridden", "deleted"], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py new file mode 100644 index 00000000..91434e4b --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py @@ -0,0 +1,81 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
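+#
+# For orientation only: a minimal task shape accepted by the spec below;
+# the interface name and address are illustrative assumptions.
+#
+#   - name: Merge layer 3 settings (illustrative)
+#     vyos.vyos.vyos_l3_interfaces:
+#       config:
+#         - name: eth1
+#           ipv4:
+#             - address: 192.0.2.10/24
+#       state: merged
+#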
+# +############################################# +""" +The arg spec for the vyos_l3_interfaces module +""" + + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class L3_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_l3_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "ipv4": { + "elements": "dict", + "options": {"address": {"type": "str"}}, + "type": "list", + }, + "ipv6": { + "elements": "dict", + "options": {"address": {"type": "str"}}, + "type": "list", + }, + "name": {"required": True, "type": "str"}, + "vifs": { + "elements": "dict", + "options": { + "ipv4": { + "elements": "dict", + "options": {"address": {"type": "str"}}, + "type": "list", + }, + "ipv6": { + "elements": "dict", + "options": {"address": {"type": "str"}}, + "type": "list", + }, + "vlan_id": {"type": "int"}, + }, + "type": "list", + }, + }, + "type": "list", + }, + "state": { + "choices": ["merged", "replaced", "overridden", "deleted"], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py new file mode 100644 index 00000000..97c5d5a2 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py @@ -0,0 +1,80 @@ +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
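+#
+# For orientation only: a minimal task shape accepted by the spec below;
+# the bond and member names are illustrative assumptions.
+#
+#   - name: Merge LAG settings (illustrative)
+#     vyos.vyos.vyos_lag_interfaces:
+#       config:
+#         - name: bond0
+#           mode: 802.3ad
+#           hash_policy: layer2
+#           members:
+#             - member: eth1
+#       state: merged
+#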
+# +############################################# + +""" +The arg spec for the vyos_lag_interfaces module +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class Lag_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_lag_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "arp_monitor": { + "options": { + "interval": {"type": "int"}, + "target": {"type": "list"}, + }, + "type": "dict", + }, + "hash_policy": { + "choices": ["layer2", "layer2+3", "layer3+4"], + "type": "str", + }, + "members": { + "elements": "dict", + "options": {"member": {"type": "str"}}, + "type": "list", + }, + "mode": { + "choices": [ + "802.3ad", + "active-backup", + "broadcast", + "round-robin", + "transmit-load-balance", + "adaptive-load-balance", + "xor-hash", + ], + "type": "str", + }, + "name": {"required": True, "type": "str"}, + "primary": {"type": "str"}, + }, + "type": "list", + }, + "state": { + "choices": ["merged", "replaced", "overridden", "deleted"], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py new file mode 100644 index 00000000..84bbc00c --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py @@ -0,0 +1,56 @@ +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
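+#
+# Unlike the interface resources above, lldp_global models one global
+# settings block: "config" is a dict rather than a list of dicts, and
+# its "state" choices omit "overridden". Illustrative input:
+#
+#     {"config": {"legacy_protocols": ["cdp", "fdp"]}, "state": "merged"}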
+# +############################################# + +""" +The arg spec for the vyos_lldp_global module +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class Lldp_globalArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_lldp_global module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "options": { + "address": {"type": "str"}, + "enable": {"type": "bool"}, + "legacy_protocols": { + "choices": ["cdp", "edp", "fdp", "sonmp"], + "type": "list", + }, + "snmp": {"type": "str"}, + }, + "type": "dict", + }, + "state": { + "choices": ["merged", "replaced", "deleted"], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py new file mode 100644 index 00000000..2976fc09 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py @@ -0,0 +1,89 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
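+#
+# The spec below nests three alternative location encodings under
+# "location" (civic_based, coordinate_based, elin). An illustrative
+# "config" entry:
+#
+#     {
+#         "name": "eth1",
+#         "location": {
+#             "civic_based": {
+#                 "country_code": "US",
+#                 "ca_info": [{"ca_type": 0, "ca_value": "CA"}],
+#             }
+#         },
+#     }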
+# +############################################# +""" +The arg spec for the vyos_lldp_interfaces module +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class Lldp_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_lldp_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "enable": {"default": True, "type": "bool"}, + "location": { + "options": { + "civic_based": { + "options": { + "ca_info": { + "elements": "dict", + "options": { + "ca_type": {"type": "int"}, + "ca_value": {"type": "str"}, + }, + "type": "list", + }, + "country_code": { + "required": True, + "type": "str", + }, + }, + "type": "dict", + }, + "coordinate_based": { + "options": { + "altitude": {"type": "int"}, + "datum": { + "choices": ["WGS84", "NAD83", "MLLW"], + "type": "str", + }, + "latitude": {"required": True, "type": "str"}, + "longitude": {"required": True, "type": "str"}, + }, + "type": "dict", + }, + "elin": {"type": "str"}, + }, + "type": "dict", + }, + "name": {"required": True, "type": "str"}, + }, + "type": "list", + }, + "state": { + "choices": ["merged", "replaced", "overridden", "deleted"], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py new file mode 100644 index 00000000..8ecd955a --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py @@ -0,0 +1,99 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
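+#
+# This spec differs from the others in this package: it also accepts a
+# "running_config" string and extends "state" with "gathered",
+# "rendered" and "parsed" for offline operation, e.g. (illustrative):
+#
+#     {"running_config": "set protocols static route 10.0.0.0/8 next-hop 192.0.2.1",
+#      "state": "parsed"}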
+# +############################################# +""" +The arg spec for the vyos_static_routes module +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class Static_routesArgs(object): # pylint: disable=R0903 + """The arg spec for the vyos_static_routes module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "address_families": { + "elements": "dict", + "options": { + "afi": { + "choices": ["ipv4", "ipv6"], + "required": True, + "type": "str", + }, + "routes": { + "elements": "dict", + "options": { + "blackhole_config": { + "options": { + "distance": {"type": "int"}, + "type": {"type": "str"}, + }, + "type": "dict", + }, + "dest": {"required": True, "type": "str"}, + "next_hops": { + "elements": "dict", + "options": { + "admin_distance": {"type": "int"}, + "enabled": {"type": "bool"}, + "forward_router_address": { + "required": True, + "type": "str", + }, + "interface": {"type": "str"}, + }, + "type": "list", + }, + }, + "type": "list", + }, + }, + "type": "list", + } + }, + "type": "list", + }, + "running_config": {"type": "str"}, + "state": { + "choices": [ + "merged", + "replaced", + "overridden", + "deleted", + "gathered", + "rendered", + "parsed", + ], + "default": "merged", + "type": "str", + }, + } # pylint: disable=C0301 diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py new file mode 100644 index 00000000..377fec9a --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py @@ -0,0 +1,438 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos_lldp_interfaces class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.facts import ( + Facts, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, + dict_diff, +) +from ansible.module_utils.six import iteritems +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.utils.utils import ( + search_obj_in_list, + search_dict_tv_in_list, + key_value_in_dict, + is_dict_element_present, +) + + +class Lldp_interfaces(ConfigBase): + """ + The vyos_lldp_interfaces class + """ + + gather_subset = [ + "!all", + "!min", + ] + + gather_network_resources = [ + "lldp_interfaces", + ] + + params = ["enable", "location", "name"] + + def __init__(self, module): + super(Lldp_interfaces, self).__init__(module) + + def get_lldp_interfaces_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts( 
+ self.gather_subset, self.gather_network_resources + ) + lldp_interfaces_facts = facts["ansible_network_resources"].get( + "lldp_interfaces" + ) + if not lldp_interfaces_facts: + return [] + return lldp_interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {"changed": False} + commands = list() + warnings = list() + existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts() + commands.extend(self.set_config(existing_lldp_interfaces_facts)) + if commands: + if self._module.check_mode: + resp = self._connection.edit_config(commands, commit=False) + else: + resp = self._connection.edit_config(commands) + result["changed"] = True + + result["commands"] = commands + + if self._module._diff: + result["diff"] = resp["diff"] if result["changed"] else None + + changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts() + result["before"] = existing_lldp_interfaces_facts + if result["changed"]: + result["after"] = changed_lldp_interfaces_facts + + result["warnings"] = warnings + return result + + def set_config(self, existing_lldp_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params["config"] + have = existing_lldp_interfaces_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + state = self._module.params["state"] + if state in ("merged", "replaced", "overridden") and not want: + self._module.fail_json( + msg="value of config parameter must not be empty for state {0}".format( + state + ) + ) + if state == "overridden": + commands.extend(self._state_overridden(want=want, have=have)) + elif state == "deleted": + if want: + for item in want: + name = item["name"] + have_item = search_obj_in_list(name, have) + commands.extend( + self._state_deleted(want=None, have=have_item) + ) + else: + for have_item in have: + commands.extend( + self._state_deleted(want=None, have=have_item) + ) + else: + for want_item in want: + name = want_item["name"] + have_item = search_obj_in_list(name, have) + if state == "merged": + commands.extend( + self._state_merged(want=want_item, have=have_item) + ) + else: + commands.extend( + self._state_replaced(want=want_item, have=have_item) + ) + return commands + + def _state_replaced(self, want, have): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + if have: + commands.extend(self._state_deleted(want, have)) + commands.extend(self._state_merged(want, have)) + return commands + + def _state_overridden(self, want, have): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + for have_item in have: + lldp_name = have_item["name"] + lldp_in_want = 
search_obj_in_list(lldp_name, want) + if not lldp_in_want: + commands.append( + self._compute_command(have_item["name"], remove=True) + ) + + for want_item in want: + name = want_item["name"] + lldp_in_have = search_obj_in_list(name, have) + commands.extend(self._state_replaced(want_item, lldp_in_have)) + return commands + + def _state_merged(self, want, have): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = [] + if have: + commands.extend(self._render_updates(want, have)) + else: + commands.extend(self._render_set_commands(want)) + return commands + + def _state_deleted(self, want, have): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + commands = [] + if want: + params = Lldp_interfaces.params + for attrib in params: + if attrib == "location": + commands.extend( + self._update_location(have["name"], want, have) + ) + + elif have: + commands.append(self._compute_command(have["name"], remove=True)) + return commands + + def _render_updates(self, want, have): + commands = [] + lldp_name = have["name"] + commands.extend(self._configure_status(lldp_name, want, have)) + commands.extend(self._add_location(lldp_name, want, have)) + + return commands + + def _render_set_commands(self, want): + commands = [] + have = {} + lldp_name = want["name"] + params = Lldp_interfaces.params + + commands.extend(self._add_location(lldp_name, want, have)) + for attrib in params: + value = want[attrib] + if value: + if attrib == "location": + commands.extend(self._add_location(lldp_name, want, have)) + elif attrib == "enable": + if not value: + commands.append( + self._compute_command(lldp_name, value="disable") + ) + else: + commands.append(self._compute_command(lldp_name)) + + return commands + + def _configure_status(self, name, want_item, have_item): + commands = [] + if is_dict_element_present(have_item, "enable"): + temp_have_item = False + else: + temp_have_item = True + if want_item["enable"] != temp_have_item: + if want_item["enable"]: + commands.append( + self._compute_command(name, value="disable", remove=True) + ) + else: + commands.append(self._compute_command(name, value="disable")) + return commands + + def _add_location(self, name, want_item, have_item): + commands = [] + have_dict = {} + have_ca = {} + set_cmd = name + " location " + want_location_type = want_item.get("location") or {} + have_location_type = have_item.get("location") or {} + + if want_location_type["coordinate_based"]: + want_dict = want_location_type.get("coordinate_based") or {} + if is_dict_element_present(have_location_type, "coordinate_based"): + have_dict = have_location_type.get("coordinate_based") or {} + location_type = "coordinate-based" + updates = dict_diff(have_dict, want_dict) + for key, value in iteritems(updates): + if value: + commands.append( + self._compute_command( + set_cmd + location_type, key, str(value) + ) + ) + + elif want_location_type["civic_based"]: + location_type = "civic-based" + want_dict = want_location_type.get("civic_based") or {} + want_ca = want_dict.get("ca_info") or [] + if is_dict_element_present(have_location_type, "civic_based"): + have_dict = have_location_type.get("civic_based") or {} + have_ca = have_dict.get("ca_info") or [] + if want_dict["country_code"] != have_dict["country_code"]: + commands.append( + self._compute_command( + 
set_cmd + location_type, + "country-code", + str(want_dict["country_code"]), + ) + ) + else: + commands.append( + self._compute_command( + set_cmd + location_type, + "country-code", + str(want_dict["country_code"]), + ) + ) + commands.extend(self._add_civic_address(name, want_ca, have_ca)) + + elif want_location_type["elin"]: + location_type = "elin" + if is_dict_element_present(have_location_type, "elin"): + if want_location_type.get("elin") != have_location_type.get( + "elin" + ): + commands.append( + self._compute_command( + set_cmd + location_type, + value=str(want_location_type["elin"]), + ) + ) + else: + commands.append( + self._compute_command( + set_cmd + location_type, + value=str(want_location_type["elin"]), + ) + ) + return commands + + def _update_location(self, name, want_item, have_item): + commands = [] + del_cmd = name + " location" + want_location_type = want_item.get("location") or {} + have_location_type = have_item.get("location") or {} + + if want_location_type["coordinate_based"]: + want_dict = want_location_type.get("coordinate_based") or {} + if is_dict_element_present(have_location_type, "coordinate_based"): + have_dict = have_location_type.get("coordinate_based") or {} + location_type = "coordinate-based" + for key, value in iteritems(have_dict): + only_in_have = key_value_in_dict(key, value, want_dict) + if not only_in_have: + commands.append( + self._compute_command( + del_cmd + location_type, key, str(value), True + ) + ) + else: + commands.append(self._compute_command(del_cmd, remove=True)) + + elif want_location_type["civic_based"]: + want_dict = want_location_type.get("civic_based") or {} + want_ca = want_dict.get("ca_info") or [] + if is_dict_element_present(have_location_type, "civic_based"): + have_dict = have_location_type.get("civic_based") or {} + have_ca = have_dict.get("ca_info") + commands.extend( + self._update_civic_address(name, want_ca, have_ca) + ) + else: + commands.append(self._compute_command(del_cmd, remove=True)) + + else: + if is_dict_element_present(have_location_type, "elin"): + if want_location_type.get("elin") != have_location_type.get( + "elin" + ): + commands.append( + self._compute_command(del_cmd, remove=True) + ) + else: + commands.append(self._compute_command(del_cmd, remove=True)) + return commands + + def _add_civic_address(self, name, want, have): + commands = [] + for item in want: + ca_type = item["ca_type"] + ca_value = item["ca_value"] + obj_in_have = search_dict_tv_in_list( + ca_type, ca_value, have, "ca_type", "ca_value" + ) + if not obj_in_have: + commands.append( + self._compute_command( + key=name + " location civic-based ca-type", + attrib=str(ca_type) + " ca-value", + value=ca_value, + ) + ) + return commands + + def _update_civic_address(self, name, want, have): + commands = [] + for item in have: + ca_type = item["ca_type"] + ca_value = item["ca_value"] + in_want = search_dict_tv_in_list( + ca_type, ca_value, want, "ca_type", "ca_value" + ) + if not in_want: + commands.append( + self._compute_command( + name, + "location civic-based ca-type", + str(ca_type), + remove=True, + ) + ) + return commands + + def _compute_command(self, key, attrib=None, value=None, remove=False): + if remove: + cmd = "delete service lldp interface " + else: + cmd = "set service lldp interface " + cmd += key + if attrib: + cmd += " " + attrib + if value: + cmd += " '" + value + "'" + return cmd diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py 
b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py new file mode 100644 index 00000000..8f0a3bb6 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py @@ -0,0 +1,83 @@ +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The facts class for vyos +this file validates each subset of facts and selectively +calls the appropriate facts gathering function +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import ( + FactsBase, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.interfaces.interfaces import ( + InterfacesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.l3_interfaces.l3_interfaces import ( + L3_interfacesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lag_interfaces.lag_interfaces import ( + Lag_interfacesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lldp_global.lldp_global import ( + Lldp_globalFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lldp_interfaces.lldp_interfaces import ( + Lldp_interfacesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.firewall_rules.firewall_rules import ( + Firewall_rulesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.static_routes.static_routes import ( + Static_routesFacts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.legacy.base import ( + Default, + Neighbors, + Config, +) + + +FACT_LEGACY_SUBSETS = dict(default=Default, neighbors=Neighbors, config=Config) +FACT_RESOURCE_SUBSETS = dict( + interfaces=InterfacesFacts, + l3_interfaces=L3_interfacesFacts, + lag_interfaces=Lag_interfacesFacts, + lldp_global=Lldp_globalFacts, + lldp_interfaces=Lldp_interfacesFacts, + static_routes=Static_routesFacts, + firewall_rules=Firewall_rulesFacts, +) + + +class Facts(FactsBase): + """ The fact class for vyos + """ + + VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys()) + VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys()) + + def __init__(self, module): + super(Facts, self).__init__(module) + + def get_facts( + self, legacy_facts_type=None, resource_facts_type=None, data=None + ): + """ Collect the facts for vyos + :param legacy_facts_type: List of legacy facts types + :param resource_facts_type: List of resource fact types + :param data: previously collected conf + :rtype: dict + :return: the facts gathered + """ + if self.VALID_RESOURCE_SUBSETS: + self.get_network_resources_facts( + FACT_RESOURCE_SUBSETS, resource_facts_type, data + ) + if self.VALID_LEGACY_GATHER_SUBSETS: + self.get_network_legacy_facts( + FACT_LEGACY_SUBSETS, legacy_facts_type + ) + return self.ansible_facts, self._warnings diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py new file mode 100644 index 00000000..971ea6fe --- /dev/null +++ 
b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py @@ -0,0 +1,380 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos firewall_rules fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from re import findall, search, M +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.firewall_rules.firewall_rules import ( + Firewall_rulesArgs, +) + + +class Firewall_rulesFacts(object): + """ The vyos firewall_rules fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = Firewall_rulesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_device_data(self, connection): + return connection.get_config() + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for firewall_rules + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_device_data(connection) + # split the config into instances of the resource + objs = [] + v6_rules = findall( + r"^set firewall ipv6-name (?:\'*)(\S+)(?:\'*)", data, M + ) + v4_rules = findall(r"^set firewall name (?:\'*)(\S+)(?:\'*)", data, M) + if v6_rules: + config = self.get_rules(data, v6_rules, type="ipv6") + if config: + config = utils.remove_empties(config) + objs.append(config) + if v4_rules: + config = self.get_rules(data, v4_rules, type="ipv4") + if config: + config = utils.remove_empties(config) + objs.append(config) + + ansible_facts["ansible_network_resources"].pop("firewall_rules", None) + facts = {} + if objs: + facts["firewall_rules"] = [] + params = utils.validate_config( + self.argument_spec, {"config": objs} + ) + for cfg in params["config"]: + facts["firewall_rules"].append(utils.remove_empties(cfg)) + + ansible_facts["ansible_network_resources"].update(facts) + return ansible_facts + + def get_rules(self, data, rules, type): + """ + This function performs following: + - Form regex to fetch 'rule-sets' specific config from data. + - Form the rule-set list based on ip address. + :param data: configuration. + :param rules: list of rule-sets. + :param type: ip address type. + :return: generated rule-sets configuration. 
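+
+        Example: for the device line
+        "set firewall name OUTBOUND rule 10 action 'accept'" (illustrative),
+        populate_facts() captures the rule-set name OUTBOUND, and this
+        function then collects its rule lines via the regex " OUTBOUND .+$".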
+ """ + r_v4 = [] + r_v6 = [] + for r in set(rules): + rule_regex = r" %s .+$" % r.strip("'") + cfg = findall(rule_regex, data, M) + fr = self.render_config(cfg, r.strip("'")) + fr["name"] = r.strip("'") + if type == "ipv6": + r_v6.append(fr) + else: + r_v4.append(fr) + if r_v4: + config = {"afi": "ipv4", "rule_sets": r_v4} + if r_v6: + config = {"afi": "ipv6", "rule_sets": r_v6} + return config + + def render_config(self, conf, match): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + conf = "\n".join(filter(lambda x: x, conf)) + a_lst = ["description", "default_action", "enable_default_log"] + config = self.parse_attr(conf, a_lst, match) + if not config: + config = {} + config["rules"] = self.parse_rules_lst(conf) + return config + + def parse_rules_lst(self, conf): + """ + This function forms the regex to fetch the 'rules' with in + 'rule-sets' + :param conf: configuration data. + :return: generated rule list configuration. + """ + r_lst = [] + rules = findall(r"rule (?:\'*)(\d+)(?:\'*)", conf, M) + if rules: + rules_lst = [] + for r in set(rules): + r_regex = r" %s .+$" % r + cfg = "\n".join(findall(r_regex, conf, M)) + obj = self.parse_rules(cfg) + obj["number"] = int(r) + if obj: + rules_lst.append(obj) + r_lst = sorted(rules_lst, key=lambda i: i["number"]) + return r_lst + + def parse_rules(self, conf): + """ + This function triggers the parsing of 'rule' attributes. + a_lst is a list having rule attributes which doesn't + have further sub attributes. + :param conf: configuration + :return: generated rule configuration dictionary. + """ + a_lst = [ + "ipsec", + "action", + "protocol", + "fragment", + "disabled", + "description", + ] + rule = self.parse_attr(conf, a_lst) + r_sub = { + "p2p": self.parse_p2p(conf), + "tcp": self.parse_tcp(conf, "tcp"), + "icmp": self.parse_icmp(conf, "icmp"), + "time": self.parse_time(conf, "time"), + "limit": self.parse_limit(conf, "limit"), + "state": self.parse_state(conf, "state"), + "recent": self.parse_recent(conf, "recent"), + "source": self.parse_src_or_dest(conf, "source"), + "destination": self.parse_src_or_dest(conf, "destination"), + } + rule.update(r_sub) + return rule + + def parse_p2p(self, conf): + """ + This function forms the regex to fetch the 'p2p' with in + 'rules' + :param conf: configuration data. + :return: generated rule list configuration. + """ + a_lst = [] + applications = findall(r"p2p (?:\'*)(\d+)(?:\'*)", conf, M) + if applications: + app_lst = [] + for r in set(applications): + obj = {"application": r.strip("'")} + app_lst.append(obj) + a_lst = sorted(app_lst, key=lambda i: i["application"]) + return a_lst + + def parse_src_or_dest(self, conf, attrib=None): + """ + This function triggers the parsing of 'source or + destination' attributes. + :param conf: configuration. + :param attrib:'source/destination'. + :return:generated source/destination configuration dictionary. + """ + a_lst = ["port", "address", "mac_address"] + cfg_dict = self.parse_attr(conf, a_lst, match=attrib) + cfg_dict["group"] = self.parse_group(conf, attrib + " group") + return cfg_dict + + def parse_recent(self, conf, attrib=None): + """ + This function triggers the parsing of 'recent' attributes + :param conf: configuration. + :param attrib: 'recent'. + :return: generated config dictionary. 
+        """
+        a_lst = ["time", "count"]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_tcp(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'tcp' attributes.
+        :param conf: configuration.
+        :param attrib: 'tcp'.
+        :return: generated config dictionary.
+        """
+        cfg_dict = self.parse_attr(conf, ["flags"], match=attrib)
+        return cfg_dict
+
+    def parse_time(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'time' attributes.
+        :param conf: configuration.
+        :param attrib: 'time'.
+        :return: generated config dictionary.
+        """
+        a_lst = [
+            "stopdate",
+            "stoptime",
+            "weekdays",
+            "monthdays",
+            "startdate",
+            "starttime",
+        ]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_state(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'state' attributes.
+        :param conf: configuration.
+        :param attrib: 'state'.
+        :return: generated config dictionary.
+        """
+        a_lst = ["new", "invalid", "related", "established"]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_group(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'group' attributes.
+        :param conf: configuration.
+        :param attrib: 'group'.
+        :return: generated config dictionary.
+        """
+        a_lst = ["port_group", "address_group", "network_group"]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_icmp(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'icmp' attributes.
+        :param conf: configuration to be parsed.
+        :param attrib: 'icmp'.
+        :return: generated config dictionary.
+        """
+        a_lst = ["code", "type", "type_name"]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_limit(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'limit' attributes.
+        :param conf: configuration to be parsed.
+        :param attrib: 'limit'.
+        :return: generated config dictionary.
+        """
+        cfg_dict = self.parse_attr(conf, ["burst"], match=attrib)
+        cfg_dict["rate"] = self.parse_rate(conf, "rate")
+        return cfg_dict
+
+    def parse_rate(self, conf, attrib=None):
+        """
+        This function triggers the parsing of 'rate' attributes.
+        :param conf: configuration.
+        :param attrib: 'rate'.
+        :return: generated config dictionary.
+        """
+        a_lst = ["unit", "number"]
+        cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+        return cfg_dict
+
+    def parse_attr(self, conf, attr_list, match=None):
+        """
+        This function performs the following:
+        - Forms the regex to fetch the required attribute config.
+        - Type casts the output into the desired format.
+        :param conf: configuration.
+        :param attr_list: list of attributes.
+        :param match: parent node/attribute name.
+        :return: generated config dictionary.
+        """
+        config = {}
+        for attrib in attr_list:
+            regex = self.map_regex(attrib)
+            if match:
+                regex = match + " " + regex
+            if conf:
+                if self.is_bool(attrib):
+                    out = conf.find(attrib.replace("_", "-"))
+
+                    dis = conf.find(attrib.replace("_", "-") + " 'disable'")
+                    if out >= 1:
+                        if dis >= 1:
+                            config[attrib] = False
+                        else:
+                            config[attrib] = True
+                else:
+                    out = search(r"^.*" + regex + " (.+)", conf, M)
+                    if out:
+                        val = out.group(1).strip("'")
+                        if self.is_num(attrib):
+                            val = int(val)
+                        config[attrib] = val
+        return config
+
+    def map_regex(self, attrib):
+        """
+        - This function constructs the regex string.
+        - It replaces the underscore with a hyphen.
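+          e.g. "enable_default_log" -> "enable-default-log"; the one
+          special case is "disabled", which maps to "disable".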
+ :param attrib: attribute + :return: regex string + """ + regex = attrib.replace("_", "-") + if attrib == "disabled": + regex = "disable" + return regex + + def is_bool(self, attrib): + """ + This function looks for the attribute in predefined bool type set. + :param attrib: attribute. + :return: True/False + """ + bool_set = ( + "new", + "invalid", + "related", + "disabled", + "established", + "enable_default_log", + ) + return True if attrib in bool_set else False + + def is_num(self, attrib): + """ + This function looks for the attribute in predefined integer type set. + :param attrib: attribute. + :return: True/false. + """ + num_set = ("time", "code", "type", "count", "burst", "number") + return True if attrib in num_set else False diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py new file mode 100644 index 00000000..4b24803b --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py @@ -0,0 +1,134 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from re import findall, M +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.interfaces.interfaces import ( + InterfacesArgs, +) + + +class InterfacesFacts(object): + """ The vyos interfaces fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = InterfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = connection.get_config(flags=["| grep interfaces"]) + + objs = [] + interface_names = findall( + r"^set interfaces (?:ethernet|bonding|vti|loopback|vxlan) (?:\'*)(\S+)(?:\'*)", + data, + M, + ) + if interface_names: + for interface in set(interface_names): + intf_regex = r" %s .+$" % interface.strip("'") + cfg = findall(intf_regex, data, M) + obj = self.render_config(cfg) + obj["name"] = interface.strip("'") + if obj: + objs.append(obj) + facts = {} + if objs: + facts["interfaces"] = [] + params = utils.validate_config( + self.argument_spec, {"config": objs} + ) + for cfg in params["config"]: + facts["interfaces"].append(utils.remove_empties(cfg)) + + ansible_facts["ansible_network_resources"].update(facts) + return ansible_facts + + def 
render_config(self, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + vif_conf = "\n".join(filter(lambda x: ("vif" in x), conf)) + eth_conf = "\n".join(filter(lambda x: ("vif" not in x), conf)) + config = self.parse_attribs( + ["description", "speed", "mtu", "duplex"], eth_conf + ) + config["vifs"] = self.parse_vifs(vif_conf) + + return utils.remove_empties(config) + + def parse_vifs(self, conf): + vif_names = findall(r"vif (?:\'*)(\d+)(?:\'*)", conf, M) + vifs_list = None + + if vif_names: + vifs_list = [] + for vif in set(vif_names): + vif_regex = r" %s .+$" % vif + cfg = "\n".join(findall(vif_regex, conf, M)) + obj = self.parse_attribs(["description", "mtu"], cfg) + obj["vlan_id"] = int(vif) + if obj: + vifs_list.append(obj) + vifs_list = sorted(vifs_list, key=lambda i: i["vlan_id"]) + + return vifs_list + + def parse_attribs(self, attribs, conf): + config = {} + for item in attribs: + value = utils.parse_conf_arg(conf, item) + if value and item == "mtu": + config[item] = int(value.strip("'")) + elif value: + config[item] = value.strip("'") + else: + config[item] = None + if "disable" in conf: + config["enabled"] = False + else: + config["enabled"] = True + + return utils.remove_empties(config) diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py new file mode 100644 index 00000000..d1d62c23 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py @@ -0,0 +1,143 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos l3_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
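+
+Example: the device line
+"set interfaces ethernet eth0 address '198.51.100.2/24'" (illustrative)
+is rendered as {"name": "eth0", "ipv4": [{"address": "198.51.100.2/24"}]}.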
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +import re +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.compat import ( + ipaddress, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.l3_interfaces.l3_interfaces import ( + L3_interfacesArgs, +) + + +class L3_interfacesFacts(object): + """ The vyos l3_interfaces fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = L3_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for l3_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = connection.get_config() + + # operate on a collection of resource x + objs = [] + interface_names = re.findall( + r"set interfaces (?:ethernet|bonding|vti|vxlan) (?:\'*)(\S+)(?:\'*)", + data, + re.M, + ) + if interface_names: + for interface in set(interface_names): + intf_regex = r" %s .+$" % interface + cfg = re.findall(intf_regex, data, re.M) + obj = self.render_config(cfg) + obj["name"] = interface.strip("'") + if obj: + objs.append(obj) + + ansible_facts["ansible_network_resources"].pop("l3_interfaces", None) + facts = {} + if objs: + facts["l3_interfaces"] = [] + params = utils.validate_config( + self.argument_spec, {"config": objs} + ) + for cfg in params["config"]: + facts["l3_interfaces"].append(utils.remove_empties(cfg)) + + ansible_facts["ansible_network_resources"].update(facts) + return ansible_facts + + def render_config(self, conf): + """ + Render config as dictionary structure and delete keys from spec for null values + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + vif_conf = "\n".join(filter(lambda x: ("vif" in x), conf)) + eth_conf = "\n".join(filter(lambda x: ("vif" not in x), conf)) + config = self.parse_attribs(eth_conf) + config["vifs"] = self.parse_vifs(vif_conf) + + return utils.remove_empties(config) + + def parse_vifs(self, conf): + vif_names = re.findall(r"vif (\d+)", conf, re.M) + vifs_list = None + if vif_names: + vifs_list = [] + for vif in set(vif_names): + vif_regex = r" %s .+$" % vif + cfg = "\n".join(re.findall(vif_regex, conf, re.M)) + obj = self.parse_attribs(cfg) + obj["vlan_id"] = vif + if obj: + vifs_list.append(obj) + + return vifs_list + + def parse_attribs(self, conf): + config = {} + ipaddrs = re.findall(r"address (\S+)", conf, re.M) + config["ipv4"] = [] + config["ipv6"] = [] + + for item in ipaddrs: + item = item.strip("'") + if item == "dhcp": + config["ipv4"].append({"address": item}) + elif item == "dhcpv6": + config["ipv6"].append({"address": item}) + else: + ip_version = ipaddress.ip_address(item.split("/")[0]).version + if ip_version == 4: + config["ipv4"].append({"address": item}) + else: + config["ipv6"].append({"address": item}) + + 
for key, value in iteritems(config): + if value == []: + config[key] = None + + return utils.remove_empties(config) diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py new file mode 100644 index 00000000..9201e5c6 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py @@ -0,0 +1,152 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos lag_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +from re import findall, search, M +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lag_interfaces.lag_interfaces import ( + Lag_interfacesArgs, +) + + +class Lag_interfacesFacts(object): + """ The vyos lag_interfaces fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = Lag_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for lag_interfaces + :param module: the module instance + :param connection: the device connection + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = connection.get_config() + + objs = [] + lag_names = findall(r"^set interfaces bonding (\S+)", data, M) + if lag_names: + for lag in set(lag_names): + lag_regex = r" %s .+$" % lag + cfg = findall(lag_regex, data, M) + obj = self.render_config(cfg) + + output = connection.run_commands( + ["show interfaces bonding " + lag + " slaves"] + ) + lines = output[0].splitlines() + members = [] + member = {} + if len(lines) > 1: + for line in lines[2:]: + splitted_line = line.split() + + if len(splitted_line) > 1: + member["member"] = splitted_line[0] + members.append(member) + else: + members = [] + member = {} + obj["name"] = lag.strip("'") + if members: + obj["members"] = members + + if obj: + objs.append(obj) + + facts = {} + if objs: + facts["lag_interfaces"] = [] + params = utils.validate_config( + self.argument_spec, {"config": objs} + ) + for cfg in params["config"]: + facts["lag_interfaces"].append(utils.remove_empties(cfg)) + + ansible_facts["ansible_network_resources"].update(facts) + return ansible_facts + + def render_config(self, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + arp_monitor_conf = "\n".join( + 
filter(lambda x: ("arp-monitor" in x), conf) + ) + hash_policy_conf = "\n".join( + filter(lambda x: ("hash-policy" in x), conf) + ) + lag_conf = "\n".join(filter(lambda x: ("bond" in x), conf)) + config = self.parse_attribs(["mode", "primary"], lag_conf) + config["arp_monitor"] = self.parse_arp_monitor(arp_monitor_conf) + config["hash_policy"] = self.parse_hash_policy(hash_policy_conf) + + return utils.remove_empties(config) + + def parse_attribs(self, attribs, conf): + config = {} + for item in attribs: + value = utils.parse_conf_arg(conf, item) + if value: + config[item] = value.strip("'") + else: + config[item] = None + return utils.remove_empties(config) + + def parse_arp_monitor(self, conf): + arp_monitor = None + if conf: + arp_monitor = {} + target_list = [] + interval = search(r"^.*arp-monitor interval (.+)", conf, M) + targets = findall(r"^.*arp-monitor target '(.+)'", conf, M) + if targets: + for target in targets: + target_list.append(target) + arp_monitor["target"] = target_list + if interval: + value = interval.group(1).strip("'") + arp_monitor["interval"] = int(value) + return arp_monitor + + def parse_hash_policy(self, conf): + hash_policy = None + if conf: + hash_policy = search(r"^.*hash-policy (.+)", conf, M) + hash_policy = hash_policy.group(1).strip("'") + return hash_policy diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py new file mode 100644 index 00000000..f6b343e0 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The VyOS interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
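+
+The legacy subsets implemented here are "default" (from "show version"),
+"config" (from "show configuration commands" and "show system commit")
+and "neighbors" (from the "show lldp neighbors" commands).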
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import platform +import re +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + run_commands, + get_capabilities, +) + + +class LegacyFactsBase(object): + + COMMANDS = frozenset() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.warnings = list() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, list(self.COMMANDS)) + + +class Default(LegacyFactsBase): + + COMMANDS = [ + "show version", + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + self.facts["serialnum"] = self.parse_serialnum(data) + self.facts.update(self.platform_facts()) + + def parse_serialnum(self, data): + match = re.search(r"HW S/N:\s+(\S+)", data) + if match: + return match.group(1) + + def platform_facts(self): + platform_facts = {} + + resp = get_capabilities(self.module) + device_info = resp["device_info"] + + platform_facts["system"] = device_info["network_os"] + + for item in ("model", "image", "version", "platform", "hostname"): + val = device_info.get("network_os_%s" % item) + if val: + platform_facts[item] = val + + platform_facts["api"] = resp["network_api"] + platform_facts["python_version"] = platform.python_version() + + return platform_facts + + +class Config(LegacyFactsBase): + + COMMANDS = [ + "show configuration commands", + "show system commit", + ] + + def populate(self): + super(Config, self).populate() + + self.facts["config"] = self.responses + + commits = self.responses[1] + entries = list() + entry = None + + for line in commits.split("\n"): + match = re.match(r"(\d+)\s+(.+)by(.+)via(.+)", line) + if match: + if entry: + entries.append(entry) + + entry = dict( + revision=match.group(1), + datetime=match.group(2), + by=str(match.group(3)).strip(), + via=str(match.group(4)).strip(), + comment=None, + ) + else: + entry["comment"] = line.strip() + + self.facts["commits"] = entries + + +class Neighbors(LegacyFactsBase): + + COMMANDS = [ + "show lldp neighbors", + "show lldp neighbors detail", + ] + + def populate(self): + super(Neighbors, self).populate() + + all_neighbors = self.responses[0] + if "LLDP not configured" not in all_neighbors: + neighbors = self.parse(self.responses[1]) + self.facts["neighbors"] = self.parse_neighbors(neighbors) + + def parse(self, data): + parsed = list() + values = None + for line in data.split("\n"): + if not line: + continue + elif line[0] == " ": + values += "\n%s" % line + elif line.startswith("Interface"): + if values: + parsed.append(values) + values = line + if values: + parsed.append(values) + return parsed + + def parse_neighbors(self, data): + facts = dict() + for item in data: + interface = self.parse_interface(item) + host = self.parse_host(item) + port = self.parse_port(item) + if interface not in facts: + facts[interface] = list() + facts[interface].append(dict(host=host, port=port)) + return facts + + def parse_interface(self, data): + match = re.search(r"^Interface:\s+(\S+),", data) + return match.group(1) + + def parse_host(self, data): + match = re.search(r"SysName:\s+(.+)$", data, re.M) + if match: + return match.group(1) + + def parse_port(self, data): + match = re.search(r"PortDescr:\s+(.+)$", data, re.M) + if match: + return match.group(1) diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py 
b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py new file mode 100644 index 00000000..3c7e2f93 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py @@ -0,0 +1,116 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos lldp_global fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from re import findall, M +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_global.lldp_global import ( + Lldp_globalArgs, +) + + +class Lldp_globalFacts(object): + """ The vyos lldp_global fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = Lldp_globalArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for lldp_global + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = connection.get_config() + + objs = {} + lldp_output = findall(r"^set service lldp (\S+)", data, M) + if lldp_output: + for item in set(lldp_output): + lldp_regex = r" %s .+$" % item + cfg = findall(lldp_regex, data, M) + obj = self.render_config(cfg) + if obj: + objs.update(obj) + lldp_service = findall(r"^set service (lldp)?('lldp')", data, M) + if lldp_service or lldp_output: + lldp_obj = {} + lldp_obj["enable"] = True + objs.update(lldp_obj) + + facts = {} + params = utils.validate_config(self.argument_spec, {"config": objs}) + facts["lldp_global"] = utils.remove_empties(params["config"]) + + ansible_facts["ansible_network_resources"].update(facts) + + return ansible_facts + + def render_config(self, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + protocol_conf = "\n".join( + filter(lambda x: ("legacy-protocols" in x), conf) + ) + att_conf = "\n".join( + filter(lambda x: ("legacy-protocols" not in x), conf) + ) + config = self.parse_attribs(["snmp", "address"], att_conf) + config["legacy_protocols"] = self.parse_protocols(protocol_conf) + return utils.remove_empties(config) + + def parse_protocols(self, conf): + protocol_support = None + if conf: + protocols = findall(r"^.*legacy-protocols (.+)", conf, M) + if protocols: + protocol_support = [] + for protocol in protocols: + protocol_support.append(protocol.strip("'")) + return protocol_support + + def parse_attribs(self, attribs, conf): + config = {} + for item in attribs: + 
value = utils.parse_conf_arg(conf, item)
+            if value:
+                config[item] = value.strip("'")
+            else:
+                config[item] = None
+        return utils.remove_empties(config)
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..dcfbc6ee
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,154 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos lldp_interfaces fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from re import findall, search, M
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+    utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_interfaces.lldp_interfaces import (
+    Lldp_interfacesArgs,
+)
+
+
+class Lldp_interfacesFacts(object):
+    """ The vyos lldp_interfaces fact class
+    """
+
+    def __init__(self, module, subspec="config", options="options"):
+        self._module = module
+        self.argument_spec = Lldp_interfacesArgs.argument_spec
+        spec = deepcopy(self.argument_spec)
+        if subspec:
+            if options:
+                facts_argument_spec = spec[subspec][options]
+            else:
+                facts_argument_spec = spec[subspec]
+        else:
+            facts_argument_spec = spec
+
+        self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+    def populate_facts(self, connection, ansible_facts, data=None):
+        """ Populate the facts for lldp_interfaces
+        :param connection: the device connection
+        :param ansible_facts: Facts dictionary
+        :param data: previously collected conf
+        :rtype: dictionary
+        :returns: facts
+        """
+        if not data:
+            data = connection.get_config()
+
+        objs = []
+        lldp_names = findall(r"^set service lldp interface (\S+)", data, M)
+        if lldp_names:
+            for lldp in set(lldp_names):
+                lldp_regex = r" %s .+$" % lldp
+                cfg = findall(lldp_regex, data, M)
+                obj = self.render_config(cfg)
+                obj["name"] = lldp.strip("'")
+                if obj:
+                    objs.append(obj)
+        facts = {}
+        if objs:
+            facts["lldp_interfaces"] = objs
+
+        ansible_facts["ansible_network_resources"].update(facts)
+        return ansible_facts
+
+    def render_config(self, conf):
+        """
+        Render config as dictionary structure and delete keys
+        from spec for null values
+
+        :param spec: The facts tree, generated from the argspec
+        :param conf: The configuration
+        :rtype: dictionary
+        :returns: The generated config
+        """
+        config = {}
+        location = {}
+
+        civic_conf = "\n".join(filter(lambda x: ("civic-based" in x), conf))
+        elin_conf = "\n".join(filter(lambda x: ("elin" in x), conf))
+        coordinate_conf = "\n".join(
+            filter(lambda x: ("coordinate-based" in x), conf)
+        )
+        disable = "\n".join(filter(lambda x: ("disable" in x), conf))
+
+        coordinate_based_conf = self.parse_attribs(
+            ["altitude", "datum", "longitude", "latitude"], coordinate_conf
+        )
+        elin_based_conf = 
self.parse_lldp_elin_based(elin_conf) + civic_based_conf = self.parse_lldp_civic_based(civic_conf) + if disable: + config["enable"] = False + if coordinate_conf: + location["coordinate_based"] = coordinate_based_conf + config["location"] = location + elif civic_based_conf: + location["civic_based"] = civic_based_conf + config["location"] = location + elif elin_conf: + location["elin"] = elin_based_conf + config["location"] = location + + return utils.remove_empties(config) + + def parse_attribs(self, attribs, conf): + config = {} + for item in attribs: + value = utils.parse_conf_arg(conf, item) + if value: + value = value.strip("'") + if item == "altitude": + value = int(value) + config[item] = value + else: + config[item] = None + return utils.remove_empties(config) + + def parse_lldp_civic_based(self, conf): + civic_based = None + if conf: + civic_info_list = [] + civic_add_list = findall(r"^.*civic-based ca-type (.+)", conf, M) + if civic_add_list: + for civic_add in civic_add_list: + ca = civic_add.split(" ") + c_add = {} + c_add["ca_type"] = int(ca[0].strip("'")) + c_add["ca_value"] = ca[2].strip("'") + civic_info_list.append(c_add) + + country_code = search( + r"^.*civic-based country-code (.+)", conf, M + ) + civic_based = {} + civic_based["ca_info"] = civic_info_list + civic_based["country_code"] = country_code.group(1).strip("'") + return civic_based + + def parse_lldp_elin_based(self, conf): + elin_based = None + if conf: + e_num = search(r"^.* elin (.+)", conf, M) + elin_based = e_num.group(1).strip("'") + + return elin_based diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py new file mode 100644 index 00000000..00049475 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py @@ -0,0 +1,181 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The vyos static_routes fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
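For illustration (an editorial sketch, not upstream text), a config line such as
    set protocols static route 192.0.2.0/24 next-hop 198.51.100.1 distance '2'
is rendered into the facts tree as roughly
    {"address_families": [{"afi": "ipv4", "routes": [{"dest": "192.0.2.0/24",
      "next_hops": [{"forward_router_address": "198.51.100.1",
                     "admin_distance": 2}]}]}]}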
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +from re import findall, search, M +from copy import deepcopy +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.static_routes.static_routes import ( + Static_routesArgs, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.utils.utils import ( + get_route_type, +) + + +class Static_routesFacts(object): + """ The vyos static_routes fact class + """ + + def __init__(self, module, subspec="config", options="options"): + self._module = module + self.argument_spec = Static_routesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_device_data(self, connection): + return connection.get_config() + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for static_routes + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = self.get_device_data(connection) + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + objs = [] + r_v4 = [] + r_v6 = [] + af = [] + static_routes = findall( + r"set protocols static route(6)? (\S+)", data, M + ) + if static_routes: + for route in set(static_routes): + route_regex = r" %s .+$" % route[1] + cfg = findall(route_regex, data, M) + sr = self.render_config(cfg) + sr["dest"] = route[1].strip("'") + afi = self.get_afi(sr["dest"]) + if afi == "ipv4": + r_v4.append(sr) + else: + r_v6.append(sr) + if r_v4: + afi_v4 = {"afi": "ipv4", "routes": r_v4} + af.append(afi_v4) + if r_v6: + afi_v6 = {"afi": "ipv6", "routes": r_v6} + af.append(afi_v6) + config = {"address_families": af} + if config: + objs.append(config) + + ansible_facts["ansible_network_resources"].pop("static_routes", None) + facts = {} + if objs: + facts["static_routes"] = [] + params = utils.validate_config( + self.argument_spec, {"config": objs} + ) + for cfg in params["config"]: + facts["static_routes"].append(utils.remove_empties(cfg)) + + ansible_facts["ansible_network_resources"].update(facts) + return ansible_facts + + def render_config(self, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + next_hops_conf = "\n".join(filter(lambda x: ("next-hop" in x), conf)) + blackhole_conf = "\n".join(filter(lambda x: ("blackhole" in x), conf)) + routes_dict = { + "blackhole_config": self.parse_blackhole(blackhole_conf), + "next_hops": self.parse_next_hop(next_hops_conf), + } + return routes_dict + + def parse_blackhole(self, conf): + blackhole = None + if conf: + distance = search(r"^.*blackhole distance (.\S+)", conf, M) + bh = conf.find("blackhole") + if distance is not None: + blackhole = {} + value = distance.group(1).strip("'") + blackhole["distance"] = int(value) + elif bh: + blackhole = {} + blackhole["type"] = "blackhole" + return blackhole + + def 
get_afi(self, address): + route_type = get_route_type(address) + if route_type == "route": + return "ipv4" + elif route_type == "route6": + return "ipv6" + + def parse_next_hop(self, conf): + nh_list = None + if conf: + nh_list = [] + hop_list = findall(r"^.*next-hop (.+)", conf, M) + if hop_list: + for hop in hop_list: + distance = search(r"^.*distance (.\S+)", hop, M) + interface = search(r"^.*interface (.\S+)", hop, M) + + dis = hop.find("disable") + hop_info = hop.split(" ") + nh_info = { + "forward_router_address": hop_info[0].strip("'") + } + if interface: + nh_info["interface"] = interface.group(1).strip("'") + if distance: + value = distance.group(1).strip("'") + nh_info["admin_distance"] = int(value) + elif dis >= 1: + nh_info["enabled"] = False + for element in nh_list: + if ( + element["forward_router_address"] + == nh_info["forward_router_address"] + ): + if "interface" in nh_info.keys(): + element["interface"] = nh_info["interface"] + if "admin_distance" in nh_info.keys(): + element["admin_distance"] = nh_info[ + "admin_distance" + ] + if "enabled" in nh_info.keys(): + element["enabled"] = nh_info["enabled"] + nh_info = None + if nh_info is not None: + nh_list.append(nh_info) + return nh_list diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py new file mode 100644 index 00000000..402adfc9 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# utils +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +from ansible.module_utils.six import iteritems +from ansible_collections.ansible.netcommon.plugins.module_utils.compat import ( + ipaddress, +) + + +def search_obj_in_list(name, lst, key="name"): + for item in lst: + if item[key] == name: + return item + return None + + +def get_interface_type(interface): + """Gets the type of interface + """ + if interface.startswith("eth"): + return "ethernet" + elif interface.startswith("bond"): + return "bonding" + elif interface.startswith("vti"): + return "vti" + elif interface.startswith("lo"): + return "loopback" + + +def dict_delete(base, comparable): + """ + This function generates a dict containing key, value pairs for keys + that are present in the `base` dict but not present in the `comparable` + dict. + + :param base: dict object to base the diff on + :param comparable: dict object to compare against base + :returns: new dict object with key, value pairs that needs to be deleted. 
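Editorial example (not upstream text):
    dict_delete({"a": 1, "b": {"c": 2, "d": 3}}, {"b": {"c": 2}})
    returns {"a": 1, "b": {"d": 3}}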
+ + """ + to_delete = dict() + + for key in base: + if isinstance(base[key], dict): + sub_diff = dict_delete(base[key], comparable.get(key, {})) + if sub_diff: + to_delete[key] = sub_diff + else: + if key not in comparable: + to_delete[key] = base[key] + + return to_delete + + +def diff_list_of_dicts(want, have): + diff = [] + + set_w = set(tuple(d.items()) for d in want) + set_h = set(tuple(d.items()) for d in have) + difference = set_w.difference(set_h) + + for element in difference: + diff.append(dict((x, y) for x, y in element)) + + return diff + + +def get_lst_diff_for_dicts(want, have, lst): + """ + This function generates a list containing values + that are only in want and not in list in have dict + :param want: dict object to want + :param have: dict object to have + :param lst: list the diff on + :return: new list object with values which are only in want. + """ + if not have: + diff = want.get(lst) or [] + + else: + want_elements = want.get(lst) or {} + have_elements = have.get(lst) or {} + diff = list_diff_want_only(want_elements, have_elements) + return diff + + +def get_lst_same_for_dicts(want, have, lst): + """ + This function generates a list containing values + that are common for list in want and list in have dict + :param want: dict object to want + :param have: dict object to have + :param lst: list the comparison on + :return: new list object with values which are common in want and have. + """ + diff = None + if want and have: + want_list = want.get(lst) or {} + have_list = have.get(lst) or {} + diff = [ + i + for i in want_list and have_list + if i in have_list and i in want_list + ] + return diff + + +def list_diff_have_only(want_list, have_list): + """ + This function generated the list containing values + that are only in have list. + :param want_list: + :param have_list: + :return: new list with values which are only in have list + """ + if have_list and not want_list: + diff = have_list + elif not have_list: + diff = None + else: + diff = [ + i + for i in have_list + want_list + if i in have_list and i not in want_list + ] + return diff + + +def list_diff_want_only(want_list, have_list): + """ + This function generated the list containing values + that are only in want list. + :param want_list: + :param have_list: + :return: new list with values which are only in want list + """ + if have_list and not want_list: + diff = None + elif not have_list: + diff = want_list + else: + diff = [ + i + for i in have_list + want_list + if i in want_list and i not in have_list + ] + return diff + + +def search_dict_tv_in_list(d_val1, d_val2, lst, key1, key2): + """ + This function return the dict object if it exist in list. + :param d_val1: + :param d_val2: + :param lst: + :param key1: + :param key2: + :return: + """ + obj = next( + ( + item + for item in lst + if item[key1] == d_val1 and item[key2] == d_val2 + ), + None, + ) + if obj: + return obj + else: + return None + + +def key_value_in_dict(have_key, have_value, want_dict): + """ + This function checks whether the key and values exist in dict + :param have_key: + :param have_value: + :param want_dict: + :return: + """ + for key, value in iteritems(want_dict): + if key == have_key and value == have_value: + return True + return False + + +def is_dict_element_present(dict, key): + """ + This function checks whether the key is present in dict. 
+ :param dict: + :param key: + :return: + """ + for item in dict: + if item == key: + return True + return False + + +def get_ip_address_version(address): + """ + This function returns the version of IP address + :param address: IP address + :return: + """ + try: + address = unicode(address) + except NameError: + address = str(address) + version = ipaddress.ip_address(address.split("/")[0]).version + return version + + +def get_route_type(address): + """ + This function returns the route type based on IP address + :param address: + :return: + """ + version = get_ip_address_version(address) + if version == 6: + return "route6" + elif version == 4: + return "route" diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py new file mode 100644 index 00000000..908395a6 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py @@ -0,0 +1,124 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
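A quick runnable sketch (editorial, not part of the patch) of what the address
helpers above reduce to; it mirrors get_ip_address_version()/get_route_type()
using only the stdlib ipaddress module, so the names here are illustrative:

    import ipaddress

    def route_keyword(address):
        # IPv6 destinations map to the VyOS "route6" keyword, IPv4 to "route"
        version = ipaddress.ip_address(address.split("/")[0]).version
        return "route6" if version == 6 else "route"

    assert route_keyword("10.0.0.0/8") == "route"
    assert route_keyword("2001:db8::/32") == "route6"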
+# +import json + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + +vyos_provider_spec = { + "host": dict(), + "port": dict(type="int"), + "username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])), + "password": dict( + fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True + ), + "ssh_keyfile": dict( + fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path" + ), + "timeout": dict(type="int"), +} +vyos_argument_spec = { + "provider": dict( + type="dict", options=vyos_provider_spec, removed_in_version=2.14 + ), +} + + +def get_provider_argspec(): + return vyos_provider_spec + + +def get_connection(module): + if hasattr(module, "_vyos_connection"): + return module._vyos_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api == "cliconf": + module._vyos_connection = Connection(module._socket_path) + else: + module.fail_json(msg="Invalid connection type %s" % network_api) + + return module._vyos_connection + + +def get_capabilities(module): + if hasattr(module, "_vyos_capabilities"): + return module._vyos_capabilities + + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + + module._vyos_capabilities = json.loads(capabilities) + return module._vyos_capabilities + + +def get_config(module, flags=None, format=None): + flags = [] if flags is None else flags + global _DEVICE_CONFIGS + + if _DEVICE_CONFIGS != {}: + return _DEVICE_CONFIGS + else: + connection = get_connection(module) + try: + out = connection.get_config(flags=flags, format=format) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + cfg = to_text(out, errors="surrogate_then_replace").strip() + _DEVICE_CONFIGS = cfg + return cfg + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + response = connection.run_commands( + commands=commands, check_rc=check_rc + ) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + return response + + +def load_config(module, commands, commit=False, comment=None): + connection = get_connection(module) + + try: + response = connection.edit_config( + candidate=commands, commit=commit, comment=comment + ) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + + return response.get("diff") diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py new file mode 100644 index 00000000..18538491 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: vyos_command +author: Nathaniel Case (@Qalthos) +short_description: Run one or more commands on VyOS devices +description: +- The command module allows running one or more commands on remote devices running + VyOS. This module can also be introspected to validate key parameters before returning + successfully. If the conditional statements are not met in the wait period, the + task fails. +- Certain C(show) commands in VyOS produce many lines of output and use a custom pager + that can cause this module to hang. If the value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH) + is not set, the default number of 10000 is used. +extends_documentation_fragment: +- vyos.vyos.vyos +options: + commands: + description: + - The ordered set of commands to execute on the remote device running VyOS. The + output from the command execution is returned to the playbook. If the I(wait_for) + argument is provided, the module is not returned until the condition is satisfied + or the number of retries has been exceeded. + required: true + wait_for: + description: + - Specifies what to evaluate from the output of the command and what conditionals + to apply. This argument will cause the task to wait for a particular conditional + to be true before moving forward. If the conditional is not true by the configured + I(retries), the task fails. See examples. + aliases: + - waitfor + match: + description: + - The I(match) argument is used in conjunction with the I(wait_for) argument to + specify the match policy. Valid values are C(all) or C(any). If the value is + set to C(all) then all conditionals in the wait_for must be satisfied. If the + value is set to C(any) then only one of the values must be satisfied. + default: all + choices: + - any + - all + retries: + description: + - Specifies the number of retries a command should be tried before it is considered + failed. The command is run on the target device every retry and evaluated against + the I(wait_for) conditionals. + default: 10 + interval: + description: + - Configures the interval in seconds to wait between I(retries) of the command. + If the command does not pass the specified conditions, the interval indicates + how long to wait before trying the command again. + default: 1 +notes: +- Tested against VyOS 1.1.8 (helium). +- Running C(show system boot-messages all) will cause the module to hang since VyOS + is using a custom pager setting to display the output of that command. +- If a command sent to the device requires answering a prompt, it is possible to pass + a dict containing I(command), I(answer) and I(prompt). See examples. +- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html). 
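# Editorial illustration (YAML comment, not a documented option): a typical
# polling setup pairs wait_for with retries/interval, e.g.
#   wait_for:
#     - "result[0] contains 'VyOS 1.1.8'"
#   match: all
#   retries: 10
#   interval: 1
# which re-runs the commands up to ten times, one second apart, until the
# conditional matches.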
+""" + +EXAMPLES = """ +tasks: + - name: show configuration on ethernet devices eth0 and eth1 + vyos_command: + commands: + - show interfaces ethernet {{ item }} + with_items: + - eth0 + - eth1 + + - name: run multiple commands and check if version output contains specific version string + vyos_command: + commands: + - show version + - show hardware cpu + wait_for: + - "result[0] contains 'VyOS 1.1.7'" + + - name: run command that requires answering a prompt + vyos_command: + commands: + - command: 'rollback 1' + prompt: 'Proceed with reboot? [confirm][y]' + answer: y +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import ( + Conditional, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + transform_commands, + to_lines, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + run_commands, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + vyos_argument_spec, +) + + +def parse_commands(module, warnings): + commands = transform_commands(module) + + if module.check_mode: + for item in list(commands): + if not item["command"].startswith("show"): + warnings.append( + "Only show commands are supported when using check mode, not " + "executing %s" % item["command"] + ) + commands.remove(item) + + return commands + + +def main(): + spec = dict( + commands=dict(type="list", required=True), + wait_for=dict(type="list", aliases=["waitfor"]), + match=dict(default="all", choices=["all", "any"]), + retries=dict(default=10, type="int"), + interval=dict(default=1, type="int"), + ) + + spec.update(vyos_argument_spec) + + module = AnsibleModule(argument_spec=spec, supports_check_mode=True) + + warnings = list() + result = {"changed": False, "warnings": warnings} + commands = parse_commands(module, warnings) + wait_for = module.params["wait_for"] or list() + + try: + conditionals = [Conditional(c) for c in wait_for] + except AttributeError as exc: + module.fail_json(msg=to_text(exc)) + + retries = module.params["retries"] + interval = module.params["interval"] + match = module.params["match"] + + for _ in range(retries): + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == "any": + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = "One or more conditional statements have not been satisfied" + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update( + {"stdout": responses, "stdout_lines": list(to_lines(responses)),} + ) + + module.exit_json(**result) + + +if __name__ == "__main__": + 
main() diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py new file mode 100644 index 00000000..b899045a --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py @@ -0,0 +1,354 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: vyos_config +author: Nathaniel Case (@Qalthos) +short_description: Manage VyOS configuration on remote device +description: +- This module provides configuration file management of VyOS devices. It provides + arguments for managing both the configuration file and state of the active configuration. + All configuration statements are based on `set` and `delete` commands in the device + configuration. +extends_documentation_fragment: +- vyos.vyos.vyos +notes: +- Tested against VyOS 1.1.8 (helium). +- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html). +options: + lines: + description: + - The ordered set of configuration lines to be managed and compared with the existing + configuration on the remote device. + src: + description: + - The C(src) argument specifies the path to the source config file to load. The + source config file can either be in bracket format or set format. The source + file can include Jinja2 template variables. + match: + description: + - The C(match) argument controls the method used to match against the current + active configuration. By default, the desired config is matched against the + active config and the deltas are loaded. If the C(match) argument is set to + C(none) the active configuration is ignored and the configuration is always + loaded. + default: line + choices: + - line + - none + backup: + description: + - The C(backup) argument will backup the current devices active configuration + to the Ansible control host prior to making any changes. If the C(backup_options) + value is not given, the backup file will be located in the backup folder in + the playbook root directory or role root directory, if playbook is part of an + ansible role. If the directory does not exist, it is created. + type: bool + default: 'no' + comment: + description: + - Allows a commit description to be specified to be included when the configuration + is committed. If the configuration is not changed or committed, this argument + is ignored. + default: configured by vyos_config + config: + description: + - The C(config) argument specifies the base configuration to use to compare against + the desired configuration. 
If this value is not specified, the module will + automatically retrieve the current active configuration from the remote device. + save: + description: + - The C(save) argument controls whether or not changes made to the active configuration + are saved to disk. This is independent of committing the config. When set + to True, the active configuration is saved. + type: bool + default: 'no' + backup_options: + description: + - This is a dict object containing configurable options related to backup file + path. The value of this option is read only when C(backup) is set to I(yes), + if C(backup) is set to I(no) this option will be silently ignored. + suboptions: + filename: + description: + - The filename to be used to store the backup configuration. If the filename + is not given it will be generated based on the hostname, current time and + date in format defined by _config.@ + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will + be first created and the filename is either the value of C(filename) or + default filename as described in C(filename) options description. If the + path value is not given in that case a I(backup) directory will be created + in the current working directory and backup configuration will be copied + in C(filename) within I(backup) directory. + type: path + type: dict +""" + +EXAMPLES = """ +- name: configure the remote device + vyos_config: + lines: + - set system host-name {{ inventory_hostname }} + - set service lldp + - delete service dhcp-server + +- name: backup and load from file + vyos_config: + src: vyos.cfg + backup: yes + +- name: render a Jinja2 template onto the VyOS router + vyos_config: + src: vyos_template.j2 + +- name: for idempotency, use full-form commands + vyos_config: + lines: + # - set int eth eth2 description 'OUTSIDE' + - set interface ethernet eth2 description 'OUTSIDE' + +- name: configurable backup path + vyos_config: + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +commands: + description: The list of configuration commands sent to the device + returned: always + type: list + sample: ['...', '...'] +filtered: + description: The list of configuration commands removed to avoid a load failure + returned: always + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/vyos_config.2016-07-16@22:28:34 +filename: + description: The name of the backup file + returned: when backup is yes and filename is not specified in backup options + type: str + sample: vyos_config.2016-07-16@22:28:34 +shortname: + description: The full path to the backup file excluding the timestamp + returned: when backup is yes and filename is not specified in backup options + type: str + sample: /playbooks/ansible/backup/vyos_config +date: + description: The date extracted from the backup file name + returned: when backup is yes + type: str + sample: "2016-07-16" +time: + description: The time extracted from the backup file name + returned: when backup is yes + type: str + sample: "22:28:34" +""" +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.connection import ConnectionError +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + load_config, + 
get_config, + run_commands, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + vyos_argument_spec, + get_connection, +) + + +DEFAULT_COMMENT = "configured by vyos_config" + +CONFIG_FILTERS = [ + re.compile(r"set system login user \S+ authentication encrypted-password") +] + + +def get_candidate(module): + contents = module.params["src"] or module.params["lines"] + + if module.params["src"]: + contents = format_commands(contents.splitlines()) + + contents = "\n".join(contents) + return contents + + +def format_commands(commands): + """ + This function format the input commands and removes the prepend white spaces + for command lines having 'set' or 'delete' and it skips empty lines. + :param commands: + :return: list of commands + """ + return [ + line.strip() if line.split()[0] in ("set", "delete") else line + for line in commands + if len(line.strip()) > 0 + ] + + +def diff_config(commands, config): + config = [str(c).replace("'", "") for c in config.splitlines()] + + updates = list() + visited = set() + + for line in commands: + item = str(line).replace("'", "") + + if not item.startswith("set") and not item.startswith("delete"): + raise ValueError("line must start with either `set` or `delete`") + + elif item.startswith("set") and item not in config: + updates.append(line) + + elif item.startswith("delete"): + if not config: + updates.append(line) + else: + item = re.sub(r"delete", "set", item) + for entry in config: + if entry.startswith(item) and line not in visited: + updates.append(line) + visited.add(line) + + return list(updates) + + +def sanitize_config(config, result): + result["filtered"] = list() + index_to_filter = list() + for regex in CONFIG_FILTERS: + for index, line in enumerate(list(config)): + if regex.search(line): + result["filtered"].append(line) + index_to_filter.append(index) + # Delete all filtered configs + for filter_index in sorted(index_to_filter, reverse=True): + del config[filter_index] + + +def run(module, result): + # get the current active config from the node or passed in via + # the config param + config = module.params["config"] or get_config(module) + + # create the candidate config object from the arguments + candidate = get_candidate(module) + + # create loadable config that includes only the configuration updates + connection = get_connection(module) + try: + response = connection.get_diff( + candidate=candidate, + running=config, + diff_match=module.params["match"], + ) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + + commands = response.get("config_diff") + sanitize_config(commands, result) + + result["commands"] = commands + + commit = not module.check_mode + comment = module.params["comment"] + + diff = None + if commands: + diff = load_config(module, commands, commit=commit, comment=comment) + + if result.get("filtered"): + result["warnings"].append( + "Some configuration commands were " + "removed, please see the filtered key" + ) + + result["changed"] = True + + if module._diff: + result["diff"] = {"prepared": diff} + + +def main(): + backup_spec = dict(filename=dict(), dir_path=dict(type="path")) + argument_spec = dict( + src=dict(type="path"), + lines=dict(type="list"), + match=dict(default="line", choices=["line", "none"]), + comment=dict(default=DEFAULT_COMMENT), + config=dict(), + backup=dict(type="bool", default=False), + backup_options=dict(type="dict", options=backup_spec), + save=dict(type="bool", default=False), + ) + + 
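    # Editorial note on the helpers above: sanitize_config() removes any
    # candidate line matching CONFIG_FILTERS (user password hashes) and
    # reports it under result["filtered"], so secrets never surface in
    # result["commands"] or the play output.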
argument_spec.update(vyos_argument_spec) + + mutually_exclusive = [("lines", "src")] + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + warnings = list() + + result = dict(changed=False, warnings=warnings) + + if module.params["backup"]: + result["__backup__"] = get_config(module=module) + + if any((module.params["src"], module.params["lines"])): + run(module, result) + + if module.params["save"]: + diff = run_commands(module, commands=["configure", "compare saved"])[1] + if diff != "[edit]": + run_commands(module, commands=["save"]) + result["changed"] = True + run_commands(module, commands=["exit"]) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py new file mode 100644 index 00000000..19fb727f --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The module file for vyos_facts +""" + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": [u"preview"], + "supported_by": "network", +} + + +DOCUMENTATION = """module: vyos_facts +short_description: Get facts about vyos devices. +description: +- Collects facts from network devices running the vyos operating system. This module + places the facts gathered in the fact tree keyed by the respective resource name. The + facts module will always collect a base set of facts from the device and can enable + or disable collection of additional facts. +author: +- Nathaniel Case (@qalthos) +- Nilashish Chakraborty (@Nilashishc) +- Rohit Thakur (@rohitthakur2590) +extends_documentation_fragment: +- vyos.vyos.vyos +notes: +- Tested against VyOS 1.1.8 (helium). +- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html). +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected to a given subset. Possible + values for this argument include all, default, config, and neighbors. Can specify + a list of values to include a larger subset. Values can also be used with an + initial C(M(!)) to specify that a specific subset should not be collected. + required: false + default: '!config' + gather_network_resources: + description: + - When supplied, this argument will restrict the facts collected to a given subset. + Possible values for this argument include all and the resources like interfaces. + Can specify a list of values to include a larger subset. Values can also be + used with an initial C(M(!)) to specify that a specific subset should not be + collected. Valid subsets are 'all', 'interfaces', 'l3_interfaces', 'lag_interfaces', + 'lldp_global', 'lldp_interfaces', 'static_routes', 'firewall_rules'. 
+ required: false +""" + +EXAMPLES = """ +# Gather all facts +- vyos_facts: + gather_subset: all + gather_network_resources: all + +# collect only the config and default facts +- vyos_facts: + gather_subset: config + +# collect everything exception the config +- vyos_facts: + gather_subset: "!config" + +# Collect only the interfaces facts +- vyos_facts: + gather_subset: + - '!all' + - '!min' + gather_network_resources: + - interfaces + +# Do not collect interfaces facts +- vyos_facts: + gather_network_resources: + - "!interfaces" + +# Collect interfaces and minimal default facts +- vyos_facts: + gather_subset: min + gather_network_resources: interfaces +""" + +RETURN = """ +ansible_net_config: + description: The running-config from the device + returned: when config is configured + type: str +ansible_net_commits: + description: The set of available configuration revisions + returned: when present + type: list +ansible_net_hostname: + description: The configured system hostname + returned: always + type: str +ansible_net_model: + description: The device model string + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the device + returned: always + type: str +ansible_net_version: + description: The version of the software running + returned: always + type: str +ansible_net_neighbors: + description: The set of LLDP neighbors + returned: when interface is configured + type: list +ansible_net_gather_subset: + description: The list of subsets gathered by the module + returned: always + type: list +ansible_net_api: + description: The name of the transport + returned: always + type: str +ansible_net_python_version: + description: The Python version Ansible controller is using + returned: always + type: str +ansible_net_gather_network_resources: + description: The list of fact resource subsets collected from the device + returned: always + type: list +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.facts.facts import ( + FactsArgs, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.facts import ( + Facts, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( + vyos_argument_spec, +) + + +def main(): + """ + Main entry point for module execution + + :returns: ansible_facts + """ + argument_spec = FactsArgs.argument_spec + argument_spec.update(vyos_argument_spec) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) + + warnings = [] + if module.params["gather_subset"] == "!config": + warnings.append( + "default value for `gather_subset` will be changed to `min` from `!config` v2.11 onwards" + ) + + result = Facts(module).get_facts() + + ansible_facts, additional_warnings = result + warnings.extend(additional_warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py new file mode 100644 index 00000000..8fe572b0 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py @@ -0,0 +1,513 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for vyos_lldp_interfaces +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "network", +} + +DOCUMENTATION = """module: vyos_lldp_interfaces +short_description: Manages attributes of lldp interfaces on VyOS devices. +description: This module manages attributes of lldp interfaces on VyOS network devices. +notes: +- Tested against VyOS 1.1.8 (helium). +- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html). +author: +- Rohit Thakur (@rohitthakur2590) +options: + config: + description: A list of lldp interfaces configurations. + type: list + suboptions: + name: + description: + - Name of the lldp interface. + type: str + required: true + enable: + description: + - to disable lldp on the interface. + type: bool + default: true + location: + description: + - LLDP-MED location data. + type: dict + suboptions: + civic_based: + description: + - Civic-based location data. + type: dict + suboptions: + ca_info: + description: LLDP-MED address info + type: list + suboptions: + ca_type: + description: LLDP-MED Civic Address type. + type: int + required: true + ca_value: + description: LLDP-MED Civic Address value. + type: str + required: true + country_code: + description: Country Code + type: str + required: true + coordinate_based: + description: + - Coordinate-based location. + type: dict + suboptions: + altitude: + description: Altitude in meters. + type: int + datum: + description: Coordinate datum type. + type: str + choices: + - WGS84 + - NAD83 + - MLLW + latitude: + description: Latitude. + type: str + required: true + longitude: + description: Longitude. + type: str + required: true + elin: + description: Emergency Call Service ELIN number (between 10-25 numbers). + type: str + state: + description: + - The state of the configuration after module completion. 
+ type: str + choices: + - merged + - replaced + - overridden + - deleted + default: merged +""" +EXAMPLES = """ +# Using merged +# +# Before state: +# ------------- +# +# vyos@vyos:~$ show configuration commands | grep lldp +# +- name: Merge provided configuration with device configuration + vyos_lldp_interfaces: + config: + - name: 'eth1' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth2' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: '33.524449N' + state: merged +# +# +# ------------------------- +# Module Execution Result +# ------------------------- +# +# before": [] +# +# "commands": [ +# "set service lldp interface eth1 location civic-based country-code 'US'", +# "set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'", +# "set service lldp interface eth1", +# "set service lldp interface eth2 location coordinate-based latitude '33.524449N'", +# "set service lldp interface eth2 location coordinate-based altitude '2200'", +# "set service lldp interface eth2 location coordinate-based datum 'WGS84'", +# "set service lldp interface eth2 location coordinate-based longitude '222.267255W'", +# "set service lldp interface eth2 location coordinate-based latitude '33.524449N'", +# "set service lldp interface eth2 location coordinate-based altitude '2200'", +# "set service lldp interface eth2 location coordinate-based datum 'WGS84'", +# "set service lldp interface eth2 location coordinate-based longitude '222.267255W'", +# "set service lldp interface eth2" +# +# "after": [ +# { +# "location": { +# "coordinate_based": { +# "altitude": 2200, +# "datum": "WGS84", +# "latitude": "33.524449N", +# "longitude": "222.267255W" +# } +# }, +# "name": "eth2" +# }, +# { +# "location": { +# "civic_based": { +# "ca_info": [ +# { +# "ca_type": 0, +# "ca_value": "ENGLISH" +# } +# ], +# "country_code": "US" +# } +# }, +# "name": "eth1" +# } +# ], +# +# After state: +# ------------- +# +# vyos@vyos:~$ show configuration commands | grep lldp +# set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH' +# set service lldp interface eth1 location civic-based country-code 'US' +# set service lldp interface eth2 location coordinate-based altitude '2200' +# set service lldp interface eth2 location coordinate-based datum 'WGS84' +# set service lldp interface eth2 location coordinate-based latitude '33.524449N' +# set service lldp interface eth2 location coordinate-based longitude '222.267255W' + + +# Using replaced +# +# Before state: +# ------------- +# +# vyos@vyos:~$ show configuration commands | grep lldp +# set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH' +# set service lldp interface eth1 location civic-based country-code 'US' +# set service lldp interface eth2 location coordinate-based altitude '2200' +# set service lldp interface eth2 location coordinate-based datum 'WGS84' +# set service lldp interface eth2 location coordinate-based latitude '33.524449N' +# set service lldp interface eth2 location coordinate-based longitude '222.267255W' +# +- name: Replace device configurations of listed LLDP interfaces with provided configurations + vyos_lldp_interfaces: + config: + - name: 'eth2' + location: + civic_based: + country_code: 'US' + ca_info: + - ca_type: 0 + ca_value: 'ENGLISH' + + - name: 'eth1' + location: + coordinate_based: + altitude: 2200 + datum: 'WGS84' + longitude: '222.267255W' + latitude: 
'33.524449N' + state: replaced +# +# +# ------------------------- +# Module Execution Result +# ------------------------- +# +# "before": [ +# { +# "location": { +# "coordinate_based": { +# "altitude": 2200, +# "datum": "WGS84", +# "latitude": "33.524449N", +# "longitude": "222.267255W" +# } +# }, +# "name": "eth2" +# }, +# { +# "location": { +# "civic_based": { +# "ca_info": [ +# { +# "ca_type": 0, +# "ca_value": "ENGLISH" +# } +# ], +# "country_code": "US" +# } +# }, +# "name": "eth1" +# } +# ] +# +# "commands": [ +# "delete service lldp interface eth2 location", +# "set service lldp interface eth2 'disable'", +# "set service lldp interface eth2 location civic-based country-code 'US'", +# "set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'", +# "delete service lldp interface eth1 location", +# "set service lldp interface eth1 'disable'", +# "set service lldp interface eth1 location coordinate-based latitude '33.524449N'", +# "set service lldp interface eth1 location coordinate-based altitude '2200'", +# "set service lldp interface eth1 location coordinate-based datum 'WGS84'", +# "set service lldp interface eth1 location coordinate-based longitude '222.267255W'" +# ] +# +# "after": [ +# { +# "location": { +# "civic_based": { +# "ca_info": [ +# { +# "ca_type": 0, +# "ca_value": "ENGLISH" +# } +# ], +# "country_code": "US" +# } +# }, +# "name": "eth2" +# }, +# { +# "location": { +# "coordinate_based": { +# "altitude": 2200, +# "datum": "WGS84", +# "latitude": "33.524449N", +# "longitude": "222.267255W" +# } +# }, +# "name": "eth1" +# } +# ] +# +# After state: +# ------------- +# +# vyos@vyos:~$ show configuration commands | grep lldp +# set service lldp interface eth1 'disable' +# set service lldp interface eth1 location coordinate-based altitude '2200' +# set service lldp interface eth1 location coordinate-based datum 'WGS84' +# set service lldp interface eth1 location coordinate-based latitude '33.524449N' +# set service lldp interface eth1 location coordinate-based longitude '222.267255W' +# set service lldp interface eth2 'disable' +# set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH' +# set service lldp interface eth2 location civic-based country-code 'US' + + +# Using overridden +# +# Before state +# -------------- +# +# vyos@vyos:~$ show configuration commands | grep lldp +# set service lldp interface eth1 'disable' +# set service lldp interface eth1 location coordinate-based altitude '2200' +# set service lldp interface eth1 location coordinate-based datum 'WGS84' +# set service lldp interface eth1 location coordinate-based latitude '33.524449N' +# set service lldp interface eth1 location coordinate-based longitude '222.267255W' +# set service lldp interface eth2 'disable' +# set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH' +# set service lldp interface eth2 location civic-based country-code 'US' +# +- name: Overrides all device configuration with provided configuration + vyos_lldp_interfaces: + config: + - name: 'eth2' + location: + elin: 0000000911 + + state: overridden +# +# +# ------------------------- +# Module Execution Result +# ------------------------- +# +# "before": [ +# { +# "enable": false, +# "location": { +# "civic_based": { +# "ca_info": [ +# { +# "ca_type": 0, +# "ca_value": "ENGLISH" +# } +# ], +# "country_code": "US" +# } +# }, +# "name": "eth2" +# }, +# { +# "enable": false, +# "location": { +# "coordinate_based": { +# "altitude": 2200, +# "datum": "WGS84", +# "latitude": "33.524449N", +# "longitude": "222.267255W" +# } +# }, +# "name": "eth1" +# } +# ] +# +# "commands": [ +# "delete service lldp interface eth2 location", +# "delete service lldp interface eth2 disable", +# "set service lldp interface eth2 location elin 0000000911" +# ] +# +# +# "after": [ +# { +# "location": { +# "elin": 0000000911 +# }, +# "name": "eth2" +# } +# ] +# +# +# After state +# ------------ +# +# vyos@vyos# run show configuration commands | grep lldp +# set service lldp interface eth2 location elin '0000000911' + + +# Using deleted +# +# Before state +# ------------- +# +# vyos@vyos# run show configuration commands | grep lldp +# set service lldp interface eth2 location elin '0000000911' +# +- name: Delete lldp interface attributes of given interfaces. + vyos_lldp_interfaces: + config: + - name: 'eth2' + state: deleted +# +# +# ------------------------ +# Module Execution Results +# ------------------------ +# +# "before": [ +# { +# "location": { +# "elin": 0000000911 +# }, +# "name": "eth2" +# } +# ] +# "commands": [ +# "delete service lldp interface eth2" +# ] +# +# "after": [] +# After state +# ------------ +# vyos@vyos# run show configuration commands | grep lldp +# set service 'lldp' + + +""" +RETURN = """ +before: + description: The configuration as structured data prior to module invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The configuration as structured data after module completion. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: + - "set service lldp interface eth2 'disable'" + - "delete service lldp interface eth1 location" +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_interfaces.lldp_interfaces import ( + Lldp_interfacesArgs, +) +from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.lldp_interfaces.lldp_interfaces import ( + Lldp_interfaces, +) + + +def main(): + """ + Main entry point for module execution + + :returns: the result from module invocation + """ + required_if = [ + ("state", "merged", ("config",)), + ("state", "replaced", ("config",)), + ("state", "overridden", ("config",)), + ] + module = AnsibleModule( + argument_spec=Lldp_interfacesArgs.argument_spec, + required_if=required_if, + supports_check_mode=True, + ) + + result = Lldp_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py new file mode 100644 index 00000000..fe7712f6 --- /dev/null +++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py @@ -0,0 +1,53 @@ +# +# (c) 2016 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +# +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import os +import re + +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$"), + ] + + terminal_stderr_re = [ + re.compile(br"\n\s*Invalid command:"), + re.compile(br"\nCommit failed"), + re.compile(br"\n\s+Set failed"), + ] + + terminal_length = int(os.getenv("ANSIBLE_VYOS_TERMINAL_LENGTH", 10000)) + + def on_open_shell(self): + try: + for cmd in (b"set terminal length 0", b"set terminal width 512"): + self._exec_cli_command(cmd) + self._exec_cli_command( + b"set terminal length %d" % self.terminal_length + ) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure("unable to set terminal parameters") diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py new file mode 120000 index 00000000..0364d766 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py @@ -0,0 +1 @@ +../../../../../../plugins/action/win_copy.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 new file mode 120000 index 00000000..6fc438d6 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/async_status.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 new file mode 120000 index 00000000..81d8afa3 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_acl.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py new file mode 120000 index 00000000..3a2434cf --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_acl.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 new file mode 120000
index 00000000..a34fb012 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_copy.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py new file mode 120000 index 00000000..2d2c69a2 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_copy.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 new file mode 120000 index 00000000..8ee5c2b5 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_file.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py new file mode 120000 index 00000000..b4bc0583 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_file.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 new file mode 120000 index 00000000..d7b25ed0 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_ping.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py new file mode 120000 index 00000000..0b97c87b --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_ping.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 new file mode 120000 index 00000000..eb07a017 --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_shell.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py new file mode 120000 index 00000000..3c6f0749 --- /dev/null +++ 
b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_shell.py \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 new file mode 120000 index 00000000..62a7a40a --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_stat.ps1 \ No newline at end of file diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py new file mode 120000 index 00000000..1db4c95e --- /dev/null +++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py @@ -0,0 +1 @@ +../../../../../../plugins/modules/win_stat.py \ No newline at end of file diff --git a/test/support/windows-integration/plugins/action/win_copy.py b/test/support/windows-integration/plugins/action/win_copy.py new file mode 100644 index 00000000..adb918be --- /dev/null +++ b/test/support/windows-integration/plugins/action/win_copy.py @@ -0,0 +1,522 @@ +# This file is part of Ansible + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import base64 +import json +import os +import os.path +import shutil +import tempfile +import traceback +import zipfile + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.utils.hashing import checksum + + +def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False): + """ + Walk a filesystem tree returning enough information to copy the files. + This is similar to the _walk_dirs function in ``copy.py`` but returns + a dict instead of a tuple for each entry and includes the checksum of + a local file if wanted. + + :arg topdir: The directory that the filesystem tree is rooted at + :arg loader: The self._loader object from ActionBase + :kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault + :kwarg base_path: The initial directory structure to strip off of the + files for the destination directory. If this is None (the default), + the base_path is set to ``topdir``. + :kwarg local_follow: Whether to follow symlinks on the source. When set + to False, no symlinks are dereferenced. When set to True, the code + will dereference most symlinks. However, symlinks + can still be present if needed to break a circular link. + :kwarg trailing_slash_detector: Function to determine if a path has + a trailing directory separator. Only needed when dealing with paths on + a remote machine (in which case, pass in a function that is aware of the + directory separator conventions on the remote machine).
+ :kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict + :returns: dictionary of dictionaries. All of the path elements in the structure are text strings. + This separates all the files, directories, and symlinks along with + important information about each:: + + { + 'files': [{ + src: '/absolute/path/to/copy/from', + dest: 'relative/path/to/copy/to', + checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43' + }, ...], + 'directories': [{ + src: '/absolute/path/to/copy/from', + dest: 'relative/path/to/copy/to' + }, ...], + 'symlinks': [{ + src: '/symlink/target/path', + dest: 'relative/path/to/copy/to' + }, ...], + + } + + The ``symlinks`` field is only populated if ``local_follow`` is set to False + *or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set + to None if checksum_check=False. + + """ + # Convert the path segments into byte strings + + r_files = {'files': [], 'directories': [], 'symlinks': []} + + def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False): + """ + This is a closure (function utilizing variables from its parent + function's scope) so that we only need one copy of all the containers. + Note that this function uses side effects (See the Variables used from + outer scope). + + :arg topdir: The directory we are walking for files + :arg rel_offset: Integer defining how many characters to strip off of + the beginning of a path + :arg parent_dirs: Directories that we're copying that this directory is in. + :kwarg rel_base: String to prepend to the path after ``rel_offset`` is + applied to form the relative path. + + Variables used from the outer scope + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :r_files: Dictionary of files in the hierarchy. See the return value + for :func:`walk` for the structure of this dictionary. + :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks + """ + for base_path, sub_folders, files in os.walk(topdir): + for filename in files: + filepath = os.path.join(base_path, filename) + dest_filepath = os.path.join(rel_base, filepath[rel_offset:]) + + if os.path.islink(filepath): + # Dereference the symlink + real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt) + if local_follow and os.path.isfile(real_file): + # Add the file pointed to by the symlink + r_files['files'].append( + { + "src": real_file, + "dest": dest_filepath, + "checksum": _get_local_checksum(checksum_check, real_file) + } + ) + else: + # Mark this file as a symlink to copy + r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath}) + else: + # Just a normal file + real_file = loader.get_real_file(filepath, decrypt=decrypt) + r_files['files'].append( + { + "src": real_file, + "dest": dest_filepath, + "checksum": _get_local_checksum(checksum_check, real_file) + } + ) + + for dirname in sub_folders: + dirpath = os.path.join(base_path, dirname) + dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:]) + real_dir = os.path.realpath(dirpath) + dir_stats = os.stat(real_dir) + + if os.path.islink(dirpath): + if local_follow: + if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs: + # Just insert the symlink if the target directory + # exists inside of the copy already + r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath}) + else: + # Walk the dirpath to find all parent directories.
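+ # (Illustrative worked example, hypothetical paths: for a symlinked + # directory '/srv/data/current' inside a copy rooted at '/srv/data', + # the loop below stats '/srv/data' first, finds its (st_dev, st_ino) + # pair already recorded in parent_dirs and stops, so the walk never + # escapes the tree that is actually being copied.)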
+ new_parents = set() + parent_dir_list = os.path.dirname(dirpath).split(os.path.sep) + for parent in range(len(parent_dir_list), 0, -1): + parent_stat = os.stat(u'/'.join(parent_dir_list[:parent])) + if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs: + # Reached the point at which the directory + # tree is already known. Don't add any + # more or we might go to an ancestor that + # isn't being copied. + break + new_parents.add((parent_stat.st_dev, parent_stat.st_ino)) + + if (dir_stats.st_dev, dir_stats.st_ino) in new_parents: + # This was a circular symlink. So add it as + # a symlink + r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath}) + else: + # Walk the directory pointed to by the symlink + r_files['directories'].append({"src": real_dir, "dest": dest_dirpath}) + offset = len(real_dir) + 1 + _recurse(real_dir, offset, parent_dirs.union(new_parents), + rel_base=dest_dirpath, + checksum_check=checksum_check) + else: + # Add the symlink to the destination + r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath}) + else: + # Just a normal directory + r_files['directories'].append({"src": dirpath, "dest": dest_dirpath}) + + # Check if the source ends with a "/" so that we know which directory + # level to work at (similar to rsync) + source_trailing_slash = False + if trailing_slash_detector: + source_trailing_slash = trailing_slash_detector(topdir) + else: + source_trailing_slash = topdir.endswith(os.path.sep) + + # Calculate the offset needed to strip the base_path to make relative + # paths + if base_path is None: + base_path = topdir + if not source_trailing_slash: + base_path = os.path.dirname(base_path) + if topdir.startswith(base_path): + offset = len(base_path) + + # Make sure we're making the new paths relative + if trailing_slash_detector and not trailing_slash_detector(base_path): + offset += 1 + elif not base_path.endswith(os.path.sep): + offset += 1 + + if os.path.islink(topdir) and not local_follow: + r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)} + return r_files + + dir_stats = os.stat(topdir) + parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),)) + # Actually walk the directory hierarchy + _recurse(topdir, offset, parents, checksum_check=checksum_check) + + return r_files + + +def _get_local_checksum(get_checksum, local_path): + if get_checksum: + return checksum(local_path) + else: + return None + + +class ActionModule(ActionBase): + + WIN_PATH_SEPARATOR = "\\" + + def _create_content_tempfile(self, content): + ''' Create a tempfile containing defined content ''' + fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP) + f = os.fdopen(fd, 'wb') + content = to_bytes(content) + try: + f.write(content) + except Exception as err: + os.remove(content_tempfile) + raise Exception(err) + finally: + f.close() + return content_tempfile + + def _create_zip_tempfile(self, files, directories): + tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP) + zip_file_path = os.path.join(tmpdir, "win_copy.zip") + zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True) + + # encoding the file/dir name with base64 so Windows can unzip a unicode + # filename and get the right name; Windows doesn't handle unicode names + # very well + for directory in directories: + directory_path = to_bytes(directory['src'], errors='surrogate_or_strict') + archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict') + + encoded_path =
to_text(base64.b64encode(archive_path), errors='surrogate_or_strict') + zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED) + + for file in files: + file_path = to_bytes(file['src'], errors='surrogate_or_strict') + archive_path = to_bytes(file['dest'], errors='surrogate_or_strict') + + encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict') + zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED) + + return zip_file_path + + def _remove_tempfile_if_content_defined(self, content, content_tempfile): + if content is not None: + os.remove(content_tempfile) + + def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup): + if self._play_context.check_mode: + module_return = dict(changed=True) + return module_return + + # copy the file across to the server + tmp_src = self._connection._shell.join_path(tmp, 'source') + self._transfer_file(local_file, tmp_src) + + copy_args = self._task.args.copy() + copy_args.update( + dict( + dest=dest, + src=tmp_src, + _original_basename=source_rel, + _copy_mode="single", + backup=backup, + ) + ) + copy_args.pop('content', None) + + copy_result = self._execute_module(module_name="copy", + module_args=copy_args, + task_vars=task_vars) + + return copy_result + + def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup): + # create local zip file containing all the files and directories that + # need to be copied to the server + if self._play_context.check_mode: + module_return = dict(changed=True) + return module_return + + try: + zip_file = self._create_zip_tempfile(files, directories) + except Exception as e: + module_return = dict( + changed=False, + failed=True, + msg="failed to create tmp zip file: %s" % to_text(e), + exception=traceback.format_exc() + ) + return module_return + + zip_path = self._loader.get_real_file(zip_file) + + # send zip file to remote, file must end in .zip so + # Com Shell.Application works + tmp_src = self._connection._shell.join_path(tmp, 'source.zip') + self._transfer_file(zip_path, tmp_src) + + # run the explode operation of win_copy on remote + copy_args = self._task.args.copy() + copy_args.update( + dict( + src=tmp_src, + dest=dest, + _copy_mode="explode", + backup=backup, + ) + ) + copy_args.pop('content', None) + module_return = self._execute_module(module_name='copy', + module_args=copy_args, + task_vars=task_vars) + shutil.rmtree(os.path.dirname(zip_path)) + return module_return + + def run(self, tmp=None, task_vars=None): + ''' handler for file transfer operations ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + source = self._task.args.get('src', None) + content = self._task.args.get('content', None) + dest = self._task.args.get('dest', None) + remote_src = boolean(self._task.args.get('remote_src', False), strict=False) + local_follow = boolean(self._task.args.get('local_follow', False), strict=False) + force = boolean(self._task.args.get('force', True), strict=False) + decrypt = boolean(self._task.args.get('decrypt', True), strict=False) + backup = boolean(self._task.args.get('backup', False), strict=False) + + result['src'] = source + result['dest'] = dest + + result['failed'] = True + if (source is None and content is None) or dest is None: + result['msg'] = "src (or content) and dest are required" + elif source is not None and content is not None: + result['msg'] = "src and content are mutually exclusive" + elif content is not 
None and dest is not None and ( + dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)): + result['msg'] = "dest must be a file if content is defined" + else: + del result['failed'] + + if result.get('failed'): + return result + + # If content is defined make a temp file and write the content into it + content_tempfile = None + if content is not None: + try: + # if content comes to us as a dict it should be decoded json. + # We need to encode it back into a string and write it out + if isinstance(content, dict) or isinstance(content, list): + content_tempfile = self._create_content_tempfile(json.dumps(content)) + else: + content_tempfile = self._create_content_tempfile(content) + source = content_tempfile + except Exception as err: + result['failed'] = True + result['msg'] = "could not write content tmp file: %s" % to_native(err) + return result + # all actions should occur on the remote server, run win_copy module + elif remote_src: + new_module_args = self._task.args.copy() + new_module_args.update( + dict( + _copy_mode="remote", + dest=dest, + src=source, + force=force, + backup=backup, + ) + ) + new_module_args.pop('content', None) + result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars)) + return result + # find_needle returns a path that may not have a trailing slash on a + # directory so we need to find that out first and append at the end + else: + trailing_slash = source.endswith(os.path.sep) + try: + # find in expected paths + source = self._find_needle('files', source) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if trailing_slash != source.endswith(os.path.sep): + if source[-1] == os.path.sep: + source = source[:-1] + else: + source = source + os.path.sep + + # A list of source file tuples (full_path, relative_path) which we will try to copy to the destination + source_files = {'files': [], 'directories': [], 'symlinks': []} + + # If source is a directory, populate our list; else source is a file, so translate it to a tuple. + if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')): + result['operation'] = 'folder_copy' + + # Get a list of the files we want to replicate on the remote side + source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow, + trailing_slash_detector=self._connection._shell.path_has_trailing_slash, + checksum_check=force) + + # If it's a recursive copy, destination is always a dir, + # explicitly mark it so (note - win_copy module relies on this).
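+ # (Illustrative sketch, hypothetical values: a recursive copy with + # dest='C:\\temp' is rewritten below to 'C:\\temp\\' so that the + # win_copy module treats dest as a directory rather than a file path.)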
+ if not self._connection._shell.path_has_trailing_slash(dest): + dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR) + + check_dest = dest + # Source is a file, add details to source_files dict + else: + result['operation'] = 'file_copy' + + # If the local file does not exist, get_real_file() raises AnsibleFileNotFound + try: + source_full = self._loader.get_real_file(source, decrypt=decrypt) + except AnsibleFileNotFound as e: + result['failed'] = True + result['msg'] = "could not find src=%s, %s" % (source, to_text(e)) + return result + + original_basename = os.path.basename(source) + result['original_basename'] = original_basename + + # check if dest ends with / or \ and append source filename to dest + if self._connection._shell.path_has_trailing_slash(dest): + check_dest = dest + filename = original_basename + result['dest'] = self._connection._shell.join_path(dest, filename) + else: + # replace \\ with / so we can use os.path to get the filename or dirname + unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep) + filename = os.path.basename(unix_path) + check_dest = os.path.dirname(unix_path) + + file_checksum = _get_local_checksum(force, source_full) + source_files['files'].append( + dict( + src=source_full, + dest=filename, + checksum=file_checksum + ) + ) + result['checksum'] = file_checksum + result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict')) + + # find out the files/directories/symlinks that we need to copy to the server + query_args = self._task.args.copy() + query_args.update( + dict( + _copy_mode="query", + dest=check_dest, + force=force, + files=source_files['files'], + directories=source_files['directories'], + symlinks=source_files['symlinks'], + ) + ) + # src is not required for query, will fail path validation if src has unix allowed chars + query_args.pop('src', None) + + query_args.pop('content', None) + query_return = self._execute_module(module_args=query_args, + task_vars=task_vars) + + if query_return.get('failed') is True: + result.update(query_return) + return result + + if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None: + self._connection._shell.tmpdir = self._make_tmp_path() + + if len(query_return['files']) == 1 and len(query_return['directories']) == 0: + # we only need to copy 1 file, don't mess around with zips + file_src = query_return['files'][0]['src'] + file_dest = query_return['files'][0]['dest'] + result.update(self._copy_single_file(file_src, dest, file_dest, + task_vars, self._connection._shell.tmpdir, backup)) + if result.get('failed') is True: + result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg']) + result['changed'] = True + + elif len(query_return['files']) > 0 or len(query_return['directories']) > 0: + # either multiple files or directories need to be copied, compress + # to a zip and 'explode' the zip on the server + # TODO: handle symlinks + result.update(self._copy_zip_file(dest, source_files['files'], + source_files['directories'], + task_vars, self._connection._shell.tmpdir, backup)) + result['changed'] = True + else: + # no operations need to occur + result['failed'] = False + result['changed'] = False + + # remove the content tmp file and remote tmp file if it was created + self._remove_tempfile_if_content_defined(content, content_tempfile) + self._remove_tmp_path(self._connection._shell.tmpdir) + return result diff --git a/test/support/windows-integration/plugins/action/win_reboot.py
b/test/support/windows-integration/plugins/action/win_reboot.py new file mode 100644 index 00000000..c408f4f3 --- /dev/null +++ b/test/support/windows-integration/plugins/action/win_reboot.py @@ -0,0 +1,96 @@ +# Copyright: (c) 2018, Matt Davis +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from datetime import datetime + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.plugins.action import ActionBase +from ansible.plugins.action.reboot import ActionModule as RebootActionModule +from ansible.utils.display import Display + +display = Display() + + +class TimedOutException(Exception): + pass + + +class ActionModule(RebootActionModule, ActionBase): + TRANSFERS_FILES = False + _VALID_ARGS = frozenset(( + 'connect_timeout', 'connect_timeout_sec', 'msg', 'post_reboot_delay', 'post_reboot_delay_sec', 'pre_reboot_delay', 'pre_reboot_delay_sec', + 'reboot_timeout', 'reboot_timeout_sec', 'shutdown_timeout', 'shutdown_timeout_sec', 'test_command', + )) + + DEFAULT_BOOT_TIME_COMMAND = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime" + DEFAULT_CONNECT_TIMEOUT = 5 + DEFAULT_PRE_REBOOT_DELAY = 2 + DEFAULT_SUDOABLE = False + DEFAULT_SHUTDOWN_COMMAND_ARGS = '/r /t {delay_sec} /c "{message}"' + + DEPRECATED_ARGS = { + 'shutdown_timeout': '2.5', + 'shutdown_timeout_sec': '2.5', + } + + def __init__(self, *args, **kwargs): + super(ActionModule, self).__init__(*args, **kwargs) + + def get_distribution(self, task_vars): + return {'name': 'windows', 'version': '', 'family': ''} + + def get_shutdown_command(self, task_vars, distribution): + return self.DEFAULT_SHUTDOWN_COMMAND + + def run_test_command(self, distribution, **kwargs): + # Need to wrap the test_command in our PowerShell encoded wrapper. This is done to align the command input to a + # common shell and to allow the psrp connection plugin to report the correct exit code without manually setting + # $LASTEXITCODE for just that plugin. + test_command = self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND) + kwargs['test_command'] = self._connection._shell._encode_script(test_command) + super(ActionModule, self).run_test_command(distribution, **kwargs) + + def perform_reboot(self, task_vars, distribution): + shutdown_command = self.get_shutdown_command(task_vars, distribution) + shutdown_command_args = self.get_shutdown_command_args(distribution) + reboot_command = self._connection._shell._encode_script('{0} {1}'.format(shutdown_command, shutdown_command_args)) + + display.vvv("{action}: rebooting server...".format(action=self._task.action)) + display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) + display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command)) + + result = {} + reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE) + result['start'] = datetime.utcnow() + + # Test for "A system shutdown has already been scheduled. 
(1190)" and handle it gracefully + stdout = reboot_result['stdout'] + stderr = reboot_result['stderr'] + if reboot_result['rc'] == 1190 or (reboot_result['rc'] != 0 and "(1190)" in reboot_result['stderr']): + display.warning('A scheduled reboot was pre-empted by Ansible.') + + # Try to abort (this may fail if it was already aborted) + result1 = self._low_level_execute_command(self._connection._shell._encode_script('shutdown /a'), + sudoable=self.DEFAULT_SUDOABLE) + + # Initiate reboot again + result2 = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE) + + reboot_result['rc'] = result2['rc'] + stdout += result1['stdout'] + result2['stdout'] + stderr += result1['stderr'] + result2['stderr'] + + if reboot_result['rc'] != 0: + result['failed'] = True + result['rebooted'] = False + result['msg'] = "Reboot command failed, error was: {stdout} {stderr}".format( + stdout=to_native(stdout.strip()), + stderr=to_native(stderr.strip())) + return result + + result['failed'] = False + return result diff --git a/test/support/windows-integration/plugins/action/win_template.py b/test/support/windows-integration/plugins/action/win_template.py new file mode 100644 index 00000000..20494b93 --- /dev/null +++ b/test/support/windows-integration/plugins/action/win_template.py @@ -0,0 +1,29 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.template import ActionModule as TemplateActionModule + + +# Even though TemplateActionModule inherits from ActionBase, we still need to +# directly inherit from ActionBase to appease the plugin loader. +class ActionModule(TemplateActionModule, ActionBase): + DEFAULT_NEWLINE_SEQUENCE = '\r\n' diff --git a/test/support/windows-integration/plugins/become/runas.py b/test/support/windows-integration/plugins/become/runas.py new file mode 100644 index 00000000..c8ae881c --- /dev/null +++ b/test/support/windows-integration/plugins/become/runas.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + become: runas + short_description: Run As user + description: + - This become plugins allows your remote/login user to execute commands as another user via the windows runas facility. 
+ author: ansible (@core) + version_added: "2.8" + options: + become_user: + description: User you 'become' to execute the task + ini: + - section: privilege_escalation + key: become_user + - section: runas_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_runas_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_RUNAS_USER + required: True + become_flags: + description: Options to pass to runas, a space delimited list of k=v pairs + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: runas_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_runas_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_RUNAS_FLAGS + become_pass: + description: password + ini: + - section: runas_become_plugin + key: password + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_runas_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_RUNAS_PASS + notes: + - runas is really implemented in the powershell module handler and as such can only be used with winrm connections. + - This plugin ignores the 'become_exe' setting as it uses an API and not an executable. + - The Secondary Logon service (seclogon) must be running to use runas +""" + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'runas' + + def build_become_command(self, cmd, shell): + # runas is implemented inside the winrm connection plugin + return cmd diff --git a/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs b/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs new file mode 100644 index 00000000..be0f3db3 --- /dev/null +++ b/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs @@ -0,0 +1,1341 @@ +using Microsoft.Win32.SafeHandles; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.ConstrainedExecution; +using System.Runtime.InteropServices; +using System.Security.Principal; +using System.Text; +using Ansible.Privilege; + +namespace Ansible.Service +{ + internal class NativeHelpers + { + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct ENUM_SERVICE_STATUSW + { + public string lpServiceName; + public string lpDisplayName; + public SERVICE_STATUS ServiceStatus; + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct QUERY_SERVICE_CONFIGW + { + public ServiceType dwServiceType; + public ServiceStartType dwStartType; + public ErrorControl dwErrorControl; + [MarshalAs(UnmanagedType.LPWStr)] public string lpBinaryPathName; + [MarshalAs(UnmanagedType.LPWStr)] public string lpLoadOrderGroup; + public Int32 dwTagId; + public IntPtr lpDependencies; // Can't rely on marshaling as dependencies are delimited by \0. 
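+ // (Illustrative, hypothetical service names: the buffer holds a + // double-null-terminated multistring such as "RpcSs\0http\0\0", which + // is why the DependentOn property later in this file parses it + // character by character instead of relying on the marshaler.)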
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpServiceStartName; + [MarshalAs(UnmanagedType.LPWStr)] public string lpDisplayName; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SC_ACTION + { + public FailureAction Type; + public UInt32 Delay; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_DELAYED_AUTO_START_INFO + { + public bool fDelayedAutostart; + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct SERVICE_DESCRIPTIONW + { + [MarshalAs(UnmanagedType.LPWStr)] public string lpDescription; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_FAILURE_ACTIONS_FLAG + { + public bool fFailureActionsOnNonCrashFailures; + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct SERVICE_FAILURE_ACTIONSW + { + public UInt32 dwResetPeriod; + [MarshalAs(UnmanagedType.LPWStr)] public string lpRebootMsg; + [MarshalAs(UnmanagedType.LPWStr)] public string lpCommand; + public UInt32 cActions; + public IntPtr lpsaActions; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_LAUNCH_PROTECTED_INFO + { + public LaunchProtection dwLaunchProtected; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_PREFERRED_NODE_INFO + { + public UInt16 usPreferredNode; + public bool fDelete; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_PRESHUTDOWN_INFO + { + public UInt32 dwPreshutdownTimeout; + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct SERVICE_REQUIRED_PRIVILEGES_INFOW + { + // Can't rely on marshaling as privileges are delimited by \0. + public IntPtr pmszRequiredPrivileges; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_SID_INFO + { + public ServiceSidInfo dwServiceSidType; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_STATUS + { + public ServiceType dwServiceType; + public ServiceStatus dwCurrentState; + public ControlsAccepted dwControlsAccepted; + public UInt32 dwWin32ExitCode; + public UInt32 dwServiceSpecificExitCode; + public UInt32 dwCheckPoint; + public UInt32 dwWaitHint; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_STATUS_PROCESS + { + public ServiceType dwServiceType; + public ServiceStatus dwCurrentState; + public ControlsAccepted dwControlsAccepted; + public UInt32 dwWin32ExitCode; + public UInt32 dwServiceSpecificExitCode; + public UInt32 dwCheckPoint; + public UInt32 dwWaitHint; + public UInt32 dwProcessId; + public ServiceFlags dwServiceFlags; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_TRIGGER + { + public TriggerType dwTriggerType; + public TriggerAction dwAction; + public IntPtr pTriggerSubtype; + public UInt32 cDataItems; + public IntPtr pDataItems; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_TRIGGER_SPECIFIC_DATA_ITEM + { + public TriggerDataType dwDataType; + public UInt32 cbData; + public IntPtr pData; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SERVICE_TRIGGER_INFO + { + public UInt32 cTriggers; + public IntPtr pTriggers; + public IntPtr pReserved; + } + + public enum ConfigInfoLevel : uint + { + SERVICE_CONFIG_DESCRIPTION = 0x00000001, + SERVICE_CONFIG_FAILURE_ACTIONS = 0x00000002, + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 0x00000003, + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 0x00000004, + SERVICE_CONFIG_SERVICE_SID_INFO = 0x00000005, + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 0x00000006, + SERVICE_CONFIG_PRESHUTDOWN_INFO = 
0x00000007, + SERVICE_CONFIG_TRIGGER_INFO = 0x00000008, + SERVICE_CONFIG_PREFERRED_NODE = 0x00000009, + SERVICE_CONFIG_LAUNCH_PROTECTED = 0x0000000c, + } + } + + internal class NativeMethods + { + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern bool ChangeServiceConfigW( + SafeHandle hService, + ServiceType dwServiceType, + ServiceStartType dwStartType, + ErrorControl dwErrorControl, + string lpBinaryPathName, + string lpLoadOrderGroup, + IntPtr lpdwTagId, + string lpDependencies, + string lpServiceStartName, + string lpPassword, + string lpDisplayName); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern bool ChangeServiceConfig2W( + SafeHandle hService, + NativeHelpers.ConfigInfoLevel dwInfoLevel, + IntPtr lpInfo); + + [DllImport("Advapi32.dll", SetLastError = true)] + public static extern bool CloseServiceHandle( + IntPtr hSCObject); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern SafeServiceHandle CreateServiceW( + SafeHandle hSCManager, + string lpServiceName, + string lpDisplayName, + ServiceRights dwDesiredAccess, + ServiceType dwServiceType, + ServiceStartType dwStartType, + ErrorControl dwErrorControl, + string lpBinaryPathName, + string lpLoadOrderGroup, + IntPtr lpdwTagId, + string lpDependencies, + string lpServiceStartName, + string lpPassword); + + [DllImport("Advapi32.dll", SetLastError = true)] + public static extern bool DeleteService( + SafeHandle hService); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern bool EnumDependentServicesW( + SafeHandle hService, + UInt32 dwServiceState, + SafeMemoryBuffer lpServices, + UInt32 cbBufSize, + out UInt32 pcbBytesNeeded, + out UInt32 lpServicesReturned); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern SafeServiceHandle OpenSCManagerW( + string lpMachineName, + string lpDatabaseName, + SCMRights dwDesiredAccess); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern SafeServiceHandle OpenServiceW( + SafeHandle hSCManager, + string lpServiceName, + ServiceRights dwDesiredAccess); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern bool QueryServiceConfigW( + SafeHandle hService, + IntPtr lpServiceConfig, + UInt32 cbBufSize, + out UInt32 pcbBytesNeeded); + + [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern bool QueryServiceConfig2W( + SafeHandle hService, + NativeHelpers.ConfigInfoLevel dwInfoLevel, + IntPtr lpBuffer, + UInt32 cbBufSize, + out UInt32 pcbBytesNeeded); + + [DllImport("Advapi32.dll", SetLastError = true)] + public static extern bool QueryServiceStatusEx( + SafeHandle hService, + UInt32 InfoLevel, + IntPtr lpBuffer, + UInt32 cbBufSize, + out UInt32 pcbBytesNeeded); + } + + internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid + { + public UInt32 BufferLength { get; internal set; } + + public SafeMemoryBuffer() : base(true) { } + public SafeMemoryBuffer(int cb) : base(true) + { + BufferLength = (UInt32)cb; + base.SetHandle(Marshal.AllocHGlobal(cb)); + } + public SafeMemoryBuffer(IntPtr handle) : base(true) + { + base.SetHandle(handle); + } + + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() + { + Marshal.FreeHGlobal(handle); + return true; +
} + } + + internal class SafeServiceHandle : SafeHandleZeroOrMinusOneIsInvalid + { + public SafeServiceHandle() : base(true) { } + public SafeServiceHandle(IntPtr handle) : base(true) { this.handle = handle; } + + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() + { + return NativeMethods.CloseServiceHandle(handle); + } + } + + [Flags] + public enum ControlsAccepted : uint + { + None = 0x00000000, + Stop = 0x00000001, + PauseContinue = 0x00000002, + Shutdown = 0x00000004, + ParamChange = 0x00000008, + NetbindChange = 0x00000010, + HardwareProfileChange = 0x00000020, + PowerEvent = 0x00000040, + SessionChange = 0x00000080, + PreShutdown = 0x00000100, + } + + public enum ErrorControl : uint + { + Ignore = 0x00000000, + Normal = 0x00000001, + Severe = 0x00000002, + Critical = 0x00000003, + } + + public enum FailureAction : uint + { + None = 0x00000000, + Restart = 0x00000001, + Reboot = 0x00000002, + RunCommand = 0x00000003, + } + + public enum LaunchProtection : uint + { + None = 0, + Windows = 1, + WindowsLight = 2, + AntimalwareLight = 3, + } + + [Flags] + public enum SCMRights : uint + { + Connect = 0x00000001, + CreateService = 0x00000002, + EnumerateService = 0x00000004, + Lock = 0x00000008, + QueryLockStatus = 0x00000010, + ModifyBootConfig = 0x00000020, + AllAccess = 0x000F003F, + } + + [Flags] + public enum ServiceFlags : uint + { + None = 0x00000000, + RunsInSystemProcess = 0x00000001, + } + + [Flags] + public enum ServiceRights : uint + { + QueryConfig = 0x00000001, + ChangeConfig = 0x00000002, + QueryStatus = 0x00000004, + EnumerateDependents = 0x00000008, + Start = 0x00000010, + Stop = 0x00000020, + PauseContinue = 0x00000040, + Interrogate = 0x00000080, + UserDefinedControl = 0x00000100, + Delete = 0x00010000, + ReadControl = 0x00020000, + WriteDac = 0x00040000, + WriteOwner = 0x00080000, + AllAccess = 0x000F01FF, + AccessSystemSecurity = 0x01000000, + } + + public enum ServiceStartType : uint + { + BootStart = 0x00000000, + SystemStart = 0x00000001, + AutoStart = 0x00000002, + DemandStart = 0x00000003, + Disabled = 0x00000004, + + // Not part of ChangeServiceConfig enumeration but built by the Service class for the StartType property.
+ AutoStartDelayed = 0x1000000 + } + + [Flags] + public enum ServiceType : uint + { + KernelDriver = 0x00000001, + FileSystemDriver = 0x00000002, + Adapter = 0x00000004, + RecognizerDriver = 0x00000008, + Driver = KernelDriver | FileSystemDriver | RecognizerDriver, + Win32OwnProcess = 0x00000010, + Win32ShareProcess = 0x00000020, + Win32 = Win32OwnProcess | Win32ShareProcess, + UserProcess = 0x00000040, + UserOwnprocess = Win32OwnProcess | UserProcess, + UserShareProcess = Win32ShareProcess | UserProcess, + UserServiceInstance = 0x00000080, + InteractiveProcess = 0x00000100, + PkgService = 0x00000200, + } + + public enum ServiceSidInfo : uint + { + None, + Unrestricted, + Restricted = 3, + } + + public enum ServiceStatus : uint + { + Stopped = 0x00000001, + StartPending = 0x00000002, + StopPending = 0x00000003, + Running = 0x00000004, + ContinuePending = 0x00000005, + PausePending = 0x00000006, + Paused = 0x00000007, + } + + public enum TriggerAction : uint + { + ServiceStart = 0x00000001, + ServiceStop = 0x00000002, + } + + public enum TriggerDataType : uint + { + Binary = 0x00000001, + String = 0x00000002, + Level = 0x00000003, + KeywordAny = 0x00000004, + KeywordAll = 0x00000005, + } + + public enum TriggerType : uint + { + DeviceInterfaceArrival = 0x00000001, + IpAddressAvailability = 0x00000002, + DomainJoin = 0x00000003, + FirewallPortEvent = 0x00000004, + GroupPolicy = 0x00000005, + NetworkEndpoint = 0x00000006, + Custom = 0x00000014, + } + + public class ServiceManagerException : System.ComponentModel.Win32Exception + { + private string _msg; + + public ServiceManagerException(string message) : this(Marshal.GetLastWin32Error(), message) { } + public ServiceManagerException(int errorCode, string message) : base(errorCode) + { + _msg = String.Format("{0} ({1}, Win32ErrorCode {2} - 0x{2:X8})", message, base.Message, errorCode); + } + + public override string Message { get { return _msg; } } + public static explicit operator ServiceManagerException(string message) + { + return new ServiceManagerException(message); + } + } + + public class Action + { + public FailureAction Type; + public UInt32 Delay; + } + + public class FailureActions + { + public UInt32? ResetPeriod = null; // Get is always populated, can be null on set to preserve existing. + public string RebootMsg = null; + public string Command = null; + public List<Action> Actions = null; + + public FailureActions() { } + + internal FailureActions(NativeHelpers.SERVICE_FAILURE_ACTIONSW actions) + { + ResetPeriod = actions.dwResetPeriod; + RebootMsg = actions.lpRebootMsg; + Command = actions.lpCommand; + Actions = new List<Action>(); + + int actionLength = Marshal.SizeOf(typeof(NativeHelpers.SC_ACTION)); + for (int i = 0; i < actions.cActions; i++) + { + IntPtr actionPtr = IntPtr.Add(actions.lpsaActions, i * actionLength); + + NativeHelpers.SC_ACTION rawAction = (NativeHelpers.SC_ACTION)Marshal.PtrToStructure( + actionPtr, typeof(NativeHelpers.SC_ACTION)); + + Actions.Add(new Action() + { + Type = rawAction.Type, + Delay = rawAction.Delay, + }); + } + } + } + + public class TriggerItem + { + public TriggerDataType Type; + public object Data; // Can be string, List<string>, byte, byte[], or Int64 depending on Type.
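+ // (Illustrative: a KeywordAny or KeywordAll item arrives as 8 raw + // bytes and is decoded with BitConverter.ToUInt64 in the constructor + // below; a String item that ends in "\0\0" is split into a + // List<string> multistring.)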
+ + public TriggerItem() { } + + internal TriggerItem(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM dataItem) + { + Type = dataItem.dwDataType; + + byte[] itemBytes = new byte[dataItem.cbData]; + Marshal.Copy(dataItem.pData, itemBytes, 0, itemBytes.Length); + + switch (dataItem.dwDataType) + { + case TriggerDataType.String: + string value = Encoding.Unicode.GetString(itemBytes, 0, itemBytes.Length); + + if (value.EndsWith("\0\0")) + { + // Multistring with a delimiter of \0 and terminated with \0\0. + Data = new List<string>(value.Split(new char[1] { '\0' }, StringSplitOptions.RemoveEmptyEntries)); + } + else + // Just a single string with null character at the end, strip it off. + Data = value.Substring(0, value.Length - 1); + break; + case TriggerDataType.Level: + Data = itemBytes[0]; + break; + case TriggerDataType.KeywordAll: + case TriggerDataType.KeywordAny: + Data = BitConverter.ToUInt64(itemBytes, 0); + break; + default: + Data = itemBytes; + break; + } + } + } + + public class Trigger + { + // https://docs.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_trigger + public const string NAMED_PIPE_EVENT_GUID = "1f81d131-3fac-4537-9e0c-7e7b0c2f4b55"; + public const string RPC_INTERFACE_EVENT_GUID = "bc90d167-9470-4139-a9ba-be0bbbf5b74d"; + public const string DOMAIN_JOIN_GUID = "1ce20aba-9851-4421-9430-1ddeb766e809"; + public const string DOMAIN_LEAVE_GUID = "ddaf516e-58c2-4866-9574-c3b615d42ea1"; + public const string FIREWALL_PORT_OPEN_GUID = "b7569e07-8421-4ee0-ad10-86915afdad09"; + public const string FIREWALL_PORT_CLOSE_GUID = "a144ed38-8e12-4de4-9d96-e64740b1a524"; + public const string MACHINE_POLICY_PRESENT_GUID = "659fcae6-5bdb-4da9-b1ff-ca2a178d46e0"; + public const string NETWORK_MANAGER_FIRST_IP_ADDRESS_ARRIVAL_GUID = "4f27f2de-14e2-430b-a549-7cd48cbc8245"; + public const string NETWORK_MANAGER_LAST_IP_ADDRESS_REMOVAL_GUID = "cc4ba62a-162e-4648-847a-b6bdf993e335"; + public const string USER_POLICY_PRESENT_GUID = "54fb46c8-f089-464c-b1fd-59d1b62c3b50"; + + public TriggerType Type; + public TriggerAction Action; + public Guid SubType; + public List<TriggerItem> DataItems = new List<TriggerItem>(); + + public Trigger() { } + + internal Trigger(NativeHelpers.SERVICE_TRIGGER trigger) + { + Type = trigger.dwTriggerType; + Action = trigger.dwAction; + SubType = (Guid)Marshal.PtrToStructure(trigger.pTriggerSubtype, typeof(Guid)); + + int dataItemLength = Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM)); + for (int i = 0; i < trigger.cDataItems; i++) + { + IntPtr dataPtr = IntPtr.Add(trigger.pDataItems, i * dataItemLength); + + var dataItem = (NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM)Marshal.PtrToStructure( + dataPtr, typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM)); + + DataItems.Add(new TriggerItem(dataItem)); + } + } + } + + public class Service : IDisposable + { + private const UInt32 SERVICE_NO_CHANGE = 0xFFFFFFFF; + + private SafeServiceHandle _scmHandle; + private SafeServiceHandle _serviceHandle; + private SafeMemoryBuffer _rawServiceConfig; + private NativeHelpers.SERVICE_STATUS_PROCESS _statusProcess; + + private NativeHelpers.QUERY_SERVICE_CONFIGW _ServiceConfig + { + get + { + return (NativeHelpers.QUERY_SERVICE_CONFIGW)Marshal.PtrToStructure( + _rawServiceConfig.DangerousGetHandle(), typeof(NativeHelpers.QUERY_SERVICE_CONFIGW)); + } + } + + // ServiceConfig + public string ServiceName { get; private set; } + + public ServiceType ServiceType + { + get { return _ServiceConfig.dwServiceType; } + set { ChangeServiceConfig(serviceType: value); } + }
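+ // (Illustrative usage with a hypothetical service name, not part of + // this module: + // using (var svc = new Service("spooler")) + // svc.StartType = ServiceStartType.AutoStartDelayed; + // The setter below maps AutoStartDelayed to AutoStart plus the + // SERVICE_CONFIG_DELAYED_AUTO_START_INFO setting.)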
+ + public ServiceStartType StartType + { + get + { + ServiceStartType startType = _ServiceConfig.dwStartType; + if (startType == ServiceStartType.AutoStart) + { + var value = QueryServiceConfig2<NativeHelpers.SERVICE_DELAYED_AUTO_START_INFO>( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DELAYED_AUTO_START_INFO); + + if (value.fDelayedAutostart) + startType = ServiceStartType.AutoStartDelayed; + } + + return startType; + } + set + { + ServiceStartType newStartType = value; + bool delayedStart = false; + if (value == ServiceStartType.AutoStartDelayed) + { + newStartType = ServiceStartType.AutoStart; + delayedStart = true; + } + + ChangeServiceConfig(startType: newStartType); + + var info = new NativeHelpers.SERVICE_DELAYED_AUTO_START_INFO() + { + fDelayedAutostart = delayedStart, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DELAYED_AUTO_START_INFO, info); + } + } + + public ErrorControl ErrorControl + { + get { return _ServiceConfig.dwErrorControl; } + set { ChangeServiceConfig(errorControl: value); } + } + + public string Path + { + get { return _ServiceConfig.lpBinaryPathName; } + set { ChangeServiceConfig(binaryPath: value); } + } + + public string LoadOrderGroup + { + get { return _ServiceConfig.lpLoadOrderGroup; } + set { ChangeServiceConfig(loadOrderGroup: value); } + } + + public List<string> DependentOn + { + get + { + StringBuilder deps = new StringBuilder(); + IntPtr depPtr = _ServiceConfig.lpDependencies; + + bool wasNull = false; + while (true) + { + // Get the current char at the pointer and add it to the StringBuilder. + byte[] charBytes = new byte[sizeof(char)]; + Marshal.Copy(depPtr, charBytes, 0, charBytes.Length); + depPtr = IntPtr.Add(depPtr, charBytes.Length); + char currentChar = BitConverter.ToChar(charBytes, 0); + deps.Append(currentChar); + + // If the previous and current char is \0 exit the loop. + if (currentChar == '\0' && wasNull) + break; + wasNull = currentChar == '\0'; + } + + return new List<string>(deps.ToString().Split(new char[1] { '\0' }, + StringSplitOptions.RemoveEmptyEntries)); + } + set { ChangeServiceConfig(dependencies: value); } + } + + public IdentityReference Account + { + get + { + if (_ServiceConfig.lpServiceStartName == null) + // User services don't have the start name specified and will be null. + return null; + else if (_ServiceConfig.lpServiceStartName == "LocalSystem") + // Special string used for the SYSTEM account, this is the same even for different localisations. + return (NTAccount)new SecurityIdentifier("S-1-5-18").Translate(typeof(NTAccount)); + else + return new NTAccount(_ServiceConfig.lpServiceStartName); + } + set + { + string startName = null; + string pass = null; + + if (value != null) + { + // Create a SID and convert back from a SID to get the Netlogon form regardless of the input + // specified. + SecurityIdentifier accountSid = (SecurityIdentifier)value.Translate(typeof(SecurityIdentifier)); + NTAccount accountName = (NTAccount)accountSid.Translate(typeof(NTAccount)); + string[] accountSplit = accountName.Value.Split(new char[1] { '\\' }, 2); + + // SYSTEM, Local Service, Network Service + List<string> serviceAccounts = new List<string> { "S-1-5-18", "S-1-5-19", "S-1-5-20" }; + + // Well known service accounts and MSAs should have no password set. Explicitly blank out the + // existing password to ensure older passwords are no longer stored by Windows.
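+ // (Illustrative, hypothetical account: a gMSA such as "DOMAIN\\svcApp$" + // ends with '$' and therefore also gets pass = "" below, just like the + // three well-known service SIDs listed above.)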
+ if (serviceAccounts.Contains(accountSid.Value) || accountSplit[1].EndsWith("$")) + pass = ""; + + // The SYSTEM account uses this special string to specify that account otherwise use the original + // NTAccount value in case it is in a custom format (not Netlogon) for a reason. + if (accountSid.Value == serviceAccounts[0]) + startName = "LocalSystem"; + else + startName = value.Translate(typeof(NTAccount)).Value; + } + + ChangeServiceConfig(startName: startName, password: pass); + } + } + + public string Password { set { ChangeServiceConfig(password: value); } } + + public string DisplayName + { + get { return _ServiceConfig.lpDisplayName; } + set { ChangeServiceConfig(displayName: value); } + } + + // ServiceConfig2 + + public string Description + { + get + { + var value = QueryServiceConfig2<NativeHelpers.SERVICE_DESCRIPTIONW>( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DESCRIPTION); + + return value.lpDescription; + } + set + { + var info = new NativeHelpers.SERVICE_DESCRIPTIONW() + { + lpDescription = value, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DESCRIPTION, info); + } + } + + public FailureActions FailureActions + { + get + { + using (SafeMemoryBuffer b = QueryServiceConfig2( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS)) + { + NativeHelpers.SERVICE_FAILURE_ACTIONSW value = (NativeHelpers.SERVICE_FAILURE_ACTIONSW) + Marshal.PtrToStructure(b.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_FAILURE_ACTIONSW)); + + return new FailureActions(value); + } + } + set + { + // dwResetPeriod and lpsaActions must be set together, we need to read the existing config if someone + // wants to update 1 or the other but both aren't explicitly defined. + UInt32? resetPeriod = value.ResetPeriod; + List<Action> actions = value.Actions; + if ((resetPeriod != null && actions == null) || (resetPeriod == null && actions != null)) + { + FailureActions existingValue = this.FailureActions; + + if (resetPeriod != null && existingValue.Actions.Count == 0) + throw new ArgumentException( + "Cannot set FailureAction ResetPeriod without explicit Actions and no existing Actions"); + else if (resetPeriod == null) + resetPeriod = (UInt32)existingValue.ResetPeriod; + + if (actions == null) + actions = existingValue.Actions; + } + + var info = new NativeHelpers.SERVICE_FAILURE_ACTIONSW() + { + dwResetPeriod = resetPeriod == null ? 0 : (UInt32)resetPeriod, + lpRebootMsg = value.RebootMsg, + lpCommand = value.Command, + cActions = actions == null ? 0 : (UInt32)actions.Count, + lpsaActions = IntPtr.Zero, + }; + + // null means to keep the existing actions whereas an empty list deletes the actions. + if (actions == null) + { + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS, info); + return; + } + + int actionLength = Marshal.SizeOf(typeof(NativeHelpers.SC_ACTION)); + using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(actionLength * actions.Count)) + { + info.lpsaActions = buffer.DangerousGetHandle(); + HashSet<string> privileges = new HashSet<string>(); + + for (int i = 0; i < actions.Count; i++) + { + IntPtr actionPtr = IntPtr.Add(info.lpsaActions, i * actionLength); + NativeHelpers.SC_ACTION action = new NativeHelpers.SC_ACTION() + { + Delay = actions[i].Delay, + Type = actions[i].Type, + }; + Marshal.StructureToPtr(action, actionPtr, false); + + // Need to make sure the SeShutdownPrivilege is enabled when adding a reboot failure action.
+ if (action.Type == FailureAction.Reboot) + privileges.Add("SeShutdownPrivilege"); + } + + using (new PrivilegeEnabler(true, privileges.ToList().ToArray())) + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS, info); + } + } + } + + public bool FailureActionsOnNonCrashFailures + { + get + { + var value = QueryServiceConfig2<NativeHelpers.SERVICE_FAILURE_ACTIONS_FLAG>( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG); + + return value.fFailureActionsOnNonCrashFailures; + } + set + { + var info = new NativeHelpers.SERVICE_FAILURE_ACTIONS_FLAG() + { + fFailureActionsOnNonCrashFailures = value, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG, info); + } + } + + public ServiceSidInfo ServiceSidInfo + { + get + { + var value = QueryServiceConfig2<NativeHelpers.SERVICE_SID_INFO>( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_SERVICE_SID_INFO); + + return value.dwServiceSidType; + } + set + { + var info = new NativeHelpers.SERVICE_SID_INFO() + { + dwServiceSidType = value, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_SERVICE_SID_INFO, info); + } + } + + public List<string> RequiredPrivileges + { + get + { + using (SafeMemoryBuffer buffer = QueryServiceConfig2( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO)) + { + var value = (NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW)Marshal.PtrToStructure( + buffer.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW)); + + int structLength = Marshal.SizeOf(value); + int stringLength = ((int)buffer.BufferLength - structLength) / sizeof(char); + + if (stringLength > 0) + { + string privilegesString = Marshal.PtrToStringUni(value.pmszRequiredPrivileges, stringLength); + return new List<string>(privilegesString.Split(new char[1] { '\0' }, + StringSplitOptions.RemoveEmptyEntries)); + } + else + return new List<string>(); + } + } + set + { + string privilegeString = String.Join("\0", value ?? new List<string>()) + "\0\0"; + + using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(Marshal.StringToHGlobalUni(privilegeString))) + { + var info = new NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW() + { + pmszRequiredPrivileges = buffer.DangerousGetHandle(), + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO, info); + } + } + } + + public UInt32 PreShutdownTimeout + { + get + { + var value = QueryServiceConfig2<NativeHelpers.SERVICE_PRESHUTDOWN_INFO>( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PRESHUTDOWN_INFO); + + return value.dwPreshutdownTimeout; + } + set + { + var info = new NativeHelpers.SERVICE_PRESHUTDOWN_INFO() + { + dwPreshutdownTimeout = value, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PRESHUTDOWN_INFO, info); + } + } + + public List<Trigger> Triggers + { + get + { + List<Trigger> triggers = new List<Trigger>(); + + using (SafeMemoryBuffer b = QueryServiceConfig2( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO)) + { + var value = (NativeHelpers.SERVICE_TRIGGER_INFO)Marshal.PtrToStructure( + b.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_TRIGGER_INFO)); + + int triggerLength = Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER)); + for (int i = 0; i < value.cTriggers; i++) + { + IntPtr triggerPtr = IntPtr.Add(value.pTriggers, i * triggerLength); + var trigger = (NativeHelpers.SERVICE_TRIGGER)Marshal.PtrToStructure(triggerPtr, + typeof(NativeHelpers.SERVICE_TRIGGER)); + + triggers.Add(new Trigger(trigger)); + } + } + + return triggers; + } + set + { + var info = new NativeHelpers.SERVICE_TRIGGER_INFO() + { + cTriggers = value == null ?
0 : (UInt32)value.Count, + pTriggers = IntPtr.Zero, + pReserved = IntPtr.Zero, + }; + + if (info.cTriggers == 0) + { + try + { + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO, info); + } + catch (ServiceManagerException e) + { + // Can fail with ERROR_INVALID_PARAMETER if no triggers were already set on the service, just + // continue as the service is what we want it to be. + if (e.NativeErrorCode != 87) + throw; + } + return; + } + + // Due to the dynamic nature of the trigger structure(s) we need to manually calculate the size of the + // data items on each trigger if present. This also serializes the raw data items to bytes here. + int structDataLength = 0; + int dataLength = 0; + Queue dataItems = new Queue(); + foreach (Trigger trigger in value) + { + if (trigger.DataItems == null || trigger.DataItems.Count == 0) + continue; + + foreach (TriggerItem dataItem in trigger.DataItems) + { + structDataLength += Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM)); + + byte[] dataItemBytes; + Type dataItemType = dataItem.Data.GetType(); + if (dataItemType == typeof(byte)) + dataItemBytes = new byte[1] { (byte)dataItem.Data }; + else if (dataItemType == typeof(byte[])) + dataItemBytes = (byte[])dataItem.Data; + else if (dataItemType == typeof(UInt64)) + dataItemBytes = BitConverter.GetBytes((UInt64)dataItem.Data); + else if (dataItemType == typeof(string)) + dataItemBytes = Encoding.Unicode.GetBytes((string)dataItem.Data + "\0"); + else if (dataItemType == typeof(List)) + dataItemBytes = Encoding.Unicode.GetBytes( + String.Join("\0", (List)dataItem.Data) + "\0"); + else + throw new ArgumentException(String.Format("Trigger data type '{0}' not a value type", + dataItemType.Name)); + + dataLength += dataItemBytes.Length; + dataItems.Enqueue(dataItemBytes); + } + } + + using (SafeMemoryBuffer triggerBuffer = new SafeMemoryBuffer( + value.Count * Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER)))) + using (SafeMemoryBuffer triggerGuidBuffer = new SafeMemoryBuffer( + value.Count * Marshal.SizeOf(typeof(Guid)))) + using (SafeMemoryBuffer dataItemBuffer = new SafeMemoryBuffer(structDataLength)) + using (SafeMemoryBuffer dataBuffer = new SafeMemoryBuffer(dataLength)) + { + info.pTriggers = triggerBuffer.DangerousGetHandle(); + + IntPtr triggerPtr = triggerBuffer.DangerousGetHandle(); + IntPtr guidPtr = triggerGuidBuffer.DangerousGetHandle(); + IntPtr dataItemPtr = dataItemBuffer.DangerousGetHandle(); + IntPtr dataPtr = dataBuffer.DangerousGetHandle(); + + foreach (Trigger trigger in value) + { + int dataCount = trigger.DataItems == null ? 0 : trigger.DataItems.Count; + var rawTrigger = new NativeHelpers.SERVICE_TRIGGER() + { + dwTriggerType = trigger.Type, + dwAction = trigger.Action, + pTriggerSubtype = guidPtr, + cDataItems = (UInt32)dataCount, + pDataItems = dataCount == 0 ? 
IntPtr.Zero : dataItemPtr, + }; + guidPtr = StructureToPtr(trigger.SubType, guidPtr); + + for (int i = 0; i < rawTrigger.cDataItems; i++) + { + byte[] dataItemBytes = dataItems.Dequeue(); + var rawTriggerData = new NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM() + { + dwDataType = trigger.DataItems[i].Type, + cbData = (UInt32)dataItemBytes.Length, + pData = dataPtr, + }; + Marshal.Copy(dataItemBytes, 0, dataPtr, dataItemBytes.Length); + dataPtr = IntPtr.Add(dataPtr, dataItemBytes.Length); + + dataItemPtr = StructureToPtr(rawTriggerData, dataItemPtr); + } + + triggerPtr = StructureToPtr(rawTrigger, triggerPtr); + } + + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO, info); + } + } + } + + public UInt16? PreferredNode + { + get + { + try + { + var value = QueryServiceConfig2( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PREFERRED_NODE); + + return value.usPreferredNode; + } + catch (ServiceManagerException e) + { + // If host has no NUMA support this will fail with ERROR_INVALID_PARAMETER + if (e.NativeErrorCode == 0x00000057) // ERROR_INVALID_PARAMETER + return null; + + throw; + } + } + set + { + var info = new NativeHelpers.SERVICE_PREFERRED_NODE_INFO(); + if (value == null) + info.fDelete = true; + else + info.usPreferredNode = (UInt16)value; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PREFERRED_NODE, info); + } + } + + public LaunchProtection LaunchProtection + { + get + { + var value = QueryServiceConfig2( + NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_LAUNCH_PROTECTED); + + return value.dwLaunchProtected; + } + set + { + var info = new NativeHelpers.SERVICE_LAUNCH_PROTECTED_INFO() + { + dwLaunchProtected = value, + }; + ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_LAUNCH_PROTECTED, info); + } + } + + // ServiceStatus + public ServiceStatus State { get { return _statusProcess.dwCurrentState; } } + + public ControlsAccepted ControlsAccepted { get { return _statusProcess.dwControlsAccepted; } } + + public UInt32 Win32ExitCode { get { return _statusProcess.dwWin32ExitCode; } } + + public UInt32 ServiceExitCode { get { return _statusProcess.dwServiceSpecificExitCode; } } + + public UInt32 Checkpoint { get { return _statusProcess.dwCheckPoint; } } + + public UInt32 WaitHint { get { return _statusProcess.dwWaitHint; } } + + public UInt32 ProcessId { get { return _statusProcess.dwProcessId; } } + + public ServiceFlags ServiceFlags { get { return _statusProcess.dwServiceFlags; } } + + public Service(string name) : this(name, ServiceRights.AllAccess) { } + + public Service(string name, ServiceRights access) : this(name, access, SCMRights.Connect) { } + + public Service(string name, ServiceRights access, SCMRights scmAccess) + { + ServiceName = name; + _scmHandle = OpenSCManager(scmAccess); + _serviceHandle = NativeMethods.OpenServiceW(_scmHandle, name, access); + if (_serviceHandle.IsInvalid) + throw new ServiceManagerException(String.Format("Failed to open service '{0}'", name)); + + Refresh(); + } + + private Service(SafeServiceHandle scmHandle, SafeServiceHandle serviceHandle, string name) + { + ServiceName = name; + _scmHandle = scmHandle; + _serviceHandle = serviceHandle; + + Refresh(); + } + + // EnumDependentServices + public List DependedBy + { + get + { + UInt32 bytesNeeded = 0; + UInt32 numServices = 0; + NativeMethods.EnumDependentServicesW(_serviceHandle, 3, new SafeMemoryBuffer(IntPtr.Zero), 0, + out bytesNeeded, out numServices); + + using (SafeMemoryBuffer buffer = new 
SafeMemoryBuffer((int)bytesNeeded))
+ {
+ // dwServiceState of 3 == SERVICE_STATE_ALL (active and inactive services)
+ if (!NativeMethods.EnumDependentServicesW(_serviceHandle, 3, buffer, bytesNeeded, out bytesNeeded,
+ out numServices))
+ {
+ throw new ServiceManagerException("Failed to enumerate dependent services");
+ }
+
+ List<string> dependents = new List<string>();
+ Type enumType = typeof(NativeHelpers.ENUM_SERVICE_STATUSW);
+ for (int i = 0; i < numServices; i++)
+ {
+ var service = (NativeHelpers.ENUM_SERVICE_STATUSW)Marshal.PtrToStructure(
+ IntPtr.Add(buffer.DangerousGetHandle(), i * Marshal.SizeOf(enumType)), enumType);
+
+ dependents.Add(service.lpServiceName);
+ }
+
+ return dependents;
+ }
+ }
+ }
+
+ public static Service Create(string name, string binaryPath, string displayName = null,
+ ServiceType serviceType = ServiceType.Win32OwnProcess,
+ ServiceStartType startType = ServiceStartType.DemandStart, ErrorControl errorControl = ErrorControl.Normal,
+ string loadOrderGroup = null, List<string> dependencies = null, string startName = null,
+ string password = null)
+ {
+ SafeServiceHandle scmHandle = OpenSCManager(SCMRights.CreateService | SCMRights.Connect);
+
+ if (displayName == null)
+ displayName = name;
+
+ string depString = null;
+ if (dependencies != null && dependencies.Count > 0)
+ depString = String.Join("\0", dependencies) + "\0\0";
+
+ SafeServiceHandle serviceHandle = NativeMethods.CreateServiceW(scmHandle, name, displayName,
+ ServiceRights.AllAccess, serviceType, startType, errorControl, binaryPath,
+ loadOrderGroup, IntPtr.Zero, depString, startName, password);
+
+ if (serviceHandle.IsInvalid)
+ throw new ServiceManagerException(String.Format("Failed to create new service '{0}'", name));
+
+ return new Service(scmHandle, serviceHandle, name);
+ }
+
+ public void Delete()
+ {
+ if (!NativeMethods.DeleteService(_serviceHandle))
+ throw new ServiceManagerException("Failed to delete service");
+ Dispose();
+ }
+
+ public void Dispose()
+ {
+ if (_serviceHandle != null)
+ _serviceHandle.Dispose();
+
+ if (_scmHandle != null)
+ _scmHandle.Dispose();
+ GC.SuppressFinalize(this);
+ }
+
+ public void Refresh()
+ {
+ UInt32 bytesNeeded;
+ NativeMethods.QueryServiceConfigW(_serviceHandle, IntPtr.Zero, 0, out bytesNeeded);
+
+ _rawServiceConfig = new SafeMemoryBuffer((int)bytesNeeded);
+ if (!NativeMethods.QueryServiceConfigW(_serviceHandle, _rawServiceConfig.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException("Failed to query service config");
+ }
+
+ NativeMethods.QueryServiceStatusEx(_serviceHandle, 0, IntPtr.Zero, 0, out bytesNeeded);
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer((int)bytesNeeded))
+ {
+ if (!NativeMethods.QueryServiceStatusEx(_serviceHandle, 0, buffer.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException("Failed to query service status");
+ }
+
+ _statusProcess = (NativeHelpers.SERVICE_STATUS_PROCESS)Marshal.PtrToStructure(
+ buffer.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_STATUS_PROCESS));
+ }
+ }
+
+ private void ChangeServiceConfig(ServiceType serviceType = (ServiceType)SERVICE_NO_CHANGE,
+ ServiceStartType startType = (ServiceStartType)SERVICE_NO_CHANGE,
+ ErrorControl errorControl = (ErrorControl)SERVICE_NO_CHANGE, string binaryPath = null,
+ string loadOrderGroup = null, List<string> dependencies = null, string startName = null,
+ string password = null, string displayName = null)
+ {
+ string depString = null;
+ if (dependencies != null && dependencies.Count > 0)
+ depString = String.Join("\0", dependencies) + "\0\0";
+
+ if (!NativeMethods.ChangeServiceConfigW(_serviceHandle, serviceType, startType, errorControl, binaryPath,
+ loadOrderGroup, IntPtr.Zero, depString, startName, password, displayName))
+ {
+ throw new ServiceManagerException("Failed to change service config");
+ }
+
+ Refresh();
+ }
+
+ private void ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel infoLevel, object info)
+ {
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(Marshal.SizeOf(info)))
+ {
+ Marshal.StructureToPtr(info, buffer.DangerousGetHandle(), false);
+
+ if (!NativeMethods.ChangeServiceConfig2W(_serviceHandle, infoLevel, buffer.DangerousGetHandle()))
+ throw new ServiceManagerException("Failed to change service config");
+ }
+ }
+
+ private static SafeServiceHandle OpenSCManager(SCMRights desiredAccess)
+ {
+ SafeServiceHandle handle = NativeMethods.OpenSCManagerW(null, null, desiredAccess);
+ if (handle.IsInvalid)
+ throw new ServiceManagerException("Failed to open SCManager");
+
+ return handle;
+ }
+
+ private T QueryServiceConfig2<T>(NativeHelpers.ConfigInfoLevel infoLevel)
+ {
+ using (SafeMemoryBuffer buffer = QueryServiceConfig2(infoLevel))
+ return (T)Marshal.PtrToStructure(buffer.DangerousGetHandle(), typeof(T));
+ }
+
+ private SafeMemoryBuffer QueryServiceConfig2(NativeHelpers.ConfigInfoLevel infoLevel)
+ {
+ UInt32 bytesNeeded = 0;
+ NativeMethods.QueryServiceConfig2W(_serviceHandle, infoLevel, IntPtr.Zero, 0, out bytesNeeded);
+
+ SafeMemoryBuffer buffer = new SafeMemoryBuffer((int)bytesNeeded);
+ if (!NativeMethods.QueryServiceConfig2W(_serviceHandle, infoLevel, buffer.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException(String.Format("QueryServiceConfig2W({0}) failed",
+ infoLevel.ToString()));
+ }
+
+ return buffer;
+ }
+
+ private static IntPtr StructureToPtr(object structure, IntPtr ptr)
+ {
+ Marshal.StructureToPtr(structure, ptr, false);
+ return IntPtr.Add(ptr, Marshal.SizeOf(structure));
+ }
+
+ ~Service() { Dispose(); }
+ }
+}
diff --git a/test/support/windows-integration/plugins/modules/async_status.ps1 b/test/support/windows-integration/plugins/modules/async_status.ps1
new file mode 100644
index 00000000..1ce3ff40
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/async_status.ps1
@@ -0,0 +1,58 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$results = @{changed=$false}
+
+$parsed_args = Parse-Args $args
+$jid = Get-AnsibleParam $parsed_args "jid" -failifempty $true -resultobj $results
+$mode = Get-AnsibleParam $parsed_args "mode" -Default "status" -ValidateSet "status","cleanup"
+
+# parsed in from the async_status action plugin
+$async_dir = Get-AnsibleParam $parsed_args "_async_dir" -type "path" -failifempty $true
+
+$log_path = [System.IO.Path]::Combine($async_dir, $jid)
+
+If(-not $(Test-Path $log_path))
+{
+ Fail-Json @{ansible_job_id=$jid; started=1; finished=1} "could not find job at '$async_dir'"
+}
+
+If($mode -eq "cleanup") {
+ Remove-Item $log_path -Recurse
+ Exit-Json @{ansible_job_id=$jid; erased=$log_path}
+}
+
+# NOT in cleanup mode, assume regular status mode
+# no remote kill mode currently exists, but probably should
+# consider log_path + ".pid" file and also unlink that above
+
+$data = $null
+Try {
+ $data_raw = Get-Content $log_path
+
+ # TODO: move this into module_utils/powershell.ps1?
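+ # JavaScriptSerializer is used instead of ConvertFrom-Json because
+ # DeserializeObject returns a Dictionary[string,object]; the ContainsKey
+ # checks below depend on that, whereas ConvertFrom-Json would yield a
+ # PSCustomObject without a ContainsKey method.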
+ $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
+ $data = $jss.DeserializeObject($data_raw)
+}
+Catch {
+ If(-not $data_raw) {
+ # file not written yet? That means it is running
+ Exit-Json @{results_file=$log_path; ansible_job_id=$jid; started=1; finished=0}
+ }
+ Else {
+ Fail-Json @{ansible_job_id=$jid; results_file=$log_path; started=1; finished=1} "Could not parse job output: $data_raw"
+ }
+}
+
+If (-not $data.ContainsKey("started")) {
+ $data['finished'] = 1
+ $data['ansible_job_id'] = $jid
+}
+ElseIf (-not $data.ContainsKey("finished")) {
+ $data['finished'] = 0
+}
+
+Exit-Json $data
diff --git a/test/support/windows-integration/plugins/modules/setup.ps1 b/test/support/windows-integration/plugins/modules/setup.ps1
new file mode 100644
index 00000000..50647239
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/setup.ps1
@@ -0,0 +1,516 @@
+#!powershell
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Function Get-CustomFacts {
+ [cmdletBinding()]
+ param (
+ [Parameter(mandatory=$false)]
+ $factpath = $null
+ )
+
+ if (Test-Path -Path $factpath) {
+ $FactsFiles = Get-ChildItem -Path $factpath | Where-Object -FilterScript {($PSItem.PSIsContainer -eq $false) -and ($PSItem.Extension -eq '.ps1')}
+
+ foreach ($FactsFile in $FactsFiles) {
+ $out = & $($FactsFile.FullName)
+ $result.ansible_facts.Add("ansible_$(($FactsFile.Name).Split('.')[0])", $out)
+ }
+ }
+ else
+ {
+ Add-Warning $result "Non-existing path was set for local facts - $factpath"
+ }
+}
+
+Function Get-MachineSid {
+ # The Machine SID is stored in HKLM:\SECURITY\SAM\Domains\Account and is
+ # only accessible by the Local System account. This method gets the local
+ # admin account (SID ends with -500) and strips the RID to get the machine sid.
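+ # Rough illustration with a hypothetical SID:
+ #   local admin account SID: S-1-5-21-111-222-333-500
+ #   machine SID (AccountDomainSid of the above): S-1-5-21-111-222-333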
+ + $machine_sid = $null + + try { + $admins_sid = "S-1-5-32-544" + $admin_group = ([Security.Principal.SecurityIdentifier]$admins_sid).Translate([Security.Principal.NTAccount]).Value + + Add-Type -AssemblyName System.DirectoryServices.AccountManagement + $principal_context = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine) + $group_principal = New-Object -TypeName System.DirectoryServices.AccountManagement.GroupPrincipal($principal_context, $admin_group) + $searcher = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalSearcher($group_principal) + $groups = $searcher.FindOne() + + foreach ($user in $groups.Members) { + $user_sid = $user.Sid + if ($user_sid.Value.EndsWith("-500")) { + $machine_sid = $user_sid.AccountDomainSid.Value + break + } + } + } catch { + #can fail for any number of reasons, if it does just return the original null + Add-Warning -obj $result -message "Error during machine sid retrieval: $($_.Exception.Message)" + } + + return $machine_sid +} + +$cim_instances = @{} + +Function Get-LazyCimInstance([string]$instance_name, [string]$namespace="Root\CIMV2") { + if(-not $cim_instances.ContainsKey($instance_name)) { + $cim_instances[$instance_name] = $(Get-CimInstance -Namespace $namespace -ClassName $instance_name) + } + + return $cim_instances[$instance_name] +} + +$result = @{ + ansible_facts = @{ } + changed = $false +} + +$grouped_subsets = @{ + min=[System.Collections.Generic.List[string]]@('date_time','distribution','dns','env','local','platform','powershell_version','user') + network=[System.Collections.Generic.List[string]]@('all_ipv4_addresses','all_ipv6_addresses','interfaces','windows_domain', 'winrm') + hardware=[System.Collections.Generic.List[string]]@('bios','memory','processor','uptime','virtual') + external=[System.Collections.Generic.List[string]]@('facter') +} + +# build "all" set from everything mentioned in the group- this means every value must be in at least one subset to be considered legal +$all_set = [System.Collections.Generic.HashSet[string]]@() + +foreach($kv in $grouped_subsets.GetEnumerator()) { + [void] $all_set.UnionWith($kv.Value) +} + +# dynamically create an "all" subset now that we know what should be in it +$grouped_subsets['all'] = [System.Collections.Generic.List[string]]$all_set + +# start with all, build up gather and exclude subsets +$gather_subset = [System.Collections.Generic.HashSet[string]]$grouped_subsets.all +$explicit_subset = [System.Collections.Generic.HashSet[string]]@() +$exclude_subset = [System.Collections.Generic.HashSet[string]]@() + +$params = Parse-Args $args -supports_check_mode $true +$factpath = Get-AnsibleParam -obj $params -name "fact_path" -type "path" +$gather_subset_source = Get-AnsibleParam -obj $params -name "gather_subset" -type "list" -default "all" + +foreach($item in $gather_subset_source) { + if(([string]$item).StartsWith("!")) { + $item = ([string]$item).Substring(1) + if($item -eq "all") { + $all_minus_min = [System.Collections.Generic.HashSet[string]]@($all_set) + [void] $all_minus_min.ExceptWith($grouped_subsets.min) + [void] $exclude_subset.UnionWith($all_minus_min) + } + elseif($grouped_subsets.ContainsKey($item)) { + [void] $exclude_subset.UnionWith($grouped_subsets[$item]) + } + elseif($all_set.Contains($item)) { + [void] $exclude_subset.Add($item) + } + # NB: invalid exclude values are ignored, since that's what posix setup does + } + else { + 
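# no '!' prefix: treat the value as a named group first, then as a
+ # single fact subset; anything unknown is warned about below
+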
if($grouped_subsets.ContainsKey($item)) { + [void] $explicit_subset.UnionWith($grouped_subsets[$item]) + } + elseif($all_set.Contains($item)) { + [void] $explicit_subset.Add($item) + } + else { + # NB: POSIX setup fails on invalid value; we warn, because we don't implement the same set as POSIX + # and we don't have platform-specific config for this... + Add-Warning $result "invalid value $item specified in gather_subset" + } + } +} + +[void] $gather_subset.ExceptWith($exclude_subset) +[void] $gather_subset.UnionWith($explicit_subset) + +$ansible_facts = @{ + gather_subset=@($gather_subset_source) + module_setup=$true +} + +$osversion = [Environment]::OSVersion + +if ($osversion.Version -lt [version]"6.2") { + # Server 2008, 2008 R2, and Windows 7 are not tested in CI and we want to let customers know about it before + # removing support altogether. + $version_string = "{0}.{1}" -f ($osversion.Version.Major, $osversion.Version.Minor) + $msg = "Windows version '$version_string' will no longer be supported or tested in the next Ansible release" + Add-DeprecationWarning -obj $result -message $msg -version "2.11" +} + +if($gather_subset.Contains('all_ipv4_addresses') -or $gather_subset.Contains('all_ipv6_addresses')) { + $netcfg = Get-LazyCimInstance Win32_NetworkAdapterConfiguration + + # TODO: split v4/v6 properly, return in separate keys + $ips = @() + Foreach ($ip in $netcfg.IPAddress) { + If ($ip) { + $ips += $ip + } + } + + $ansible_facts += @{ + ansible_ip_addresses = $ips + } +} + +if($gather_subset.Contains('bios')) { + $win32_bios = Get-LazyCimInstance Win32_Bios + $win32_cs = Get-LazyCimInstance Win32_ComputerSystem + $ansible_facts += @{ + ansible_bios_date = $win32_bios.ReleaseDate.ToString("MM/dd/yyyy") + ansible_bios_version = $win32_bios.SMBIOSBIOSVersion + ansible_product_name = $win32_cs.Model.Trim() + ansible_product_serial = $win32_bios.SerialNumber + # ansible_product_version = ([string] $win32_cs.SystemFamily) + } +} + +if($gather_subset.Contains('date_time')) { + $datetime = (Get-Date) + $datetime_utc = $datetime.ToUniversalTime() + $date = @{ + date = $datetime.ToString("yyyy-MM-dd") + day = $datetime.ToString("dd") + epoch = (Get-Date -UFormat "%s") + hour = $datetime.ToString("HH") + iso8601 = $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ssZ") + iso8601_basic = $datetime.ToString("yyyyMMddTHHmmssffffff") + iso8601_basic_short = $datetime.ToString("yyyyMMddTHHmmss") + iso8601_micro = $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ss.ffffffZ") + minute = $datetime.ToString("mm") + month = $datetime.ToString("MM") + second = $datetime.ToString("ss") + time = $datetime.ToString("HH:mm:ss") + tz = ([System.TimeZoneInfo]::Local.Id) + tz_offset = $datetime.ToString("zzzz") + # Ensure that the weekday is in English + weekday = $datetime.ToString("dddd", [System.Globalization.CultureInfo]::InvariantCulture) + weekday_number = (Get-Date -UFormat "%w") + weeknumber = (Get-Date -UFormat "%W") + year = $datetime.ToString("yyyy") + } + + $ansible_facts += @{ + ansible_date_time = $date + } +} + +if($gather_subset.Contains('distribution')) { + $win32_os = Get-LazyCimInstance Win32_OperatingSystem + $product_type = switch($win32_os.ProductType) { + 1 { "workstation" } + 2 { "domain_controller" } + 3 { "server" } + default { "unknown" } + } + + $installation_type = $null + $current_version_path = "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion" + if (Test-Path -LiteralPath $current_version_path) { + $install_type_prop = Get-ItemProperty -LiteralPath $current_version_path -ErrorAction 
SilentlyContinue
+ $installation_type = [String]$install_type_prop.InstallationType
+ }
+
+ $ansible_facts += @{
+ ansible_distribution = $win32_os.Caption
+ ansible_distribution_version = $osversion.Version.ToString()
+ ansible_distribution_major_version = $osversion.Version.Major.ToString()
+ ansible_os_family = "Windows"
+ ansible_os_name = ($win32_os.Name.Split('|')[0]).Trim()
+ ansible_os_product_type = $product_type
+ ansible_os_installation_type = $installation_type
+ }
+}
+
+if($gather_subset.Contains('env')) {
+ $env_vars = @{ }
+ foreach ($item in Get-ChildItem Env:) {
+ $name = $item | Select-Object -ExpandProperty Name
+ # Powershell ConvertTo-Json fails if string ends with \
+ $value = ($item | Select-Object -ExpandProperty Value).TrimEnd("\")
+ $env_vars.Add($name, $value)
+ }
+
+ $ansible_facts += @{
+ ansible_env = $env_vars
+ }
+}
+
+if($gather_subset.Contains('facter')) {
+ # See if Facter is on the System Path
+ Try {
+ Get-Command facter -ErrorAction Stop > $null
+ $facter_installed = $true
+ } Catch {
+ $facter_installed = $false
+ }
+
+ # Get JSON from Facter, and parse it out.
+ if ($facter_installed) {
+ &facter -j | Tee-Object -Variable facter_output > $null
+ $facts = "$facter_output" | ConvertFrom-Json
+ ForEach($fact in $facts.PSObject.Properties) {
+ $fact_name = $fact.Name
+ $ansible_facts.Add("facter_$fact_name", $fact.Value)
+ }
+ }
+}
+
+if($gather_subset.Contains('interfaces')) {
+ $netcfg = Get-LazyCimInstance Win32_NetworkAdapterConfiguration
+ $ActiveNetcfg = @()
+ $ActiveNetcfg += $netcfg | Where-Object {$_.ipaddress -ne $null}
+
+ $namespaces = Get-LazyCimInstance __Namespace -namespace root
+ if ($namespaces | Where-Object { $_.Name -eq "StandardCimv2" }) {
+ $net_adapters = Get-LazyCimInstance MSFT_NetAdapter -namespace Root\StandardCimv2
+ $guid_key = "InterfaceGUID"
+ $name_key = "Name"
+ } else {
+ $net_adapters = Get-LazyCimInstance Win32_NetworkAdapter
+ $guid_key = "GUID"
+ $name_key = "NetConnectionID"
+ }
+
+ $formattednetcfg = @()
+ foreach ($adapter in $ActiveNetcfg)
+ {
+ $thisadapter = @{
+ default_gateway = $null
+ connection_name = $null
+ dns_domain = $adapter.dnsdomain
+ interface_index = $adapter.InterfaceIndex
+ interface_name = $adapter.description
+ macaddress = $adapter.macaddress
+ }
+
+ if ($adapter.defaultIPGateway)
+ {
+ $thisadapter.default_gateway = $adapter.DefaultIPGateway[0].ToString()
+ }
+ $net_adapter = $net_adapters | Where-Object { $_.$guid_key -eq $adapter.SettingID }
+ if ($net_adapter) {
+ $thisadapter.connection_name = $net_adapter.$name_key
+ }
+
+ $formattednetcfg += $thisadapter
+ }
+
+ $ansible_facts += @{
+ ansible_interfaces = $formattednetcfg
+ }
+}
+
+if ($gather_subset.Contains("local") -and $null -ne $factpath) {
+ # Get any custom facts; results are updated in the $result object
+ Get-CustomFacts -factpath $factpath
+}
+
+if($gather_subset.Contains('memory')) {
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $win32_os = Get-LazyCimInstance Win32_OperatingSystem
+ $ansible_facts += @{
+ # Win32_PhysicalMemory is empty on some virtual platforms
+ ansible_memtotal_mb = ([math]::ceiling($win32_cs.TotalPhysicalMemory / 1024 / 1024))
+ ansible_memfree_mb = ([math]::ceiling($win32_os.FreePhysicalMemory / 1024))
+ ansible_swaptotal_mb = ([math]::round($win32_os.TotalSwapSpaceSize / 1024))
+ ansible_pagefiletotal_mb = ([math]::round($win32_os.SizeStoredInPagingFiles / 1024))
+ ansible_pagefilefree_mb = ([math]::round($win32_os.FreeSpaceInPagingFiles / 1024))
+ }
+}
+
+
+if($gather_subset.Contains('platform')) {
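+ # The platform subset derives host identity facts (architecture, domain,
+ # FQDN, pending reboot state) from Win32_ComputerSystem and Win32_OperatingSystem.
+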
$win32_cs = Get-LazyCimInstance Win32_ComputerSystem + $win32_os = Get-LazyCimInstance Win32_OperatingSystem + $domain_suffix = $win32_cs.Domain.Substring($win32_cs.Workgroup.length) + $fqdn = $win32_cs.DNSHostname + + if( $domain_suffix -ne "") + { + $fqdn = $win32_cs.DNSHostname + "." + $domain_suffix + } + + try { + $ansible_reboot_pending = Get-PendingRebootStatus + } catch { + # fails for non-admin users, set to null in this case + $ansible_reboot_pending = $null + } + + $ansible_facts += @{ + ansible_architecture = $win32_os.OSArchitecture + ansible_domain = $domain_suffix + ansible_fqdn = $fqdn + ansible_hostname = $win32_cs.DNSHostname + ansible_netbios_name = $win32_cs.Name + ansible_kernel = $osversion.Version.ToString() + ansible_nodename = $fqdn + ansible_machine_id = Get-MachineSid + ansible_owner_contact = ([string] $win32_cs.PrimaryOwnerContact) + ansible_owner_name = ([string] $win32_cs.PrimaryOwnerName) + # FUTURE: should this live in its own subset? + ansible_reboot_pending = $ansible_reboot_pending + ansible_system = $osversion.Platform.ToString() + ansible_system_description = ([string] $win32_os.Description) + ansible_system_vendor = $win32_cs.Manufacturer + } +} + +if($gather_subset.Contains('powershell_version')) { + $ansible_facts += @{ + ansible_powershell_version = ($PSVersionTable.PSVersion.Major) + } +} + +if($gather_subset.Contains('processor')) { + $win32_cs = Get-LazyCimInstance Win32_ComputerSystem + $win32_cpu = Get-LazyCimInstance Win32_Processor + if ($win32_cpu -is [array]) { + # multi-socket, pick first + $win32_cpu = $win32_cpu[0] + } + + $cpu_list = @( ) + for ($i=1; $i -le $win32_cs.NumberOfLogicalProcessors; $i++) { + $cpu_list += $win32_cpu.Manufacturer + $cpu_list += $win32_cpu.Name + } + + $ansible_facts += @{ + ansible_processor = $cpu_list + ansible_processor_cores = $win32_cpu.NumberOfCores + ansible_processor_count = $win32_cs.NumberOfProcessors + ansible_processor_threads_per_core = ($win32_cpu.NumberOfLogicalProcessors / $win32_cpu.NumberofCores) + ansible_processor_vcpus = $win32_cs.NumberOfLogicalProcessors + } +} + +if($gather_subset.Contains('uptime')) { + $win32_os = Get-LazyCimInstance Win32_OperatingSystem + $ansible_facts += @{ + ansible_lastboot = $win32_os.lastbootuptime.ToString("u") + ansible_uptime_seconds = $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds)) + } +} + +if($gather_subset.Contains('user')) { + $user = [Security.Principal.WindowsIdentity]::GetCurrent() + $ansible_facts += @{ + ansible_user_dir = $env:userprofile + # Win32_UserAccount.FullName is probably the right thing here, but it can be expensive to get on large domains + ansible_user_gecos = "" + ansible_user_id = $env:username + ansible_user_sid = $user.User.Value + } +} + +if($gather_subset.Contains('windows_domain')) { + $win32_cs = Get-LazyCimInstance Win32_ComputerSystem + $domain_roles = @{ + 0 = "Stand-alone workstation" + 1 = "Member workstation" + 2 = "Stand-alone server" + 3 = "Member server" + 4 = "Backup domain controller" + 5 = "Primary domain controller" + } + + $domain_role = $domain_roles.Get_Item([Int32]$win32_cs.DomainRole) + + $ansible_facts += @{ + ansible_windows_domain = $win32_cs.Domain + ansible_windows_domain_member = $win32_cs.PartOfDomain + ansible_windows_domain_role = $domain_role + } +} + +if($gather_subset.Contains('winrm')) { + + $winrm_https_listener_parent_paths = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse -ErrorAction SilentlyContinue | ` + Where-Object {$_.PSChildName -eq 
"Transport" -and $_.Value -eq "HTTPS"} | Select-Object PSParentPath + if ($winrm_https_listener_parent_paths -isnot [array]) { + $winrm_https_listener_parent_paths = @($winrm_https_listener_parent_paths) + } + + $winrm_https_listener_paths = @() + foreach ($winrm_https_listener_parent_path in $winrm_https_listener_parent_paths) { + $winrm_https_listener_paths += $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\")) + } + + $https_listeners = @() + foreach ($winrm_https_listener_path in $winrm_https_listener_paths) { + $https_listeners += Get-ChildItem -Path "WSMan:\localhost\Listener$winrm_https_listener_path" + } + + $winrm_cert_thumbprints = @() + foreach ($https_listener in $https_listeners) { + $winrm_cert_thumbprints += $https_listener | Where-Object {$_.Name -EQ "CertificateThumbprint" } | Select-Object Value + } + + $winrm_cert_expiry = @() + foreach ($winrm_cert_thumbprint in $winrm_cert_thumbprints) { + Try { + $winrm_cert_expiry += Get-ChildItem -Path Cert:\LocalMachine\My | Where-Object Thumbprint -EQ $winrm_cert_thumbprint.Value.ToString().ToUpper() | Select-Object NotAfter + } Catch { + Add-Warning -obj $result -message "Error during certificate expiration retrieval: $($_.Exception.Message)" + } + } + + $winrm_cert_expirations = $winrm_cert_expiry | Sort-Object NotAfter + if ($winrm_cert_expirations) { + # this fact was renamed from ansible_winrm_certificate_expires due to collision with ansible_winrm_X connection var pattern + $ansible_facts.Add("ansible_win_rm_certificate_expires", $winrm_cert_expirations[0].NotAfter.ToString("yyyy-MM-dd HH:mm:ss")) + } +} + +if($gather_subset.Contains('virtual')) { + $machine_info = Get-LazyCimInstance Win32_ComputerSystem + + switch ($machine_info.model) { + "Virtual Machine" { + $machine_type="Hyper-V" + $machine_role="guest" + } + + "VMware Virtual Platform" { + $machine_type="VMware" + $machine_role="guest" + } + + "VirtualBox" { + $machine_type="VirtualBox" + $machine_role="guest" + } + + "HVM domU" { + $machine_type="Xen" + $machine_role="guest" + } + + default { + $machine_type="NA" + $machine_role="NA" + } + } + + $ansible_facts += @{ + ansible_virtualization_role = $machine_role + ansible_virtualization_type = $machine_type + } +} + +$result.ansible_facts += $ansible_facts + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/slurp.ps1 b/test/support/windows-integration/plugins/modules/slurp.ps1 new file mode 100644 index 00000000..eb506c7c --- /dev/null +++ b/test/support/windows-integration/plugins/modules/slurp.ps1 @@ -0,0 +1,28 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy + +$params = Parse-Args $args -supports_check_mode $true; +$src = Get-AnsibleParam -obj $params -name "src" -type "path" -aliases "path" -failifempty $true; + +$result = @{ + changed = $false; +} + +If (Test-Path -LiteralPath $src -PathType Leaf) +{ + $bytes = [System.IO.File]::ReadAllBytes($src); + $result.content = [System.Convert]::ToBase64String($bytes); + $result.encoding = "base64"; + Exit-Json $result; +} +ElseIf (Test-Path -LiteralPath $src -PathType Container) +{ + Fail-Json $result "Path $src is a directory"; +} +Else +{ + Fail-Json $result "Path $src is not found"; +} diff --git a/test/support/windows-integration/plugins/modules/win_acl.ps1 b/test/support/windows-integration/plugins/modules/win_acl.ps1 new file mode 100644 index 
00000000..e3c38130
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_acl.ps1
@@ -0,0 +1,225 @@
+#!powershell
+
+# Copyright: (c) 2015, Phil Schwartz
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+#Requires -Module Ansible.ModuleUtils.SID
+
+$ErrorActionPreference = "Stop"
+
+# win_acl module (File/Resources Permission Additions/Removal)
+
+#Functions
+function Get-UserSID {
+ param(
+ [String]$AccountName
+ )
+
+ $userSID = $null
+ $searchAppPools = $false
+
+ if ($AccountName.Split("\").Count -gt 1) {
+ if ($AccountName.Split("\")[0] -eq "IIS APPPOOL") {
+ $searchAppPools = $true
+ $AccountName = $AccountName.Split("\")[1]
+ }
+ }
+
+ if ($searchAppPools) {
+ Import-Module -Name WebAdministration
+ $testIISPath = Test-Path -LiteralPath "IIS:"
+ if ($testIISPath) {
+ $appPoolObj = Get-ItemProperty -LiteralPath "IIS:\AppPools\$AccountName"
+ $userSID = $appPoolObj.applicationPoolSid
+ }
+ }
+ else {
+ $userSID = Convert-ToSID -account_name $AccountName
+ }
+
+ return $userSID
+}
+
+$params = Parse-Args $args
+
+Function SetPrivilegeTokens() {
+ # Set privilege tokens only if admin.
+ # Admins would have these privs or be able to set these privs in the UI anyway
+
+ $adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+ $myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+ $myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+
+ if ($myWindowsPrincipal.IsInRole($adminRole)) {
+ # Need to adjust token privs when executing Set-ACL in certain cases.
+ # e.g. d:\testdir is owned by group in which current user is not a member and no perms are inherited from d:\
+ # This also sets us up for setting the owner as a feature.
+ # See the following for details of each privilege
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb530716(v=vs.85).aspx
+ $privileges = @(
+ "SeRestorePrivilege", # Grants all write access control to any file, regardless of ACL.
+ "SeBackupPrivilege", # Grants all read access control to any file, regardless of ACL.
+ "SeTakeOwnershipPrivilege" # Grants ability to take ownership of an object without being granted discretionary access
+ )
+ foreach ($privilege in $privileges) {
+ $state = Get-AnsiblePrivilege -Name $privilege
+ if ($state -eq $false) {
+ Set-AnsiblePrivilege -Name $privilege -Value $true
+ }
+ }
+ }
+}
+
+
+$result = @{
+ changed = $false
+}
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "str" -failifempty $true
+$user = Get-AnsibleParam -obj $params -name "user" -type "str" -failifempty $true
+$rights = Get-AnsibleParam -obj $params -name "rights" -type "str" -failifempty $true
+
+$type = Get-AnsibleParam -obj $params -name "type" -type "str" -failifempty $true -validateset "allow","deny"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "absent","present"
+
+$inherit = Get-AnsibleParam -obj $params -name "inherit" -type "str"
+$propagation = Get-AnsibleParam -obj $params -name "propagation" -type "str" -default "None" -validateset "InheritOnly","None","NoPropagateInherit"
+
+# We mount the HKCR, HKU, and HKCC registry hives so PS can access them.
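+# e.g. a task may pass path: HKCR:\ExampleKey (a hypothetical key); the matching
+# PSDrive is then created on demand below before the path is touched.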
+# Network paths have no qualifiers so we use -EA SilentlyContinue to ignore that +$path_qualifier = Split-Path -Path $path -Qualifier -ErrorAction SilentlyContinue +if ($path_qualifier -eq "HKCR:" -and (-not (Test-Path -LiteralPath HKCR:\))) { + New-PSDrive -Name HKCR -PSProvider Registry -Root HKEY_CLASSES_ROOT > $null +} +if ($path_qualifier -eq "HKU:" -and (-not (Test-Path -LiteralPath HKU:\))) { + New-PSDrive -Name HKU -PSProvider Registry -Root HKEY_USERS > $null +} +if ($path_qualifier -eq "HKCC:" -and (-not (Test-Path -LiteralPath HKCC:\))) { + New-PSDrive -Name HKCC -PSProvider Registry -Root HKEY_CURRENT_CONFIG > $null +} + +If (-Not (Test-Path -LiteralPath $path)) { + Fail-Json -obj $result -message "$path file or directory does not exist on the host" +} + +# Test that the user/group is resolvable on the local machine +$sid = Get-UserSID -AccountName $user +if (!$sid) { + Fail-Json -obj $result -message "$user is not a valid user or group on the host machine or domain" +} + +If (Test-Path -LiteralPath $path -PathType Leaf) { + $inherit = "None" +} +ElseIf ($null -eq $inherit) { + $inherit = "ContainerInherit, ObjectInherit" +} + +# Bug in Set-Acl, Get-Acl where -LiteralPath only works for the Registry provider if the location is in that root +# qualifier. We also don't have a qualifier for a network path so only change if not null +if ($null -ne $path_qualifier) { + Push-Location -LiteralPath $path_qualifier +} + +Try { + SetPrivilegeTokens + $path_item = Get-Item -LiteralPath $path -Force + If ($path_item.PSProvider.Name -eq "Registry") { + $colRights = [System.Security.AccessControl.RegistryRights]$rights + } + Else { + $colRights = [System.Security.AccessControl.FileSystemRights]$rights + } + + $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit + $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation + + If ($type -eq "allow") { + $objType =[System.Security.AccessControl.AccessControlType]::Allow + } + Else { + $objType =[System.Security.AccessControl.AccessControlType]::Deny + } + + $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid) + If ($path_item.PSProvider.Name -eq "Registry") { + $objACE = New-Object System.Security.AccessControl.RegistryAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType) + } + Else { + $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType) + } + $objACL = Get-ACL -LiteralPath $path + + # Check if the ACE exists already in the objects ACL list + $match = $false + + ForEach($rule in $objACL.GetAccessRules($true, $true, [System.Security.Principal.SecurityIdentifier])){ + + If ($path_item.PSProvider.Name -eq "Registry") { + If (($rule.RegistryRights -eq $objACE.RegistryRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) { + $match = $true + Break + } + } else { + If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) { + $match = $true + Break + 
}
+ }
+ }
+
+ If ($state -eq "present" -And $match -eq $false) {
+ Try {
+ $objACL.AddAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when adding the specified rule - $($_.Exception.Message)"
+ }
+ }
+ ElseIf ($state -eq "absent" -And $match -eq $true) {
+ Try {
+ $objACL.RemoveAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when removing the specified rule - $($_.Exception.Message)"
+ }
+ }
+ Else {
+ # A rule was attempting to be added but already exists
+ If ($match -eq $true) {
+ Exit-Json -obj $result -message "the specified rule already exists"
+ }
+ # A rule didn't exist that was trying to be removed
+ Else {
+ Exit-Json -obj $result -message "the specified rule does not exist"
+ }
+ }
+}
+Catch {
+ Fail-Json -obj $result -message "an error occurred when attempting to $state $rights permission(s) on $path for $user - $($_.Exception.Message)"
+}
+Finally {
+ # Make sure we revert the location stack to the original path just for cleanups sake
+ if ($null -ne $path_qualifier) {
+ Pop-Location
+ }
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_acl.py b/test/support/windows-integration/plugins/modules/win_acl.py
new file mode 100644
index 00000000..14fbd82f
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_acl.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Phil Schwartz
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_acl
+version_added: "2.0"
+short_description: Set file/directory/registry permissions for a system user or group
+description:
+- Add or remove rights/permissions for a given user or group for the specified
+ file, folder, registry key or AppPool identities.
+options:
+ path:
+ description:
+ - The path to the file or directory.
+ type: str
+ required: yes
+ user:
+ description:
+ - User or Group to add specified rights to act on src file/folder or
+ registry key.
+ type: str
+ required: yes
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified access rule.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ type:
+ description:
+ - Specify whether to allow or deny the rights specified.
+ type: str
+ required: yes
+ choices: [ allow, deny ]
+ rights:
+ description:
+ - The rights/permissions that are to be allowed/denied for the specified
+ user or group for the item at C(path).
+ - If C(path) is a file or directory, rights can be any right under MSDN
+ FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
+ - If C(path) is a registry key, rights can be any right under MSDN
+ RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
+ type: str
+ required: yes
+ inherit:
+ description:
+ - Inherit flags on the ACL rules.
+ - Can be specified as a comma separated list, e.g. C(ContainerInherit),
+ C(ObjectInherit).
+ - For more information on the choices see MSDN InheritanceFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
+ - Defaults to C(ContainerInherit, ObjectInherit) for Directories.
+ type: str
+ choices: [ ContainerInherit, ObjectInherit ]
+ propagation:
+ description:
+ - Propagation flag on the ACL rules.
+ - For more information on the choices see MSDN PropagationFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
+ type: str
+ choices: [ InheritOnly, None, NoPropagateInherit ]
+ default: "None"
+notes:
+- If adding ACLs for AppPool identities (available since 2.3), the Windows
+ Feature "Web-Scripting-Tools" must be enabled.
+seealso:
+- module: win_acl_inheritance
+- module: win_file
+- module: win_owner
+- module: win_stat
+author:
+- Phil Schwartz (@schwartzmx)
+- Trond Hindenes (@trondhindenes)
+- Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = r'''
+- name: Restrict write and execute access to User Fed-Phil
+ win_acl:
+ user: Fed-Phil
+ path: C:\Important\Executable.exe
+ type: deny
+ rights: ExecuteFile,Write
+
+- name: Add IIS_IUSRS allow rights
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Set registry key right
+ win_acl:
+ path: HKCU:\Bovine\Key
+ user: BUILTIN\Users
+ rights: EnumerateSubKeys
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Remove FullControl AccessRule for IIS_IUSRS
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: absent
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Deny Intern
+ win_acl:
+ path: C:\Administrator\Documents
+ user: Intern
+ rights: Read,Write,Modify,FullControl,Delete
+ type: deny
+ state: present
'''
diff --git a/test/support/windows-integration/plugins/modules/win_certificate_store.ps1 b/test/support/windows-integration/plugins/modules/win_certificate_store.ps1
new file mode 100644
index 00000000..db984130
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_certificate_store.ps1
@@ -0,0 +1,260 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$store_name_values = ([System.Security.Cryptography.X509Certificates.StoreName]).GetEnumValues() | ForEach-Object { $_.ToString() }
+$store_location_values = ([System.Security.Cryptography.X509Certificates.StoreLocation]).GetEnumValues() | ForEach-Object { $_.ToString() }
+
+$spec = @{
+ options = @{
+ state = @{ type = "str"; default = "present"; choices = "absent", "exported", "present" }
+ path = @{ type = "path" }
+ thumbprint = @{ type = "str" }
+ store_name = @{ type = "str"; default = "My"; choices = $store_name_values }
+ store_location = @{ type = "str"; default = "LocalMachine"; choices = $store_location_values }
+ password = @{ type = "str"; no_log = $true }
+ key_exportable = @{ type = "bool"; default = $true }
+ key_storage = @{ type = "str"; default = "default"; choices = "default", "machine", "user" }
+ file_type = @{ type = "str"; default = "der"; choices = "der", "pem", "pkcs12" }
+ }
+ required_if = @(
+ @("state", "absent", @("path", "thumbprint"), $true),
+ @("state", "exported", @("path", "thumbprint")),
+ @("state", "present", @("path"))
+ )
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+Function Get-CertFile($module, $path, $password, $key_exportable, $key_storage) {
+ # parses a certificate file and returns X509Certificate2Collection
+ if (-not (Test-Path -LiteralPath $path -PathType Leaf)) {
+ $module.FailJson("File at '$path' either does not exist or is not a file")
+ }
+
+ # must set at least the PersistKeySet flag so that the PrivateKey
+ # is stored in a permanent container and not deleted once the handle
+ # is gone.
+ $store_flags = [System.Security.Cryptography.X509Certificates.X509KeyStorageFlags]::PersistKeySet
+
+ $key_storage = $key_storage.substring(0,1).ToUpper() + $key_storage.substring(1).ToLower()
+ $store_flags = $store_flags -bor [Enum]::Parse([System.Security.Cryptography.X509Certificates.X509KeyStorageFlags], "$($key_storage)KeySet")
+ if ($key_exportable) {
+ $store_flags = $store_flags -bor [System.Security.Cryptography.X509Certificates.X509KeyStorageFlags]::Exportable
+ }
+
+ # TODO: If I'm feeling adventurous, write code to parse PKCS#12 PEM encoded
+ # file as .NET does not have an easy way to import this
+ $certs = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2Collection
+
+ try {
+ $certs.Import($path, $password, $store_flags)
+ } catch {
+ $module.FailJson("Failed to load cert from file: $($_.Exception.Message)", $_)
+ }
+
+ return $certs
+}
+
+Function New-CertFile($module, $cert, $path, $type, $password) {
+ $content_type = switch ($type) {
+ "pem" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Cert }
+ "der" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Cert }
+ "pkcs12" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Pkcs12 }
+ }
+ if ($type -eq "pkcs12") {
+ $missing_key = $false
+ if ($null -eq $cert.PrivateKey) {
+ $missing_key = $true
+ } elseif ($cert.PrivateKey.CspKeyContainerInfo.Exportable -eq $false) {
+ $missing_key = $true
+ }
+ if ($missing_key) {
+ $module.FailJson("Cannot export cert with key as PKCS12 when the key is not marked as exportable or not accessible by the current user")
+ }
+ }
+
+ if (Test-Path -LiteralPath $path) {
+ Remove-Item -LiteralPath $path -Force
+ $module.Result.changed = $true
+ }
+ try {
+ $cert_bytes = $cert.Export($content_type, $password)
+ } catch {
+ $module.FailJson("Failed to export certificate as bytes: $($_.Exception.Message)", $_)
+ }
+
+ # Need to manually handle a PEM file
+ if ($type -eq "pem") {
+ $cert_content = "-----BEGIN CERTIFICATE-----`r`n"
+ $base64_string = [System.Convert]::ToBase64String($cert_bytes, [System.Base64FormattingOptions]::InsertLineBreaks)
+ $cert_content += $base64_string
+ $cert_content += "`r`n-----END CERTIFICATE-----"
+ $file_encoding = [System.Text.Encoding]::ASCII
+ $cert_bytes = $file_encoding.GetBytes($cert_content)
+ } elseif ($type -eq "pkcs12") {
+ $module.Result.key_exported = $false
+ if ($null -ne $cert.PrivateKey) {
+ $module.Result.key_exportable = $cert.PrivateKey.CspKeyContainerInfo.Exportable
+ }
+ }
+
+ if (-not $module.CheckMode) {
+ try {
+ [System.IO.File]::WriteAllBytes($path, $cert_bytes)
+ } catch [System.ArgumentNullException] {
+ $module.FailJson("Failed to write cert to file, cert was null: $($_.Exception.Message)", $_)
+ } catch [System.IO.IOException] {
+ $module.FailJson("Failed to write cert to file due to IO Exception: $($_.Exception.Message)", $_)
+ } catch [System.UnauthorizedAccessException] {
+ $module.FailJson("Failed to write cert to file due to permissions: $($_.Exception.Message)", $_)
+ } catch {
+ $module.FailJson("Failed to write cert to file: $($_.Exception.Message)", $_)
+ }
+ }
+ $module.Result.changed = $true
+}
+
+Function Get-CertFileType($path, $password) {
+ $certs = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2Collection
+ try {
+ $certs.Import($path, $password, 0)
+ } catch [System.Security.Cryptography.CryptographicException] {
+ # the file is a pkcs12, we just had the wrong password
+ return "pkcs12"
+ } catch {
+ return "unknown"
+ }
+
+ $file_contents = Get-Content -LiteralPath $path -Raw
+ if ($file_contents.StartsWith("-----BEGIN CERTIFICATE-----")) {
+ return "pem"
+ } elseif ($file_contents.StartsWith("-----BEGIN PKCS7-----")) {
+ return "pkcs7-ascii"
+ } elseif ($certs.Count -gt 1) {
+ # multiple certs must be pkcs7
+ return "pkcs7-binary"
+ } elseif ($certs[0].HasPrivateKey) {
+ return "pkcs12"
+ } elseif ($path.EndsWith(".pfx") -or $path.EndsWith(".p12")) {
+ # no way to differentiate a pfx from a der file so we must rely on the
+ # extension
+ return "pkcs12"
+ } else {
+ return "der"
+ }
+}
+
+$state = $module.Params.state
+$path = $module.Params.path
+$thumbprint = $module.Params.thumbprint
+$store_name = [System.Security.Cryptography.X509Certificates.StoreName]"$($module.Params.store_name)"
+$store_location = [System.Security.Cryptography.X509Certificates.StoreLocation]"$($module.Params.store_location)"
+$password = $module.Params.password
+$key_exportable = $module.Params.key_exportable
+$key_storage = $module.Params.key_storage
+$file_type = $module.Params.file_type
+
+$module.Result.thumbprints = @()
+
+$store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location
+try {
+ $store.Open([System.Security.Cryptography.X509Certificates.OpenFlags]::ReadWrite)
+} catch [System.Security.Cryptography.CryptographicException] {
+ $module.FailJson("Unable to open the store as it is not readable: $($_.Exception.Message)", $_)
+} catch [System.Security.SecurityException] {
+ $module.FailJson("Unable to open the store with the current permissions: $($_.Exception.Message)", $_)
+} catch {
+ $module.FailJson("Unable to open the store: $($_.Exception.Message)", $_)
+}
+$store_certificates = $store.Certificates
+
+try {
+ if ($state -eq "absent") {
+ $cert_thumbprints = @()
+
+ if ($null -ne $path) {
+ $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage
+ foreach ($cert in $certs) {
+ $cert_thumbprints += $cert.Thumbprint
+ }
+ } elseif ($null -ne $thumbprint) {
+ $cert_thumbprints += $thumbprint
+ }
+
+ foreach ($cert_thumbprint in $cert_thumbprints) {
+ $module.Result.thumbprints += $cert_thumbprint
+ $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $cert_thumbprint, $false)
+ if ($found_certs.Count -gt 0) {
+ foreach ($found_cert in $found_certs) {
+ try {
+ if (-not $module.CheckMode) {
+ $store.Remove($found_cert)
+ }
+ } catch [System.Security.SecurityException] {
+ $module.FailJson("Unable to remove cert with thumbprint '$cert_thumbprint' with current permissions: $($_.Exception.Message)", $_)
+ }
catch { + $module.FailJson("Unable to remove cert with thumbprint '$cert_thumbprint': $($_.Exception.Message)", $_) + } + $module.Result.changed = $true + } + } + } + } elseif ($state -eq "exported") { + # TODO: Add support for PKCS7 and exporting a cert chain + $module.Result.thumbprints += $thumbprint + $export = $true + if (Test-Path -LiteralPath $path -PathType Container) { + $module.FailJson("Cannot export cert to path '$path' as it is a directory") + } elseif (Test-Path -LiteralPath $path -PathType Leaf) { + $actual_cert_type = Get-CertFileType -path $path -password $password + if ($actual_cert_type -eq $file_type) { + try { + $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage + } catch { + # failed to load the file so we set the thumbprint to something + # that will fail validation + $certs = @{Thumbprint = $null} + } + + if ($certs.Thumbprint -eq $thumbprint) { + $export = $false + } + } + } + + if ($export) { + $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $thumbprint, $false) + if ($found_certs.Count -ne 1) { + $module.FailJson("Found $($found_certs.Count) certs when only expecting 1") + } + + New-CertFile -module $module -cert $found_certs -path $path -type $file_type -password $password + } + } else { + $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage + foreach ($cert in $certs) { + $module.Result.thumbprints += $cert.Thumbprint + $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $cert.Thumbprint, $false) + if ($found_certs.Count -eq 0) { + try { + if (-not $module.CheckMode) { + $store.Add($cert) + } + } catch [System.Security.Cryptography.CryptographicException] { + $module.FailJson("Unable to import certificate with thumbprint '$($cert.Thumbprint)' with the current permissions: $($_.Exception.Message)", $_) + } catch { + $module.FailJson("Unable to import certificate with thumbprint '$($cert.Thumbprint)': $($_.Exception.Message)", $_) + } + $module.Result.changed = $true + } + } + } +} finally { + $store.Close() +} + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_certificate_store.py b/test/support/windows-integration/plugins/modules/win_certificate_store.py new file mode 100644 index 00000000..dc617e33 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_certificate_store.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_certificate_store +version_added: '2.5' +short_description: Manages the certificate store +description: +- Used to import/export and remove certificates and keys from the local + certificate store. +- This module is not used to create certificates and will only manage existing + certs as a file or in the store. +- It can be used to import PEM, DER, P7B, PKCS12 (PFX) certificates and export + PEM, DER and PKCS12 certificates. +options: + state: + description: + - If C(present), will ensure that the certificate at I(path) is imported + into the certificate store specified. 
+ - If C(absent), will ensure that the certificate specified by I(thumbprint) + or the thumbprint of the cert at I(path) is removed from the store + specified. + - If C(exported), will ensure the file at I(path) is a certificate + specified by I(thumbprint). + - When exporting a certificate, if I(path) is a directory then the module + will fail, otherwise the file will be replaced if needed. + type: str + choices: [ absent, exported, present ] + default: present + path: + description: + - The path to a certificate file. + - This is required when I(state) is C(present) or C(exported). + - When I(state) is C(absent) and I(thumbprint) is not specified, the + thumbprint is derived from the certificate at this path. + type: path + thumbprint: + description: + - The thumbprint as a hex string to either export or remove. + - See the examples for how to specify the thumbprint. + type: str + store_name: + description: + - The store name to use when importing a certificate or searching for a + certificate. + - "C(AddressBook): The X.509 certificate store for other users" + - "C(AuthRoot): The X.509 certificate store for third-party certificate authorities (CAs)" + - "C(CertificateAuthority): The X.509 certificate store for intermediate certificate authorities (CAs)" + - "C(Disallowed): The X.509 certificate store for revoked certificates" + - "C(My): The X.509 certificate store for personal certificates" + - "C(Root): The X.509 certificate store for trusted root certificate authorities (CAs)" + - "C(TrustedPeople): The X.509 certificate store for directly trusted people and resources" + - "C(TrustedPublisher): The X.509 certificate store for directly trusted publishers" + type: str + choices: + - AddressBook + - AuthRoot + - CertificateAuthority + - Disallowed + - My + - Root + - TrustedPeople + - TrustedPublisher + default: My + store_location: + description: + - The store location to use when importing a certificate or searching for a + certificate. + choices: [ CurrentUser, LocalMachine ] + default: LocalMachine + password: + description: + - The password of the pkcs12 certificate key. + - This is used when reading a pkcs12 certificate file or the password to + set when C(state=exported) and C(file_type=pkcs12). + - If the pkcs12 file has no password set or no password should be set on + the exported file, do not set this option. + type: str + key_exportable: + description: + - Whether to allow the private key to be exported. + - If C(no), then this module and other process will only be able to export + the certificate and the private key cannot be exported. + - Used when C(state=present) only. + type: bool + default: yes + key_storage: + description: + - Specifies where Windows will store the private key when it is imported. + - When set to C(default), the default option as set by Windows is used, typically C(user). + - When set to C(machine), the key is stored in a path accessible by various + users. + - When set to C(user), the key is stored in a path only accessible by the + current user. + - Used when C(state=present) only and cannot be changed once imported. + - See U(https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags.aspx) + for more details. + type: str + choices: [ default, machine, user ] + default: default + file_type: + description: + - The file type to export the certificate as when C(state=exported). + - C(der) is a binary ASN.1 encoded file. + - C(pem) is a base64 encoded file of a der file in the OpenSSL form. 
+ - C(pkcs12) (also known as pfx) is a binary container that contains both + the certificate and private key, unlike the other options. + - When C(pkcs12) is set and the private key is not exportable or accessible + by the current user, it will throw an exception. + type: str + choices: [ der, pem, pkcs12 ] + default: der +notes: +- Some actions on PKCS12 certificates and keys may fail with the error + C(the specified network password is not correct), either use CredSSP or + Kerberos with credential delegation, or use C(become) to bypass these + restrictions. +- The certificates must be located on the Windows host to be set with I(path). +- When importing a certificate for usage in IIS, it is generally required + to use the C(machine) key_storage option, as both C(default) and C(user) + will make the private key unreadable to IIS APPPOOL identities and prevent + binding the certificate to the https endpoint. +author: +- Jordan Borean (@jborean93) +''' + +EXAMPLES = r''' +- name: Import a certificate + win_certificate_store: + path: C:\Temp\cert.pem + state: present + +- name: Import pfx certificate that is password protected + win_certificate_store: + path: C:\Temp\cert.pfx + state: present + password: VeryStrongPasswordHere! + become: yes + become_method: runas + +- name: Import pfx certificate without password and set private key as un-exportable + win_certificate_store: + path: C:\Temp\cert.pfx + state: present + key_exportable: no + # usually you don't set this here but it is for illustrative purposes + vars: + ansible_winrm_transport: credssp + +- name: Remove a certificate based on file thumbprint + win_certificate_store: + path: C:\Temp\cert.pem + state: absent + +- name: Remove a certificate based on thumbprint + win_certificate_store: + thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27 + state: absent + +- name: Remove certificate based on thumbprint in the CurrentUser/TrustedPublisher store + win_certificate_store: + thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27 + state: absent + store_location: CurrentUser + store_name: TrustedPublisher + +- name: Export certificate as der encoded file + win_certificate_store: + path: C:\Temp\cert.cer + state: exported + file_type: der + +- name: Export certificate and key as pfx encoded file + win_certificate_store: + path: C:\Temp\cert.pfx + state: exported + file_type: pkcs12 + password: AnotherStrongPass! + become: yes + become_method: runas + become_user: SYSTEM + +- name: Import certificate to be used by IIS + win_certificate_store: + path: C:\Temp\cert.pfx + file_type: pkcs12 + password: StrongPassword! + store_location: LocalMachine + key_storage: machine + state: present +''' + +RETURN = r''' +thumbprints: + description: A list of certificate thumbprints that were touched by the + module.
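+# Editor's note: the I(thumbprint) examples above use a pre-computed hex string. As a
+# minimal PowerShell sketch (the file path is hypothetical), the matching value can be
+# read off a certificate file like this:
+#   $cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2 'C:\Temp\cert.pem'
+#   $cert.Thumbprint   # -> e.g. BD7AF104CF1872BDB518D95C9534EA941665FD27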
+ returned: success + type: list + sample: ["BC05633694E675449136679A658281F17A191087"] +''' diff --git a/test/support/windows-integration/plugins/modules/win_command.ps1 b/test/support/windows-integration/plugins/modules/win_command.ps1 new file mode 100644 index 00000000..e2a30650 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_command.ps1 @@ -0,0 +1,78 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.CommandUtil +#Requires -Module Ansible.ModuleUtils.FileUtil + +# TODO: add check mode support + +Set-StrictMode -Version 2 +$ErrorActionPreference = 'Stop' + +$params = Parse-Args $args -supports_check_mode $false + +$raw_command_line = Get-AnsibleParam -obj $params -name "_raw_params" -type "str" -failifempty $true +$chdir = Get-AnsibleParam -obj $params -name "chdir" -type "path" +$creates = Get-AnsibleParam -obj $params -name "creates" -type "path" +$removes = Get-AnsibleParam -obj $params -name "removes" -type "path" +$stdin = Get-AnsibleParam -obj $params -name "stdin" -type "str" +$output_encoding_override = Get-AnsibleParam -obj $params -name "output_encoding_override" -type "str" + +$raw_command_line = $raw_command_line.Trim() + +$result = @{ + changed = $true + cmd = $raw_command_line +} + +if ($creates -and $(Test-AnsiblePath -Path $creates)) { + Exit-Json @{msg="skipped, since $creates exists";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0} +} + +if ($removes -and -not $(Test-AnsiblePath -Path $removes)) { + Exit-Json @{msg="skipped, since $removes does not exist";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0} +} + +$command_args = @{ + command = $raw_command_line +} +if ($chdir) { + $command_args['working_directory'] = $chdir +} +if ($stdin) { + $command_args['stdin'] = $stdin +} +if ($output_encoding_override) { + $command_args['output_encoding_override'] = $output_encoding_override +} + +$start_datetime = [DateTime]::UtcNow +try { + $command_result = Run-Command @command_args +} catch { + $result.changed = $false + try { + $result.rc = $_.Exception.NativeErrorCode + } catch { + $result.rc = 2 + } + Fail-Json -obj $result -message $_.Exception.Message +} + +$result.stdout = $command_result.stdout +$result.stderr = $command_result.stderr +$result.rc = $command_result.rc + +$end_datetime = [DateTime]::UtcNow +$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff") + +If ($result.rc -ne 0) { + Fail-Json -obj $result -message "non-zero return code" +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_command.py b/test/support/windows-integration/plugins/modules/win_command.py new file mode 100644 index 00000000..508419b2 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_command.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ansible, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_command +short_description: Executes a command on a remote Windows node +version_added: 2.2 +description: + - The 
C(win_command) module takes the command name followed by a list of space-delimited arguments. + - The given command will be executed on all selected nodes. It will not be + processed through the shell, so variables like C($env:HOME) and operations + like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell) + module if you need these features). + - For non-Windows targets, use the M(command) module instead. +options: + free_form: + description: + - The C(win_command) module takes a free form command to run. + - There is no parameter actually named 'free form'. See the examples! + type: str + required: yes + creates: + description: + - A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped. + type: path + removes: + description: + - A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped. + type: path + chdir: + description: + - Set the specified path as the current working directory before executing a command. + type: path + stdin: + description: + - Set the stdin of the command directly to the specified value. + type: str + version_added: '2.5' + output_encoding_override: + description: + - This option overrides the encoding of stdout/stderr output. + - You can use this option when you need to run a command which ignores the console's codepage. + - You should only need to use this option in very rare circumstances. + - This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()). + See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings). + type: str + version_added: '2.10' +notes: + - If you want to run a command through a shell (say you are using C(<), + C(>), C(|), etc), you actually want the M(win_shell) module instead. The + C(win_command) module is much more secure as it's not affected by the user's + environment. + - C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not + exist, use this. +seealso: +- module: command +- module: psexec +- module: raw +- module: win_psexec +- module: win_shell +author: + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = r''' +- name: Save the result of 'whoami' in 'whoami_out' + win_command: whoami + register: whoami_out + +- name: Run command that only runs if folder exists and runs from a specific folder + win_command: wbadmin -backupTarget:C:\backup\ + args: + chdir: C:\somedir\ + creates: C:\backup\ + +- name: Run an executable and send data to the stdin for the executable + win_command: powershell.exe - + args: + stdin: Write-Host test +''' + +RETURN = r''' +msg: + description: Whether the command made a change on the target. + returned: always + type: bool + sample: true +start: + description: The command execution start time + returned: always + type: str + sample: '2016-02-25 09:18:26.429568' +end: + description: The command execution end time + returned: always + type: str + sample: '2016-02-25 09:18:26.755339' +delta: + description: The command execution delta time + returned: always + type: str + sample: '0:00:00.325771' +stdout: + description: The command standard output + returned: always + type: str + sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
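+# Editor's note: the start/end/delta samples above come straight from the module's own
+# format strings (see win_command.ps1 earlier in this patch); a self-contained
+# PowerShell sketch that reproduces them:
+#   $start = [DateTime]::UtcNow
+#   Start-Sleep -Milliseconds 250
+#   $end = [DateTime]::UtcNow
+#   $end.ToString("yyyy-MM-dd hh:mm:ss.ffffff")      # e.g. '2016-02-25 09:18:26.755339'
+#   ($end - $start).ToString("h\:mm\:ss\.ffffff")    # e.g. '0:00:00.250123'
+# Note the module's format string uses C(hh), i.e. a 12-hour clock.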
+stderr: + description: The command standard error + returned: always + type: str + sample: 'ls: cannot access foo: No such file or directory' +cmd: + description: The command executed by the task + returned: always + type: str + sample: 'rabbitmqctl join_cluster rabbit@master' +rc: + description: The command return code (0 means success) + returned: always + type: int + sample: 0 +stdout_lines: + description: The command standard output split in lines + returned: always + type: list + sample: [u'Clustering node rabbit@slave1 with rabbit@master ...'] +''' diff --git a/test/support/windows-integration/plugins/modules/win_copy.ps1 b/test/support/windows-integration/plugins/modules/win_copy.ps1 new file mode 100644 index 00000000..6a26ee72 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_copy.ps1 @@ -0,0 +1,403 @@ +#!powershell + +# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.Backup + +$ErrorActionPreference = 'Stop' + +$params = Parse-Args -arguments $args -supports_check_mode $true +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false +$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false + +# there are 4 modes to win_copy which are driven by the action plugins: +# explode: src is a zip file which needs to be extracted to dest, for use with multiple files +# query: win_copy action plugin wants to get the state of remote files to check whether it needs to send them +# remote: all copy action is happening remotely (remote_src=True) +# single: a single file has been copied, also used with template +$copy_mode = Get-AnsibleParam -obj $params -name "_copy_mode" -type "str" -default "single" -validateset "explode","query","remote","single" + +# used in explode, remote and single mode +$src = Get-AnsibleParam -obj $params -name "src" -type "path" -failifempty ($copy_mode -in @("explode","process","single")) +$dest = Get-AnsibleParam -obj $params -name "dest" -type "path" -failifempty $true +$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false + +# used in single mode +$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str" + +# used in query and remote mode +$force = Get-AnsibleParam -obj $params -name "force" -type "bool" -default $true + +# used in query mode, contains the local files/directories/symlinks that are to be copied +$files = Get-AnsibleParam -obj $params -name "files" -type "list" +$directories = Get-AnsibleParam -obj $params -name "directories" -type "list" + +$result = @{ + changed = $false +} + +if ($diff_mode) { + $result.diff = @{} +} + +Function Copy-File($source, $dest) { + $diff = "" + $copy_file = $false + $source_checksum = $null + if ($force) { + $source_checksum = Get-FileChecksum -path $source + } + + if (Test-Path -LiteralPath $dest -PathType Container) { + Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': dest is already a folder" + } elseif (Test-Path -LiteralPath $dest -PathType Leaf) { + if ($force) { + $target_checksum = Get-FileChecksum -path $dest + if ($source_checksum -ne $target_checksum) { + $copy_file = $true + } + } + } else { + $copy_file = $true + } + + if ($copy_file) { + $file_dir = [System.IO.Path]::GetDirectoryName($dest) + # 
validate the parent dir is not a file and that it exists + if (Test-Path -LiteralPath $file_dir -PathType Leaf) { + Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder" + } elseif (-not (Test-Path -LiteralPath $file_dir)) { + # directory doesn't exist, need to create + New-Item -Path $file_dir -ItemType Directory -WhatIf:$check_mode | Out-Null + $diff += "+$file_dir\`n" + } + + if ($backup) { + $result.backup_file = Backup-File -path $dest -WhatIf:$check_mode + } + + if (Test-Path -LiteralPath $dest -PathType Leaf) { + Remove-Item -LiteralPath $dest -Force -Recurse -WhatIf:$check_mode | Out-Null + $diff += "-$dest`n" + } + + if (-not $check_mode) { + # cannot run with -WhatIf:$check_mode as if the parent dir didn't + # exist and was created above would still not exist in check mode + Copy-Item -LiteralPath $source -Destination $dest -Force | Out-Null + } + $diff += "+$dest`n" + + $result.changed = $true + } + + # ugly but to save us from running the checksum twice, let's return it for + # the main code to add it to $result + return ,@{ diff = $diff; checksum = $source_checksum } +} + +Function Copy-Folder($source, $dest) { + $diff = "" + + if (-not (Test-Path -LiteralPath $dest -PathType Container)) { + $parent_dir = [System.IO.Path]::GetDirectoryName($dest) + if (Test-Path -LiteralPath $parent_dir -PathType Leaf) { + Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder" + } + if (Test-Path -LiteralPath $dest -PathType Leaf) { + Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': dest is already a file" + } + + New-Item -Path $dest -ItemType Container -WhatIf:$check_mode | Out-Null + $diff += "+$dest\`n" + $result.changed = $true + } + + $child_items = Get-ChildItem -LiteralPath $source -Force + foreach ($child_item in $child_items) { + $dest_child_path = Join-Path -Path $dest -ChildPath $child_item.Name + if ($child_item.PSIsContainer) { + $diff += (Copy-Folder -source $child_item.Fullname -dest $dest_child_path) + } else { + $diff += (Copy-File -source $child_item.Fullname -dest $dest_child_path).diff + } + } + + return $diff +} + +Function Get-FileSize($path) { + $file = Get-Item -LiteralPath $path -Force + if ($file.PSIsContainer) { + $size = (Get-ChildItem -Literalpath $file.FullName -Recurse -Force | ` + Where-Object { $_.PSObject.Properties.Name -contains 'Length' } | ` + Measure-Object -Property Length -Sum).Sum + if ($null -eq $size) { + $size = 0 + } + } else { + $size = $file.Length + } + + $size +} + +Function Extract-Zip($src, $dest) { + $archive = [System.IO.Compression.ZipFile]::Open($src, [System.IO.Compression.ZipArchiveMode]::Read, [System.Text.Encoding]::UTF8) + foreach ($entry in $archive.Entries) { + $archive_name = $entry.FullName + + # FullName may be appended with / or \, determine if it is padded and remove it + $padding_length = $archive_name.Length % 4 + if ($padding_length -eq 0) { + $is_dir = $false + $base64_name = $archive_name + } elseif ($padding_length -eq 1) { + $is_dir = $true + if ($archive_name.EndsWith("/") -or $archive_name.EndsWith("`\")) { + $base64_name = $archive_name.Substring(0, $archive_name.Length - 1) + } else { + throw "invalid base64 archive name '$archive_name'" + } + } else { + throw "invalid base64 length '$archive_name'" + } + + # to handle unicode character, win_copy action plugin has encoded the filename + $decoded_archive_name = 
[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($base64_name)) + # re-add the / to the entry full name if it was a directory + if ($is_dir) { + $decoded_archive_name = "$decoded_archive_name/" + } + $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_name) + $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path) + + if (-not (Test-Path -LiteralPath $entry_dir)) { + New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null + } + + if ($is_dir -eq $false) { + if (-not $check_mode) { + [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $entry_target_path, $true) + } + } + } + $archive.Dispose() # release the handle of the zip file +} + +Function Extract-ZipLegacy($src, $dest) { + if (-not (Test-Path -LiteralPath $dest)) { + New-Item -Path $dest -ItemType Directory -WhatIf:$check_mode | Out-Null + } + $shell = New-Object -ComObject Shell.Application + $zip = $shell.NameSpace($src) + $dest_path = $shell.NameSpace($dest) + + foreach ($entry in $zip.Items()) { + $is_dir = $entry.IsFolder + $encoded_archive_entry = $entry.Name + # to handle unicode characters, the win_copy action plugin has encoded the filename + $decoded_archive_entry = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded_archive_entry)) + if ($is_dir) { + $decoded_archive_entry = "$decoded_archive_entry/" + } + + $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_entry) + $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path) + + if (-not (Test-Path -LiteralPath $entry_dir)) { + New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null + } + + if ($is_dir -eq $false -and (-not $check_mode)) { + # https://msdn.microsoft.com/en-us/library/windows/desktop/bb787866.aspx + # From Folder.CopyHere documentation, 1044 means: + # - 1024: do not display a user interface if an error occurs + # - 16: respond with "yes to all" for any dialog box that is displayed + # - 4: do not display a progress dialog box + $dest_path.CopyHere($entry, 1044) + + # once the file is extracted, we need to rename it to its non-base64 name + $combined_encoded_path = [System.IO.Path]::Combine($dest, $encoded_archive_entry) + Move-Item -LiteralPath $combined_encoded_path -Destination $entry_target_path -Force | Out-Null + } + } +} + +if ($copy_mode -eq "query") { + # we only return a list of files/directories that need to be copied over + # the source of the local file will be the key used + $changed_files = @() + $changed_directories = @() + $changed_symlinks = @() + + foreach ($file in $files) { + $filename = $file.dest + $local_checksum = $file.checksum + + $filepath = Join-Path -Path $dest -ChildPath $filename + if (Test-Path -LiteralPath $filepath -PathType Leaf) { + if ($force) { + $checksum = Get-FileChecksum -path $filepath + if ($checksum -ne $local_checksum) { + $changed_files += $file + } + } + } elseif (Test-Path -LiteralPath $filepath -PathType Container) { + Fail-Json -obj $result -message "cannot copy file to dest '$filepath': object at path is already a directory" + } else { + $changed_files += $file + } + } + + foreach ($directory in $directories) { + $dirname = $directory.dest + + $dirpath = Join-Path -Path $dest -ChildPath $dirname + $parent_dir = [System.IO.Path]::GetDirectoryName($dirpath) + if (Test-Path -LiteralPath $parent_dir -PathType Leaf) { + Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at parent directory path is already a file" + } + if
(Test-Path -LiteralPath $dirpath -PathType Leaf) { + Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at path is already a file" + } elseif (-not (Test-Path -LiteralPath $dirpath -PathType Container)) { + $changed_directories += $directory + } + } + + # TODO: Handle symlinks + + $result.files = $changed_files + $result.directories = $changed_directories + $result.symlinks = $changed_symlinks +} elseif ($copy_mode -eq "explode") { + # a single zip file containing the files and directories needs to be + # expanded; this will always result in a change as the calculation is done + # on the win_copy action plugin and is only run if a change needs to occur + if (-not (Test-Path -LiteralPath $src -PathType Leaf)) { + Fail-Json -obj $result -message "Cannot expand src zip file: '$src' as it does not exist" + } + + # Detect if the PS zip assemblies are available or whether to use Shell + $use_legacy = $false + try { + Add-Type -AssemblyName System.IO.Compression.FileSystem | Out-Null + Add-Type -AssemblyName System.IO.Compression | Out-Null + } catch { + $use_legacy = $true + } + if ($use_legacy) { + Extract-ZipLegacy -src $src -dest $dest + } else { + Extract-Zip -src $src -dest $dest + } + + $result.changed = $true +} elseif ($copy_mode -eq "remote") { + # all copy actions are happening on the remote side (windows host), need + # to copy source and dest using PS code + $result.src = $src + $result.dest = $dest + + if (-not (Test-Path -LiteralPath $src)) { + Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist" + } + + if (Test-Path -LiteralPath $src -PathType Container) { + # we are copying a directory or the contents of a directory + $result.operation = 'folder_copy' + if ($src.EndsWith("/") -or $src.EndsWith("`\")) { + # copying the folder's contents to dest + $diff = "" + $child_files = Get-ChildItem -LiteralPath $src -Force + foreach ($child_file in $child_files) { + $dest_child_path = Join-Path -Path $dest -ChildPath $child_file.Name + if ($child_file.PSIsContainer) { + $diff += Copy-Folder -source $child_file.FullName -dest $dest_child_path + } else { + $diff += (Copy-File -source $child_file.FullName -dest $dest_child_path).diff + } + } + } else { + # copying the folder and its contents to dest + $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name + $result.dest = $dest + $diff = Copy-Folder -source $src -dest $dest + } + } else { + # we are just copying a single file to dest + $result.operation = 'file_copy' + + $source_basename = (Get-Item -LiteralPath $src -Force).Name + $result.original_basename = $source_basename + + if ($dest.EndsWith("/") -or $dest.EndsWith("`\")) { + $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name + $result.dest = $dest + } else { + # check if the parent dir exists, this is only done if src is a + # file and dest is the path to a file (doesn't end with \ or /) + $parent_dir = Split-Path -LiteralPath $dest + if (Test-Path -LiteralPath $parent_dir -PathType Leaf) { + Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file" + } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) { + Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist" + } + } + $copy_result = Copy-File -source $src -dest $dest + $diff = $copy_result.diff + $result.checksum = $copy_result.checksum + } + + # the file might not exist if running in check mode + if (-not $check_mode -or
(Test-Path -LiteralPath $dest -PathType Leaf)) { + $result.size = Get-FileSize -path $dest + } else { + $result.size = $null + } + if ($diff_mode) { + $result.diff.prepared = $diff + } +} elseif ($copy_mode -eq "single") { + # a single file is located in src and we need to copy to dest, this will + # always result in a change as the calculation is done on the Ansible side + # before this is run. This should also never run in check mode + if (-not (Test-Path -LiteralPath $src -PathType Leaf)) { + Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist" + } + + # the dest parameter is a directory, we need to append original_basename + if ($dest.EndsWith("/") -or $dest.EndsWith("`\") -or (Test-Path -LiteralPath $dest -PathType Container)) { + $remote_dest = Join-Path -Path $dest -ChildPath $original_basename + $parent_dir = Split-Path -LiteralPath $remote_dest + + # when dest ends with /, we need to create the destination directories + if (Test-Path -LiteralPath $parent_dir -PathType Leaf) { + Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file" + } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) { + New-Item -Path $parent_dir -ItemType Directory | Out-Null + } + } else { + $remote_dest = $dest + $parent_dir = Split-Path -LiteralPath $remote_dest + + # check if the dest parent dirs exist, need to fail if they don't + if (Test-Path -LiteralPath $parent_dir -PathType Leaf) { + Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file" + } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) { + Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist" + } + } + + if ($backup) { + $result.backup_file = Backup-File -path $remote_dest -WhatIf:$check_mode + } + + Copy-Item -LiteralPath $src -Destination $remote_dest -Force | Out-Null + $result.changed = $true +} + +Exit-Json -obj $result diff --git a/test/support/windows-integration/plugins/modules/win_copy.py b/test/support/windows-integration/plugins/modules/win_copy.py new file mode 100644 index 00000000..a55f4c65 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_copy.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_copy +version_added: '1.9.2' +short_description: Copies files to remote locations on windows hosts +description: +- The C(win_copy) module copies a file on the local box to remote windows locations. +- For non-Windows targets, use the M(copy) module instead. +options: + content: + description: + - When used instead of C(src), sets the contents of a file directly to the + specified value. + - This is for simple values, for anything complex or with formatting please + switch to the M(template) module. + type: str + version_added: '2.3' + decrypt: + description: + - This option controls the autodecryption of source files using vault. + type: bool + default: yes + version_added: '2.5' + dest: + description: + - Remote absolute path where the file should be copied to. + - If C(src) is a directory, this must be a directory too. 
+ - Use \ for path separators or \\ when in "double quotes". + - If C(dest) ends with \ then source or the contents of source will be + copied to the directory without renaming. + - If C(dest) is a nonexistent path, it will only be created if C(dest) ends + with "/" or "\", or C(src) is a directory. + - If C(src) and C(dest) are files and if the parent directory of C(dest) + doesn't exist, then the task will fail. + type: path + required: yes + backup: + description: + - Determine whether a backup should be created. + - When set to C(yes), create a backup file including the timestamp information + so you can get the original file back if you somehow clobbered it incorrectly. + - No backup is taken when C(remote_src=False) and multiple files are being + copied. + type: bool + default: no + version_added: '2.8' + force: + description: + - If set to C(yes), the file will only be transferred if the content + is different from the destination. + - If set to C(no), the file will only be transferred if the + destination does not exist. + - If set to C(no), no checksumming of the content is performed, which can + help improve performance on larger files. + type: bool + default: yes + version_added: '2.3' + local_follow: + description: + - This flag indicates that filesystem links in the source tree, if they + exist, should be followed. + type: bool + default: yes + version_added: '2.4' + remote_src: + description: + - If C(no), it will search for src on the originating/master machine. + - If C(yes), it will go to the remote/target machine for the src. + type: bool + default: no + version_added: '2.3' + src: + description: + - Local path to a file to copy to the remote server; can be absolute or + relative. + - If path is a directory, it is copied (including the source folder name) + recursively to C(dest). + - If path is a directory and ends with "/", only the inside contents of + that directory are copied to the destination. Otherwise, if it does not + end with "/", the directory itself with all contents is copied. + - If path is a file and dest ends with "\", the file is copied to the + folder with the same filename. + - Required unless using C(content). + type: path +notes: +- Currently win_copy does not support copying symbolic links, whether local to + remote or remote to remote. +- It is recommended that backslashes C(\) are used instead of C(/) when dealing + with remote paths. +- Because win_copy runs over WinRM, it is not a very efficient transfer + mechanism. If sending large files, consider hosting them on a web service and + using M(win_get_url) instead.
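+# Editor's note: a rough, standalone PowerShell sketch of the C(force) comparison the
+# module performs (paths hypothetical; the module itself uses its internal
+# Get-FileChecksum helper rather than Get-FileHash):
+#   $src_hash  = (Get-FileHash -LiteralPath 'C:\src\foo.conf'  -Algorithm SHA1).Hash
+#   $dest_hash = (Get-FileHash -LiteralPath 'C:\Temp\foo.conf' -Algorithm SHA1).Hash
+#   # only copy when the content actually differs
+#   if ($src_hash -ne $dest_hash) { Copy-Item -LiteralPath 'C:\src\foo.conf' -Destination 'C:\Temp\foo.conf' -Force }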
+seealso: +- module: assemble +- module: copy +- module: win_get_url +- module: win_robocopy +author: +- Jon Hawkesworth (@jhawkesworth) +- Jordan Borean (@jborean93) +''' + +EXAMPLES = r''' +- name: Copy a single file + win_copy: + src: /srv/myfiles/foo.conf + dest: C:\Temp\renamed-foo.conf + +- name: Copy a single file, but keep a backup + win_copy: + src: /srv/myfiles/foo.conf + dest: C:\Temp\renamed-foo.conf + backup: yes + +- name: Copy a single file keeping the filename + win_copy: + src: /src/myfiles/foo.conf + dest: C:\Temp\ + +- name: Copy folder to C:\Temp (results in C:\Temp\temp_files) + win_copy: + src: files/temp_files + dest: C:\Temp + +- name: Copy folder contents recursively + win_copy: + src: files/temp_files/ + dest: C:\Temp + +- name: Copy a single file where the source is on the remote host + win_copy: + src: C:\Temp\foo.txt + dest: C:\ansible\foo.txt + remote_src: yes + +- name: Copy a folder recursively where the source is on the remote host + win_copy: + src: C:\Temp + dest: C:\ansible + remote_src: yes + +- name: Set the contents of a file + win_copy: + content: abc123 + dest: C:\Temp\foo.txt + +- name: Copy a single file as another user + win_copy: + src: NuGet.config + dest: '%AppData%\NuGet\NuGet.config' + vars: + ansible_become_user: user + ansible_become_password: pass + # The tmp dir must be set when using win_copy as another user + # This ensures the become user will have permissions for the operation + # Make sure to specify a folder both the ansible_user and the become_user have access to (i.e not %TEMP% which is user specific and requires Admin) + ansible_remote_tmp: 'c:\tmp' +''' + +RETURN = r''' +backup_file: + description: Name of the backup file that was created. + returned: if backup=yes + type: str + sample: C:\Path\To\File.txt.11540.20150212-220915.bak +dest: + description: Destination file/path. + returned: changed + type: str + sample: C:\Temp\ +src: + description: Source file used for the copy on the target machine. + returned: changed + type: str + sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source +checksum: + description: SHA1 checksum of the file after running copy. + returned: success, src is a file + type: str + sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827 +size: + description: Size of the target, after execution. + returned: changed, src is a file + type: int + sample: 1220 +operation: + description: Whether a single file copy took place or a folder copy. + returned: success + type: str + sample: file_copy +original_basename: + description: Basename of the copied file. 
+ returned: changed, src is a file + type: str + sample: foo.txt +''' diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 b/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 new file mode 100644 index 00000000..593ee763 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 @@ -0,0 +1,129 @@ +#!powershell + +# Copyright: 2019, rnsc(@rnsc) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -OSVersion 6.3 + +$spec = @{ + options = @{ + drive_letter = @{ type = "str"; required = $true } + state = @{ type = "str"; choices = "absent", "present"; default = "present"; } + settings = @{ + type = "dict" + required = $false + options = @{ + minimum_file_size = @{ type = "int"; default = 32768 } + minimum_file_age_days = @{ type = "int"; default = 2 } + no_compress = @{ type = "bool"; required = $false; default = $false } + optimize_in_use_files = @{ type = "bool"; required = $false; default = $false } + verify = @{ type = "bool"; required = $false; default = $false } + } + } + } + supports_check_mode = $true +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$drive_letter = $module.Params.drive_letter +$state = $module.Params.state +$settings = $module.Params.settings + +$module.Result.changed = $false +$module.Result.reboot_required = $false +$module.Result.msg = "" + +function Set-DataDeduplication($volume, $state, $settings, $dedup_job) { + + $current_state = 'absent' + + try { + $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):" + } catch { + $dedup_info = $null + } + + if ($dedup_info.Enabled) { + $current_state = 'present' + } + + if ( $state -ne $current_state ) { + if( -not $module.CheckMode) { + if($state -eq 'present') { + # Enable-DedupVolume -Volume + Enable-DedupVolume -Volume "$($volume.DriveLetter):" + } elseif ($state -eq 'absent') { + Disable-DedupVolume -Volume "$($volume.DriveLetter):" + } + } + $module.Result.changed = $true + } + + if ($state -eq 'present') { + if ($null -ne $settings) { + Set-DataDedupJobSettings -volume $volume -settings $settings + } + } +} + +function Set-DataDedupJobSettings ($volume, $settings) { + + try { + $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):" + } catch { + $dedup_info = $null + } + + ForEach ($key in $settings.keys) { + + # See Microsoft documentation: + # https://docs.microsoft.com/en-us/powershell/module/deduplication/set-dedupvolume?view=win10-ps + + $update_key = $key + $update_value = $settings.$($key) + # Transform Ansible style options to Powershell params + $update_key = $update_key -replace('_', '') + + if ($update_key -eq "MinimumFileSize" -and $update_value -lt 32768) { + $update_value = 32768 + } + + $current_value = ($dedup_info | Select-Object -ExpandProperty $update_key) + + if ($update_value -ne $current_value) { + $command_param = @{ + $($update_key) = $update_value + } + + # Set-DedupVolume -Volume ` + # -NoCompress ` + # -MinimumFileAgeDays ` + # -MinimumFileSize (minimum 32768) + if( -not $module.CheckMode ) { + Set-DedupVolume -Volume "$($volume.DriveLetter):" @command_param + } + + $module.Result.changed = $true + } + } + +} + +# Install required feature +$feature_name = "FS-Data-Deduplication" +if( -not $module.CheckMode) { + $feature = Install-WindowsFeature -Name $feature_name + + if ($feature.RestartNeeded -eq 'Yes') { + $module.Result.reboot_required = $true + 
$module.FailJson("$feature_name was installed but requires Windows to be rebooted to work.") + } +} + +$volume = Get-Volume -DriveLetter $drive_letter + +Set-DataDeduplication -volume $volume -state $state -settings $settings -dedup_job $dedup_job + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.py b/test/support/windows-integration/plugins/modules/win_data_deduplication.py new file mode 100644 index 00000000..d320b9f7 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_data_deduplication.py @@ -0,0 +1,87 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: 2019, rnsc(@rnsc) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_data_deduplication +version_added: "2.10" +short_description: Module to enable Data Deduplication on a volume. +description: +- This module can be used to enable Data Deduplication on a Windows volume. +- The module will install the FS-Data-Deduplication feature (a reboot will be necessary). +options: + drive_letter: + description: + - Windows drive letter on which to enable data deduplication. + required: yes + type: str + state: + description: + - Wether to enable or disable data deduplication on the selected volume. + default: present + type: str + choices: [ present, absent ] + settings: + description: + - Dictionary of settings to pass to the Set-DedupVolume powershell command. + type: dict + suboptions: + minimum_file_size: + description: + - Minimum file size you want to target for deduplication. + - It will default to 32768 if not defined or if the value is less than 32768. + type: int + default: 32768 + minimum_file_age_days: + description: + - Minimum file age you want to target for deduplication. + type: int + default: 2 + no_compress: + description: + - Wether you want to enabled filesystem compression or not. + type: bool + default: no + optimize_in_use_files: + description: + - Indicates that the server attempts to optimize currently open files. + type: bool + default: no + verify: + description: + - Indicates whether the deduplication engine performs a byte-for-byte verification for each duplicate chunk + that optimization creates, rather than relying on a cryptographically strong hash. + - This option is not recommend. + - Setting this parameter to True can degrade optimization performance. 
+ type: bool + default: no +author: +- rnsc (@rnsc) +''' + +EXAMPLES = r''' +- name: Enable Data Deduplication on D + win_data_deduplication: + drive_letter: 'D' + state: present + +- name: Enable Data Deduplication on D + win_data_deduplication: + drive_letter: 'D' + state: present + settings: + no_compress: true + minimum_file_age_days: 1 + minimum_file_size: 0 +''' + +RETURN = r''' +# +''' diff --git a/test/support/windows-integration/plugins/modules/win_dsc.ps1 b/test/support/windows-integration/plugins/modules/win_dsc.ps1 new file mode 100644 index 00000000..690f391a --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_dsc.ps1 @@ -0,0 +1,398 @@ +#!powershell + +# Copyright: (c) 2015, Trond Hindenes , and others +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Version 5 + +Function ConvertTo-ArgSpecType { + <# + .SYNOPSIS + Converts the DSC parameter type to the arg spec type required for Ansible. + #> + param( + [Parameter(Mandatory=$true)][String]$CimType + ) + + $arg_type = switch($CimType) { + Boolean { "bool" } + Char16 { [Func[[Object], [Char]]]{ [System.Char]::Parse($args[0].ToString()) } } + DateTime { [Func[[Object], [DateTime]]]{ [System.DateTime]($args[0].ToString()) } } + Instance { "dict" } + Real32 { "float" } + Real64 { [Func[[Object], [Double]]]{ [System.Double]::Parse($args[0].ToString()) } } + Reference { "dict" } + SInt16 { [Func[[Object], [Int16]]]{ [System.Int16]::Parse($args[0].ToString()) } } + SInt32 { "int" } + SInt64 { [Func[[Object], [Int64]]]{ [System.Int64]::Parse($args[0].ToString()) } } + SInt8 { [Func[[Object], [SByte]]]{ [System.SByte]::Parse($args[0].ToString()) } } + String { "str" } + UInt16 { [Func[[Object], [UInt16]]]{ [System.UInt16]::Parse($args[0].ToString()) } } + UInt32 { [Func[[Object], [UInt32]]]{ [System.UInt32]::Parse($args[0].ToString()) } } + UInt64 { [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0].ToString()) } } + UInt8 { [Func[[Object], [Byte]]]{ [System.Byte]::Parse($args[0].ToString()) } } + Unknown { "raw" } + default { "raw" } + } + return $arg_type +} + +Function Get-DscCimClassProperties { + <# + .SYNOPSIS + Get's a list of CimProperties of a CIM Class. It filters out any magic or + read only properties that we don't need to know about. + #> + param([Parameter(Mandatory=$true)][String]$ClassName) + + $resource = Get-CimClass -ClassName $ClassName -Namespace root\Microsoft\Windows\DesiredStateConfiguration + + # Filter out any magic properties that are used internally on an OMI_BaseResource + # https://github.com/PowerShell/PowerShell/blob/master/src/System.Management.Automation/DscSupport/CimDSCParser.cs#L1203 + $magic_properties = @("ResourceId", "SourceInfo", "ModuleName", "ModuleVersion", "ConfigurationName") + $properties = $resource.CimClassProperties | Where-Object { + + ($resource.CimSuperClassName -ne "OMI_BaseResource" -or $_.Name -notin $magic_properties) -and + -not $_.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::ReadOnly) + } + + return ,$properties +} + +Function Add-PropertyOption { + <# + .SYNOPSIS + Adds the spec for the property type to the existing module specification. 
+ #> + param( + [Parameter(Mandatory=$true)][Hashtable]$Spec, + [Parameter(Mandatory=$true)] + [Microsoft.Management.Infrastructure.CimPropertyDeclaration]$Property + ) + + $option = @{ + required = $false + } + $property_name = $Property.Name + $property_type = $Property.CimType.ToString() + + if ($Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Key) -or + $Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Required)) { + $option.required = $true + } + + if ($null -ne $Property.Qualifiers['Values']) { + $option.choices = [System.Collections.Generic.List`1[Object]]$Property.Qualifiers['Values'].Value + } + + if ($property_name -eq "Name") { + # For backwards compatibility we support specifying the Name DSC property as item_name + $option.aliases = @("item_name") + } elseif ($property_name -ceq "key") { + # There seems to be a bug in the CIM property parsing when the property name is 'Key'. The CIM instance will + # think the name is 'key' when the MOF actually defines it as 'Key'. We set the proper casing so the module arg + # validator won't fire a case sensitive warning + $property_name = "Key" + } + + if ($Property.ReferenceClassName -eq "MSFT_Credential") { + # Special handling for the MSFT_Credential type (PSCredential), we handle this with having 2 options that + # have the suffix _username and _password. + $option_spec_pass = @{ + type = "str" + required = $option.required + no_log = $true + } + $Spec.options."$($property_name)_password" = $option_spec_pass + $Spec.required_together.Add(@("$($property_name)_username", "$($property_name)_password")) > $null + + $property_name = "$($property_name)_username" + $option.type = "str" + } elseif ($Property.ReferenceClassName -eq "MSFT_KeyValuePair") { + $option.type = "dict" + } elseif ($property_type.EndsWith("Array")) { + $option.type = "list" + $option.elements = ConvertTo-ArgSpecType -CimType $property_type.Substring(0, $property_type.Length - 5) + } else { + $option.type = ConvertTo-ArgSpecType -CimType $property_type + } + + if (($option.type -eq "dict" -or ($option.type -eq "list" -and $option.elements -eq "dict")) -and + $Property.ReferenceClassName -ne "MSFT_KeyValuePair") { + # Get the sub spec if the type is a Instance (CimInstance/dict) + $sub_option_spec = Get-OptionSpec -ClassName $Property.ReferenceClassName + $option += $sub_option_spec + } + + $Spec.options.$property_name = $option +} + +Function Get-OptionSpec { + <# + .SYNOPSIS + Generates the specifiec used in AnsibleModule for a CIM MOF resource name. + + .NOTES + This won't be able to retrieve the default values for an option as that is not defined in the MOF for a resource. + Default values are still preserved in the DSC engine if we don't pass in the property at all, we just can't report + on what they are automatically. + #> + param( + [Parameter(Mandatory=$true)][String]$ClassName + ) + + $spec = @{ + options = @{} + required_together = [System.Collections.ArrayList]@() + } + $properties = Get-DscCimClassProperties -ClassName $ClassName + foreach ($property in $properties) { + Add-PropertyOption -Spec $spec -Property $property + } + + return $spec +} + +Function ConvertTo-CimInstance { + <# + .SYNOPSIS + Converts a dict to a CimInstance of the specified Class. Also provides a + better error message if this fails that contains the option name that failed. 
+ #> + param( + [Parameter(Mandatory=$true)][String]$Name, + [Parameter(Mandatory=$true)][String]$ClassName, + [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Value, + [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module, + [Switch]$Recurse + ) + + $properties = @{} + foreach ($value_info in $Value.GetEnumerator()) { + # Need to remove all null values from existing dict so the conversion works + if ($null -eq $value_info.Value) { + continue + } + $properties.($value_info.Key) = $value_info.Value + } + + if ($Recurse) { + # We want to validate and convert and values to what's required by DSC + $properties = ConvertTo-DscProperty -ClassName $ClassName -Params $properties -Module $Module + } + + try { + return (New-CimInstance -ClassName $ClassName -Property $properties -ClientOnly) + } catch { + # New-CimInstance raises a poor error message, make sure we mention what option it is for + $Module.FailJson("Failed to cast dict value for option '$Name' to a CimInstance: $($_.Exception.Message)", $_) + } +} + +Function ConvertTo-DscProperty { + <# + .SYNOPSIS + Converts the input module parameters that have been validated and casted + into the types expected by the DSC engine. This is mostly done to deal with + types like PSCredential and Dictionaries. + #> + param( + [Parameter(Mandatory=$true)][String]$ClassName, + [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Params, + [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module + ) + $properties = Get-DscCimClassProperties -ClassName $ClassName + + $dsc_properties = @{} + foreach ($property in $properties) { + $property_name = $property.Name + $property_type = $property.CimType.ToString() + + if ($property.ReferenceClassName -eq "MSFT_Credential") { + $username = $Params."$($property_name)_username" + $password = $Params."$($property_name)_password" + + # No user set == No option set in playbook, skip this property + if ($null -eq $username) { + continue + } + $sec_password = ConvertTo-SecureString -String $password -AsPlainText -Force + $value = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $sec_password + } else { + $value = $Params.$property_name + + # The actual value wasn't set, skip adding this property + if ($null -eq $value) { + continue + } + + if ($property.ReferenceClassName -eq "MSFT_KeyValuePair") { + $key_value_pairs = [System.Collections.Generic.List`1[CimInstance]]@() + foreach ($value_info in $value.GetEnumerator()) { + $kvp = @{Key = $value_info.Key; Value = $value_info.Value.ToString()} + $cim_instance = ConvertTo-CimInstance -Name $property_name -ClassName MSFT_KeyValuePair ` + -Value $kvp -Module $Module + $key_value_pairs.Add($cim_instance) > $null + } + $value = $key_value_pairs.ToArray() + } elseif ($null -ne $property.ReferenceClassName) { + # Convert the dict to a CimInstance (or list of CimInstances) + $convert_args = @{ + ClassName = $property.ReferenceClassName + Module = $Module + Name = $property_name + Recurse = $true + } + if ($property_type.EndsWith("Array")) { + $value = [System.Collections.Generic.List`1[CimInstance]]@() + foreach ($raw in $Params.$property_name.GetEnumerator()) { + $cim_instance = ConvertTo-CimInstance -Value $raw @convert_args + $value.Add($cim_instance) > $null + } + $value = $value.ToArray() # Need to make sure we are dealing with an Array not a List + } else { + $value = ConvertTo-CimInstance -Value $value @convert_args + } + } + } + $dsc_properties.$property_name = $value + } + + return 
$dsc_properties +} + +Function Invoke-DscMethod { + <# + .SYNOPSIS + Invokes the DSC Resource Method specified in a separate PS pipeline. This is + done so we can retrieve the Verbose stream and return it to the user + for further debugging. + #> + param( + [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module, + [Parameter(Mandatory=$true)][String]$Method, + [Parameter(Mandatory=$true)][Hashtable]$Arguments + ) + + # Invoke the DSC resource in a separate runspace so we can capture the Verbose output + $ps = [PowerShell]::Create() + $ps.AddCommand("Invoke-DscResource").AddParameter("Method", $Method) > $null + $ps.AddParameters($Arguments) > $null + + $result = $ps.Invoke() + + # Pass the warnings through to the AnsibleModule return result + foreach ($warning in $ps.Streams.Warning) { + $Module.Warn($warning.Message) + } + + # If running at a high enough verbosity, add the verbose output to the AnsibleModule return result + if ($Module.Verbosity -ge 3) { + $verbose_logs = [System.Collections.Generic.List`1[String]]@() + foreach ($verbosity in $ps.Streams.Verbose) { + $verbose_logs.Add($verbosity.Message) > $null + } + $Module.Result."verbose_$($Method.ToLower())" = $verbose_logs + } + + if ($ps.HadErrors) { + # Cannot pass in the ErrorRecord as it's a RemotingErrorRecord and doesn't contain the ScriptStackTrace + # or other info that would be useful + $Module.FailJson("Failed to invoke DSC $Method method: $($ps.Streams.Error[0].Exception.Message)") + } + + return $result +} + +# win_dsc is unique in that it builds the arg spec based on DSC Resource input. To get this info +# we need to read the resource_name and module_version values, which is done outside of Ansible.Basic +if ($args.Length -gt 0) { + $params = Get-Content -Path $args[0] | ConvertFrom-Json +} else { + $params = $complex_args +} +if (-not $params.ContainsKey("resource_name")) { + $res = @{ + msg = "missing required argument: resource_name" + failed = $true + } + Write-Output -InputObject (ConvertTo-Json -Compress -InputObject $res) + exit 1 +} +$resource_name = $params.resource_name + +if ($params.ContainsKey("module_version")) { + $module_version = $params.module_version +} else { + $module_version = "latest" +} + +$module_versions = (Get-DscResource -Name $resource_name -ErrorAction SilentlyContinue | Sort-Object -Property Version) +$resource = $null +if ($module_version -eq "latest" -and $null -ne $module_versions) { + $resource = $module_versions[-1] +} elseif ($module_version -ne "latest") { + $resource = $module_versions | Where-Object { $_.Version -eq $module_version } +} + +if (-not $resource) { + if ($module_version -eq "latest") { + $msg = "Resource '$resource_name' not found." + } else { + $msg = "Resource '$resource_name' with version '$module_version' not found." + $msg += " Versions installed: '$($module_versions.Version -join "', '")'."
+ } + + Write-Output -InputObject (ConvertTo-Json -Compress -InputObject @{ failed = $true; msg = $msg }) + exit 1 +} + +# Build the base args for the DSC Invocation based on the resource selected +$dsc_args = @{ + Name = $resource.Name +} + +# Binary resources are not working very well with that approach - need to guesstimate module name/version +$module_version = $null +if ($resource.Module) { + $dsc_args.ModuleName = @{ + ModuleName = $resource.Module.Name + ModuleVersion = $resource.Module.Version + } + $module_version = $resource.Module.Version.ToString() +} else { + $dsc_args.ModuleName = "PSDesiredStateConfiguration" +} + +# To ensure the class registered with CIM is the one based on our version, we want to run the Get method so the DSC +# engine updates the metadata property. We don't care about any errors here +try { + Invoke-DscResource -Method Get -Property @{Fake="Fake"} @dsc_args > $null +} catch {} + +# Dynamically build the option spec based on the resource_name specified and create the module object +$spec = Get-OptionSpec -ClassName $resource.ResourceType +$spec.supports_check_mode = $true +$spec.options.module_version = @{ type = "str"; default = "latest" } +$spec.options.resource_name = @{ type = "str"; required = $true } + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) +$module.Result.reboot_required = $false +$module.Result.module_version = $module_version + +# Build the DSC invocation arguments and invoke the resource +$dsc_args.Property = ConvertTo-DscProperty -ClassName $resource.ResourceType -Module $module -Params $Module.Params +$dsc_args.Verbose = $true + +$test_result = Invoke-DscMethod -Module $module -Method Test -Arguments $dsc_args +if ($test_result.InDesiredState -ne $true) { + if (-not $module.CheckMode) { + $result = Invoke-DscMethod -Module $module -Method Set -Arguments $dsc_args + $module.Result.reboot_required = $result.RebootRequired + } + $module.Result.changed = $true +} + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_dsc.py b/test/support/windows-integration/plugins/modules/win_dsc.py new file mode 100644 index 00000000..200d025e --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_dsc.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Trond Hindenes , and others +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_dsc +version_added: "2.4" +short_description: Invokes a PowerShell DSC configuration +description: +- Configures a resource using PowerShell DSC. +- Requires PowerShell version 5.0 or newer. +- Most of the options for this module are dynamic and will vary depending on + the DSC Resource specified in I(resource_name). +- See :doc:`/user_guide/windows_dsc` for more information on how to use this module. +options: + resource_name: + description: + - The name of the DSC Resource to use. + - Must be accessible to PowerShell using any of the default paths. + type: str + required: yes + module_version: + description: + - Can be used to configure the exact version of the DSC resource to be + invoked. + - Useful if the target node has multiple versions of the module + containing the DSC resource installed.
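+# Editor's note: a quick interactive PowerShell sketch (resource name hypothetical)
+# for listing the versions the module can select from, mirroring the Get-DscResource
+# lookup in win_dsc.ps1 above:
+#   Get-DscResource -Name Archive | Sort-Object -Property Version | Select-Object Name, Version, ModuleName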
+    - If not specified, the module will follow standard PowerShell convention
+      and use the highest version available.
+    type: str
+    default: latest
+  free_form:
+    description:
+    - The M(win_dsc) module takes in multiple free form options based on the
+      DSC resource being invoked by I(resource_name).
+    - There is no option actually named C(free_form) so see the examples.
+    - This module will try to convert the option to the correct type required
+      by the DSC resource and throw a warning if it fails.
+    - If the type of the DSC resource option is a C(CimInstance) or
+      C(CimInstance[]), this means the value should be a dictionary or list
+      of dictionaries based on the values required by that option.
+    - If the type of the DSC resource option is a C(PSCredential) then there
+      needs to be 2 options set in the Ansible task definition suffixed with
+      C(_username) and C(_password).
+    - If the type of the DSC resource option is an array, then a list should be
+      provided but a comma separated string also works. Use a list where
+      possible as no escaping is required and it works with more complex types
+      like C(CimInstance[]).
+    - If the type of the DSC resource option is a C(DateTime), you should use
+      a string in ISO 8601 format to ensure the exact date is used.
+    - Since Ansible 2.8, Ansible will now validate the input fields against the
+      DSC resource definition automatically. Older versions will silently
+      ignore invalid fields.
+    type: str
+    required: true
+notes:
+- By default there are a few builtin resources that come with PowerShell 5.0,
+  see U(https://docs.microsoft.com/en-us/powershell/scripting/dsc/resources/resources) for
+  more information on these resources.
+- Custom DSC resources can be installed with M(win_psmodule) using the I(name)
+  option.
+- The DSC engine runs each task as the SYSTEM account; any resources that need
+  to be accessed with a different account need to have C(PsDscRunAsCredential)
+  set.
+- To see the valid options for a DSC resource, run the module with C(-vvv) to
+  show the possible module invocation. Default values are not shown in this
+  output but are applied within the DSC engine.
+author:
+- Trond Hindenes (@trondhindenes)
+'''
+
+EXAMPLES = r'''
+- name: Extract zip file
+  win_dsc:
+    resource_name: Archive
+    Ensure: Present
+    Path: C:\Temp\zipfile.zip
+    Destination: C:\Temp\Temp2
+
+- name: Install a Windows feature with the WindowsFeature resource
+  win_dsc:
+    resource_name: WindowsFeature
+    Name: telnet-client
+
+- name: Edit HKCU reg key under specific user
+  win_dsc:
+    resource_name: Registry
+    Ensure: Present
+    Key: HKEY_CURRENT_USER\ExampleKey
+    ValueName: TestValue
+    ValueData: TestData
+    PsDscRunAsCredential_username: '{{ansible_user}}'
+    PsDscRunAsCredential_password: '{{ansible_password}}'
+  no_log: true
+
+- name: Create file with multiple attributes
+  win_dsc:
+    resource_name: File
+    DestinationPath: C:\ansible\dsc
+    Attributes: # can also be a comma separated string, e.g.
'Hidden, System' + - Hidden + - System + Ensure: Present + Type: Directory + +- name: Call DSC resource with DateTime option + win_dsc: + resource_name: DateTimeResource + DateTimeOption: '2019-02-22T13:57:31.2311892+00:00' + +# more complex example using custom DSC resource and dict values +- name: Setup the xWebAdministration module + win_psmodule: + name: xWebAdministration + state: present + +- name: Create IIS Website with Binding and Authentication options + win_dsc: + resource_name: xWebsite + Ensure: Present + Name: DSC Website + State: Started + PhysicalPath: C:\inetpub\wwwroot + BindingInfo: # Example of a CimInstance[] DSC parameter (list of dicts) + - Protocol: https + Port: 1234 + CertificateStoreName: MY + CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659 + HostName: DSCTest + IPAddress: '*' + SSLFlags: '1' + - Protocol: http + Port: 4321 + IPAddress: '*' + AuthenticationInfo: # Example of a CimInstance DSC parameter (dict) + Anonymous: no + Basic: true + Digest: false + Windows: yes +''' + +RETURN = r''' +module_version: + description: The version of the dsc resource/module used. + returned: always + type: str + sample: "1.0.1" +reboot_required: + description: Flag returned from the DSC engine indicating whether or not + the machine requires a reboot for the invoked changes to take effect. + returned: always + type: bool + sample: true +verbose_test: + description: The verbose output as a list from executing the DSC test + method. + returned: Ansible verbosity is -vvv or greater + type: list + sample: [ + "Perform operation 'Invoke CimMethod' with the following parameters, ", + "[SERVER]: LCM: [Start Test ] [[File]DirectResourceAccess]", + "Operation 'Invoke CimMethod' complete." + ] +verbose_set: + description: The verbose output as a list from executing the DSC Set + method. + returned: Ansible verbosity is -vvv or greater and a change occurred + type: list + sample: [ + "Perform operation 'Invoke CimMethod' with the following parameters, ", + "[SERVER]: LCM: [Start Set ] [[File]DirectResourceAccess]", + "Operation 'Invoke CimMethod' complete." 
+ ] +''' diff --git a/test/support/windows-integration/plugins/modules/win_feature.ps1 b/test/support/windows-integration/plugins/modules/win_feature.ps1 new file mode 100644 index 00000000..9a7e1c30 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_feature.ps1 @@ -0,0 +1,111 @@ +#!powershell + +# Copyright: (c) 2014, Paul Durivage +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy + +Import-Module -Name ServerManager + +$result = @{ + changed = $false +} + +$params = Parse-Args $args -supports_check_mode $true +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false + +$name = Get-AnsibleParam -obj $params -name "name" -type "list" -failifempty $true +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent" + +$include_sub_features = Get-AnsibleParam -obj $params -name "include_sub_features" -type "bool" -default $false +$include_management_tools = Get-AnsibleParam -obj $params -name "include_management_tools" -type "bool" -default $false +$source = Get-AnsibleParam -obj $params -name "source" -type "str" + +$install_cmdlet = $false +if (Get-Command -Name Install-WindowsFeature -ErrorAction SilentlyContinue) { + Set-Alias -Name Install-AnsibleWindowsFeature -Value Install-WindowsFeature + Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Uninstall-WindowsFeature + $install_cmdlet = $true +} elseif (Get-Command -Name Add-WindowsFeature -ErrorAction SilentlyContinue) { + Set-Alias -Name Install-AnsibleWindowsFeature -Value Add-WindowsFeature + Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Remove-WindowsFeature +} else { + Fail-Json -obj $result -message "This version of Windows does not support the cmdlets Install-WindowsFeature or Add-WindowsFeature" +} + +if ($state -eq "present") { + $install_args = @{ + Name = $name + IncludeAllSubFeature = $include_sub_features + Restart = $false + WhatIf = $check_mode + ErrorAction = "Stop" + } + + if ($install_cmdlet) { + $install_args.IncludeManagementTools = $include_management_tools + $install_args.Confirm = $false + if ($source) { + if (-not (Test-Path -Path $source)) { + Fail-Json -obj $result -message "Failed to find source path $source for feature install" + } + $install_args.Source = $source + } + } + + try { + $action_results = Install-AnsibleWindowsFeature @install_args + } catch { + Fail-Json -obj $result -message "Failed to install Windows Feature: $($_.Exception.Message)" + } +} else { + $uninstall_args = @{ + Name = $name + Restart = $false + WhatIf = $check_mode + ErrorAction = "Stop" + } + if ($install_cmdlet) { + $uninstall_args.IncludeManagementTools = $include_management_tools + } + + try { + $action_results = Uninstall-AnsibleWindowsFeature @uninstall_args + } catch { + Fail-Json -obj $result -message "Failed to uninstall Windows Feature: $($_.Exception.Message)" + } +} + +# Loop through results and create a hash containing details about +# each role/feature that is installed/removed +# $action_results.FeatureResult is not empty if anything was changed +$feature_results = @() +foreach ($action_result in $action_results.FeatureResult) { + $message = @() + foreach ($msg in $action_result.Message) { + $message += @{ + message_type = $msg.MessageType.ToString() + error_code = $msg.ErrorCode + text = $msg.Text + } + } + + $feature_results += @{ + id = $action_result.Id + display_name = 
$action_result.DisplayName + message = $message + reboot_required = ConvertTo-Bool -obj $action_result.RestartNeeded + skip_reason = $action_result.SkipReason.ToString() + success = ConvertTo-Bool -obj $action_result.Success + restart_needed = ConvertTo-Bool -obj $action_result.RestartNeeded + } + $result.changed = $true +} +$result.feature_result = $feature_results +$result.success = ConvertTo-Bool -obj $action_results.Success +$result.exitcode = $action_results.ExitCode.ToString() +$result.reboot_required = ConvertTo-Bool -obj $action_results.RestartNeeded +# controls whether Ansible will fail or not +$result.failed = (-not $action_results.Success) + +Exit-Json -obj $result diff --git a/test/support/windows-integration/plugins/modules/win_feature.py b/test/support/windows-integration/plugins/modules/win_feature.py new file mode 100644 index 00000000..62e310b2 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_feature.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Paul Durivage +# Copyright: (c) 2014, Trond Hindenes +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_feature +version_added: "1.7" +short_description: Installs and uninstalls Windows Features on Windows Server +description: + - Installs or uninstalls Windows Roles or Features on Windows Server. + - This module uses the Add/Remove-WindowsFeature Cmdlets on Windows 2008 R2 + and Install/Uninstall-WindowsFeature Cmdlets on Windows 2012, which are not available on client os machines. +options: + name: + description: + - Names of roles or features to install as a single feature or a comma-separated list of features. + - To list all available features use the PowerShell command C(Get-WindowsFeature). + type: list + required: yes + state: + description: + - State of the features or roles on the system. + type: str + choices: [ absent, present ] + default: present + include_sub_features: + description: + - Adds all subfeatures of the specified feature. + type: bool + default: no + include_management_tools: + description: + - Adds the corresponding management tools to the specified feature. + - Not supported in Windows 2008 R2 and will be ignored. + type: bool + default: no + source: + description: + - Specify a source to install the feature from. + - Not supported in Windows 2008 R2 and will be ignored. + - Can either be C({driveletter}:\sources\sxs) or C(\\{IP}\share\sources\sxs). 
+    type: str
+    version_added: "2.1"
+seealso:
+- module: win_chocolatey
+- module: win_package
+author:
+    - Paul Durivage (@angstwad)
+    - Trond Hindenes (@trondhindenes)
+'''
+
+EXAMPLES = r'''
+- name: Install IIS (Web-Server only)
+  win_feature:
+    name: Web-Server
+    state: present
+
+- name: Install IIS (Web-Server and Web-Common-Http)
+  win_feature:
+    name:
+    - Web-Server
+    - Web-Common-Http
+    state: present
+
+- name: Install NET-Framework-Core from file
+  win_feature:
+    name: NET-Framework-Core
+    source: C:\Temp\iso\sources\sxs
+    state: present
+
+- name: Install IIS Web-Server with sub features and management tools
+  win_feature:
+    name: Web-Server
+    state: present
+    include_sub_features: yes
+    include_management_tools: yes
+  register: win_feature
+
+- name: Reboot if installing Web-Server feature requires it
+  win_reboot:
+  when: win_feature.reboot_required
+'''
+
+RETURN = r'''
+exitcode:
+    description: The stringified exit code from the feature installation/removal command.
+    returned: always
+    type: str
+    sample: Success
+feature_result:
+    description: List of features that were installed or removed.
+    returned: success
+    type: complex
+    contains:
+        display_name:
+            description: Feature display name.
+            returned: always
+            type: str
+            sample: "Telnet Client"
+        id:
+            description: The ID of the role or feature that was installed or removed.
+            returned: always
+            type: int
+            sample: 44
+        message:
+            description: Any messages returned from the feature subsystem that occurred during installation or removal of this feature.
+            returned: always
+            type: list
+            elements: str
+            sample: []
+        reboot_required:
+            description: True when the target server requires a reboot as a result of installing or removing this feature.
+            returned: always
+            type: bool
+            sample: true
+        restart_needed:
+            description: DEPRECATED in Ansible 2.4 (refer to C(reboot_required) instead). True when the target server requires a reboot as a
+                result of installing or removing this feature.
+            returned: always
+            type: bool
+            sample: true
+        skip_reason:
+            description: The reason a feature installation or removal was skipped.
+            returned: always
+            type: str
+            sample: NotSkipped
+        success:
+            description: If the feature installation or removal was successful.
+            returned: always
+            type: bool
+            sample: true
+reboot_required:
+    description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot).
+ returned: success + type: bool + sample: true +''' diff --git a/test/support/windows-integration/plugins/modules/win_file.ps1 b/test/support/windows-integration/plugins/modules/win_file.ps1 new file mode 100644 index 00000000..54427549 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_file.ps1 @@ -0,0 +1,152 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy + +$ErrorActionPreference = "Stop" + +$params = Parse-Args $args -supports_check_mode $true + +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false +$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP + +$path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -aliases "dest","name" +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -validateset "absent","directory","file","touch" + +# used in template/copy when dest is the path to a dir and source is a file +$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str" +if ((Test-Path -LiteralPath $path -PathType Container) -and ($null -ne $original_basename)) { + $path = Join-Path -Path $path -ChildPath $original_basename +} + +$result = @{ + changed = $false +} + +# Used to delete symlinks as powershell cannot delete broken symlinks +$symlink_util = @" +using System; +using System.ComponentModel; +using System.Runtime.InteropServices; + +namespace Ansible.Command { + public class SymLinkHelper { + [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)] + public static extern bool DeleteFileW(string lpFileName); + + [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)] + public static extern bool RemoveDirectoryW(string lpPathName); + + public static void DeleteDirectory(string path) { + if (!RemoveDirectoryW(path)) + throw new Exception(String.Format("RemoveDirectoryW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message)); + } + + public static void DeleteFile(string path) { + if (!DeleteFileW(path)) + throw new Exception(String.Format("DeleteFileW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message)); + } + } +} +"@ +$original_tmp = $env:TMP +$env:TMP = $_remote_tmp +Add-Type -TypeDefinition $symlink_util +$env:TMP = $original_tmp + +# Used to delete directories and files with logic on handling symbolic links +function Remove-File($file, $checkmode) { + try { + if ($file.Attributes -band [System.IO.FileAttributes]::ReparsePoint) { + # Bug with powershell, if you try and delete a symbolic link that is pointing + # to an invalid path it will fail, using Win32 API to do this instead + if ($file.PSIsContainer) { + if (-not $checkmode) { + [Ansible.Command.SymLinkHelper]::DeleteDirectory($file.FullName) + } + } else { + if (-not $checkmode) { + [Ansible.Command.SymlinkHelper]::DeleteFile($file.FullName) + } + } + } elseif ($file.PSIsContainer) { + Remove-Directory -directory $file -checkmode $checkmode + } else { + Remove-Item -LiteralPath $file.FullName -Force -WhatIf:$checkmode + } + } catch [Exception] { + Fail-Json $result "Failed to delete $($file.FullName): $($_.Exception.Message)" + } +} + +function Remove-Directory($directory, $checkmode) { + foreach ($file in Get-ChildItem -LiteralPath $directory.FullName) { + Remove-File -file $file -checkmode $checkmode + } + Remove-Item 
-LiteralPath $directory.FullName -Force -Recurse -WhatIf:$checkmode +} + + +if ($state -eq "touch") { + if (Test-Path -LiteralPath $path) { + if (-not $check_mode) { + (Get-ChildItem -LiteralPath $path).LastWriteTime = Get-Date + } + $result.changed = $true + } else { + Write-Output $null | Out-File -LiteralPath $path -Encoding ASCII -WhatIf:$check_mode + $result.changed = $true + } +} + +if (Test-Path -LiteralPath $path) { + $fileinfo = Get-Item -LiteralPath $path -Force + if ($state -eq "absent") { + Remove-File -file $fileinfo -checkmode $check_mode + $result.changed = $true + } else { + if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) { + Fail-Json $result "path $path is not a directory" + } + + if ($state -eq "file" -and $fileinfo.PsIsContainer) { + Fail-Json $result "path $path is not a file" + } + } + +} else { + + # If state is not supplied, test the $path to see if it looks like + # a file or a folder and set state to file or folder + if ($null -eq $state) { + $basename = Split-Path -Path $path -Leaf + if ($basename.length -gt 0) { + $state = "file" + } else { + $state = "directory" + } + } + + if ($state -eq "directory") { + try { + New-Item -Path $path -ItemType Directory -WhatIf:$check_mode | Out-Null + } catch { + if ($_.CategoryInfo.Category -eq "ResourceExists") { + $fileinfo = Get-Item -LiteralPath $_.CategoryInfo.TargetName + if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) { + Fail-Json $result "path $path is not a directory" + } + } else { + Fail-Json $result $_.Exception.Message + } + } + $result.changed = $true + } elseif ($state -eq "file") { + Fail-Json $result "path $path will not be created" + } + +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_file.py b/test/support/windows-integration/plugins/modules/win_file.py new file mode 100644 index 00000000..28149579 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_file.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_file +version_added: "1.9.2" +short_description: Creates, touches or removes files or directories +description: + - Creates (empty) files, updates file modification stamps of existing files, + and can create or remove directories. + - Unlike M(file), does not modify ownership, permissions or manipulate links. + - For non-Windows targets, use the M(file) module instead. +options: + path: + description: + - Path to the file being managed. + required: yes + type: path + aliases: [ dest, name ] + state: + description: + - If C(directory), all immediate subdirectories will be created if they + do not exist. + - If C(file), the file will NOT be created if it does not exist, see the M(copy) + or M(template) module if you want that behavior. + - If C(absent), directories will be recursively deleted, and files will be removed. + - If C(touch), an empty file will be created if the C(path) does not + exist, while an existing file or directory will receive updated file access and + modification times (similar to the way C(touch) works from the command line). 
+ type: str + choices: [ absent, directory, file, touch ] +seealso: +- module: file +- module: win_acl +- module: win_acl_inheritance +- module: win_owner +- module: win_stat +author: +- Jon Hawkesworth (@jhawkesworth) +''' + +EXAMPLES = r''' +- name: Touch a file (creates if not present, updates modification time if present) + win_file: + path: C:\Temp\foo.conf + state: touch + +- name: Remove a file, if present + win_file: + path: C:\Temp\foo.conf + state: absent + +- name: Create directory structure + win_file: + path: C:\Temp\folder\subfolder + state: directory + +- name: Remove directory structure + win_file: + path: C:\Temp + state: absent +''' diff --git a/test/support/windows-integration/plugins/modules/win_find.ps1 b/test/support/windows-integration/plugins/modules/win_find.ps1 new file mode 100644 index 00000000..bc57c5ff --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_find.ps1 @@ -0,0 +1,416 @@ +#!powershell + +# Copyright: (c) 2016, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.LinkUtil + +$spec = @{ + options = @{ + paths = @{ type = "list"; elements = "str"; required = $true } + age = @{ type = "str" } + age_stamp = @{ type = "str"; default = "mtime"; choices = "mtime", "ctime", "atime" } + file_type = @{ type = "str"; default = "file"; choices = "file", "directory" } + follow = @{ type = "bool"; default = $false } + hidden = @{ type = "bool"; default = $false } + patterns = @{ type = "list"; elements = "str"; aliases = "regex", "regexp" } + recurse = @{ type = "bool"; default = $false } + size = @{ type = "str" } + use_regex = @{ type = "bool"; default = $false } + get_checksum = @{ type = "bool"; default = $true } + checksum_algorithm = @{ type = "str"; default = "sha1"; choices = "md5", "sha1", "sha256", "sha384", "sha512" } + } + supports_check_mode = $true +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$paths = $module.Params.paths +$age = $module.Params.age +$age_stamp = $module.Params.age_stamp +$file_type = $module.Params.file_type +$follow = $module.Params.follow +$hidden = $module.Params.hidden +$patterns = $module.Params.patterns +$recurse = $module.Params.recurse +$size = $module.Params.size +$use_regex = $module.Params.use_regex +$get_checksum = $module.Params.get_checksum +$checksum_algorithm = $module.Params.checksum_algorithm + +$module.Result.examined = 0 +$module.Result.files = @() +$module.Result.matched = 0 + +Load-LinkUtils + +Function Assert-Age { + Param ( + [System.IO.FileSystemInfo]$File, + [System.Int64]$Age, + [System.String]$AgeStamp + ) + + $actual_age = switch ($AgeStamp) { + mtime { $File.LastWriteTime.Ticks } + ctime { $File.CreationTime.Ticks } + atime { $File.LastAccessTime.Ticks } + } + + if ($Age -ge 0) { + return $Age -ge $actual_age + } else { + return ($Age * -1) -le $actual_age + } +} + +Function Assert-FileType { + Param ( + [System.IO.FileSystemInfo]$File, + [System.String]$FileType + ) + + $is_dir = $File.Attributes.HasFlag([System.IO.FileAttributes]::Directory) + return ($FileType -eq 'directory' -and $is_dir) -or ($FileType -eq 'file' -and -not $is_dir) +} + +Function Assert-FileHidden { + Param ( + [System.IO.FileSystemInfo]$File, + [Switch]$IsHidden + ) + + $file_is_hidden = $File.Attributes.HasFlag([System.IO.FileAttributes]::Hidden) + return $IsHidden.IsPresent -eq $file_is_hidden +} + + +Function Assert-FileNamePattern { + Param ( + 
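+        # Sketch of the two matching modes this helper supports (hypothetical file names):
+        #   wildcard: 'report.log' -like '*.log'                              -> $true
+        #   regex:    'out_20190101-120000.log' -match 'out_\d{8}-\d{6}\.log' -> $true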
[System.IO.FileSystemInfo]$File, + [System.String[]]$Patterns, + [Switch]$UseRegex + ) + + $valid_match = $false + foreach ($pattern in $Patterns) { + if ($UseRegex) { + if ($File.Name -match $pattern) { + $valid_match = $true + break + } + } else { + if ($File.Name -like $pattern) { + $valid_match = $true + break + } + } + } + return $valid_match +} + +Function Assert-FileSize { + Param ( + [System.IO.FileSystemInfo]$File, + [System.Int64]$Size + ) + + if ($Size -ge 0) { + return $File.Length -ge $Size + } else { + return $File.Length -le ($Size * -1) + } +} + +Function Get-FileChecksum { + Param ( + [System.String]$Path, + [System.String]$Algorithm + ) + + $sp = switch ($algorithm) { + 'md5' { New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider } + 'sha1' { New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider } + 'sha256' { New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider } + 'sha384' { New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider } + 'sha512' { New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider } + } + + $fp = [System.IO.File]::Open($Path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite) + try { + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower() + } finally { + $fp.Dispose() + } + + return $hash +} + +Function Search-Path { + [CmdletBinding()] + Param ( + [Parameter(Mandatory=$true)] + [System.String] + $Path, + + [Parameter(Mandatory=$true)] + [AllowEmptyCollection()] + [System.Collections.Generic.HashSet`1[System.String]] + $CheckedPaths, + + [Parameter(Mandatory=$true)] + [Object] + $Module, + + [System.Int64] + $Age, + + [System.String] + $AgeStamp, + + [System.String] + $FileType, + + [Switch] + $Follow, + + [Switch] + $GetChecksum, + + [Switch] + $IsHidden, + + [System.String[]] + $Patterns, + + [Switch] + $Recurse, + + [System.Int64] + $Size, + + [Switch] + $UseRegex + ) + + $dir_obj = New-Object -TypeName System.IO.DirectoryInfo -ArgumentList $Path + if ([Int32]$dir_obj.Attributes -eq -1) { + $Module.Warn("Argument path '$Path' does not exist, skipping") + return + } elseif (-not $dir_obj.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) { + $Module.Warn("Argument path '$Path' is a file not a directory, skipping") + return + } + + $dir_files = @() + try { + $dir_files = $dir_obj.EnumerateFileSystemInfos("*", [System.IO.SearchOption]::TopDirectoryOnly) + } catch [System.IO.DirectoryNotFoundException] { # Broken ReparsePoint/Symlink, cannot enumerate + } catch [System.UnauthorizedAccessException] {} # No ListDirectory permissions, Get-ChildItem ignored this + + foreach ($dir_child in $dir_files) { + if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -and $Recurse) { + if ($Follow -or -not $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReparsePoint)) { + $PSBoundParameters.Remove('Path') > $null + Search-Path -Path $dir_child.FullName @PSBoundParameters + } + } + + # Check to see if we've already encountered this path and skip if we have. 
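+        # HashSet`1[String].Add returns $false for a value that is already present, so
+        # the guard below doubles as the "mark as seen" step. A minimal sketch:
+        #   $seen = [System.Collections.Generic.HashSet`1[System.String]]@()
+        #   $seen.Add('c:\temp\a.txt')   # $true  - first visit, file is examined
+        #   $seen.Add('c:\temp\a.txt')   # $false - duplicate, loop skips it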
+ if (-not $CheckedPaths.Add($dir_child.FullName.ToLowerInvariant())) { + continue + } + + $Module.Result.examined++ + + if ($PSBoundParameters.ContainsKey('Age')) { + $age_match = Assert-Age -File $dir_child -Age $Age -AgeStamp $AgeStamp + } else { + $age_match = $true + } + + $file_type_match = Assert-FileType -File $dir_child -FileType $FileType + $hidden_match = Assert-FileHidden -File $dir_child -IsHidden:$IsHidden + + if ($PSBoundParameters.ContainsKey('Patterns')) { + $pattern_match = Assert-FileNamePattern -File $dir_child -Patterns $Patterns -UseRegex:$UseRegex.IsPresent + } else { + $pattern_match = $true + } + + if ($PSBoundParameters.ContainsKey('Size')) { + $size_match = Assert-FileSize -File $dir_child -Size $Size + } else { + $size_match = $true + } + + if (-not ($age_match -and $file_type_match -and $hidden_match -and $pattern_match -and $size_match)) { + continue + } + + # It passed all our filters so add it + $module.Result.matched++ + + # TODO: Make this generic so it can be shared with win_find and win_stat. + $epoch = New-Object -Type System.DateTime -ArgumentList 1970, 1, 1, 0, 0, 0, 0 + $file_info = @{ + attributes = $dir_child.Attributes.ToString() + checksum = $null + creationtime = (New-TimeSpan -Start $epoch -End $dir_child.CreationTime).TotalSeconds + exists = $true + extension = $null + filename = $dir_child.Name + isarchive = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Archive) + isdir = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory) + ishidden = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Hidden) + isreadonly = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReadOnly) + isreg = $false + isshared = $false + lastaccesstime = (New-TimeSpan -Start $epoch -End $dir_child.LastAccessTime).TotalSeconds + lastwritetime = (New-TimeSpan -Start $epoch -End $dir_child.LastWriteTime).TotalSeconds + owner = $null + path = $dir_child.FullName + sharename = $null + size = $null + } + + try { + $file_info.owner = $dir_child.GetAccessControl().Owner + } catch {} # May not have rights to get the Owner, historical behaviour is to ignore. + + if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) { + $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($dir_child.FullName -replace '\\', '\\')'" + if ($null -ne $share_info) { + $file_info.isshared = $true + $file_info.sharename = $share_info.Name + } + } else { + $file_info.extension = $dir_child.Extension + $file_info.isreg = $true + $file_info.size = $dir_child.Length + + if ($GetChecksum) { + try { + $file_info.checksum = Get-FileChecksum -Path $dir_child.FullName -Algorithm $checksum_algorithm + } catch {} # Just keep the checksum as $null in the case of a failure. 
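+                # Get-FileChecksum returns a lower-case hex digest; for reference, the
+                # sha1 of a zero-byte file is 'da39a3ee5e6b4b0d3255bfef95601890afd80709'.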
+            }
+        }
+
+        # Append the link information if the path is a link
+        $link_info = @{
+            isjunction = $false
+            islnk = $false
+            nlink = 1
+            lnk_source = $null
+            lnk_target = $null
+            hlnk_targets = @()
+        }
+        $link_stat = Get-Link -link_path $dir_child.FullName
+        if ($null -ne $link_stat) {
+            switch ($link_stat.Type) {
+                "SymbolicLink" {
+                    $link_info.islnk = $true
+                    $link_info.isreg = $false
+                    $link_info.lnk_source = $link_stat.AbsolutePath
+                    $link_info.lnk_target = $link_stat.TargetPath
+                    break
+                }
+                "JunctionPoint" {
+                    $link_info.isjunction = $true
+                    $link_info.isreg = $false
+                    $link_info.lnk_source = $link_stat.AbsolutePath
+                    $link_info.lnk_target = $link_stat.TargetPath
+                    break
+                }
+                "HardLink" {
+                    $link_info.nlink = $link_stat.HardTargets.Count
+
+                    # remove current path from the targets
+                    $hlnk_targets = $link_stat.HardTargets | Where-Object { $_ -ne $dir_child.FullName }
+                    $link_info.hlnk_targets = @($hlnk_targets)
+                    break
+                }
+            }
+        }
+        foreach ($kv in $link_info.GetEnumerator()) {
+            $file_info.$($kv.Key) = $kv.Value
+        }
+
+        # Output the file_info object
+        $file_info
+    }
+}
+
+$search_params = @{
+    CheckedPaths = [System.Collections.Generic.HashSet`1[System.String]]@()
+    GetChecksum = $get_checksum
+    Module = $module
+    FileType = $file_type
+    Follow = $follow
+    IsHidden = $hidden
+    Recurse = $recurse
+}
+
+if ($null -ne $age) {
+    $seconds_per_unit = @{'s'=1; 'm'=60; 'h'=3600; 'd'=86400; 'w'=604800}
+    $seconds_pattern = '^(-?\d+)(s|m|h|d|w)?$'
+    $match = $age -match $seconds_pattern
+    if ($Match) {
+        $specified_seconds = [Int64]$Matches[1]
+        if ($null -eq $Matches[2]) {
+            $chosen_unit = 's'
+        } else {
+            $chosen_unit = $Matches[2]
+        }
+
+        $total_seconds = $specified_seconds * ($seconds_per_unit.$chosen_unit)
+
+        if ($total_seconds -ge 0) {
+            $search_params.Age = (Get-Date).AddSeconds($total_seconds * -1).Ticks
+        } else {
+            # Make sure we add the positive value of seconds to current time then make it negative for later comparisons.
+            $age = (Get-Date).AddSeconds($total_seconds).Ticks
+            $search_params.Age = $age * -1
+        }
+        $search_params.AgeStamp = $age_stamp
+    } else {
+        $module.FailJson("Invalid age pattern specified")
+    }
+}
+
+if ($null -ne $patterns) {
+    $search_params.Patterns = $patterns
+    $search_params.UseRegex = $use_regex
+}
+
+if ($null -ne $size) {
+    $bytes_per_unit = @{'b'=1; 'k'=1KB; 'm'=1MB; 'g'=1GB;'t'=1TB}
+    $size_pattern = '^(-?\d+)(b|k|m|g|t)?$'
+    $match = $size -match $size_pattern
+    if ($Match) {
+        $specified_size = [Int64]$Matches[1]
+        if ($null -eq $Matches[2]) {
+            $chosen_byte = 'b'
+        } else {
+            $chosen_byte = $Matches[2]
+        }
+
+        $search_params.Size = $specified_size * ($bytes_per_unit.$chosen_byte)
+    } else {
+        $module.FailJson("Invalid size pattern specified")
+    }
+}
+
+$matched_files = foreach ($path in $paths) {
+    # Ensure we pass in an absolute path. We use the ExecutionContext as this is based on the PSProvider path not the
+    # process location which can be different.
+    $abs_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($path)
+    Search-Path -Path $abs_path @search_params
+}
+
+# Make sure we sort the files in alphabetical order.
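+# For example (hypothetical paths), matches produced in the order D:\logs\b.log,
+# C:\temp\a.txt come back sorted by their path value: C:\temp\a.txt, D:\logs\b.log.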
+$module.Result.files = @() + ($matched_files | Sort-Object -Property {$_.path})
+
+$module.ExitJson()
+
diff --git a/test/support/windows-integration/plugins/modules/win_find.py b/test/support/windows-integration/plugins/modules/win_find.py
new file mode 100644
index 00000000..f506f956
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_find.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_find
+version_added: "2.3"
+short_description: Return a list of files based on specific criteria
+description:
+    - Return a list of files based on specified criteria.
+    - Multiple criteria are AND'd together.
+    - For non-Windows targets, use the M(find) module instead.
+options:
+    age:
+        description:
+            - Select files or folders whose age is equal to or greater than
+              the specified time.
+            - Use a negative age to find files equal to or less than
+              the specified time.
+            - You can choose seconds, minutes, hours, days or weeks
+              by specifying the first letter of any of
+              those words (e.g., "2s", "10d", "1w").
+        type: str
+    age_stamp:
+        description:
+            - Choose the file property against which we compare C(age).
+            - The default attribute we compare with is the last modification time.
+        type: str
+        choices: [ atime, ctime, mtime ]
+        default: mtime
+    checksum_algorithm:
+        description:
+            - Algorithm to determine the checksum of a file.
+            - Will throw an error if the host is unable to use specified algorithm.
+        type: str
+        choices: [ md5, sha1, sha256, sha384, sha512 ]
+        default: sha1
+    file_type:
+        description: Type of file to search for.
+        type: str
+        choices: [ directory, file ]
+        default: file
+    follow:
+        description:
+            - Set this to C(yes) to follow symlinks in the path.
+            - This needs to be used in conjunction with C(recurse).
+        type: bool
+        default: no
+    get_checksum:
+        description:
+            - Whether to return a checksum of the file in the return info (default sha1),
+              use C(checksum_algorithm) to change from the default.
+        type: bool
+        default: yes
+    hidden:
+        description: Set this to include hidden files or folders.
+        type: bool
+        default: no
+    paths:
+        description:
+            - List of paths of directories to search for files or folders in.
+            - This can be supplied as a single path or a list of paths.
+        type: list
+        required: yes
+    patterns:
+        description:
+            - One or more (powershell or regex) patterns to compare filenames with.
+            - The type of pattern matching is controlled by C(use_regex) option.
+            - The patterns restrict the list of files or folders to be returned based on the filenames.
+            - For a file to be matched it only has to match with one pattern in a list provided.
+        type: list
+        aliases: [ "regex", "regexp" ]
+    recurse:
+        description:
+            - Will recursively descend into the directory looking for files or folders.
+        type: bool
+        default: no
+    size:
+        description:
+            - Select files or folders whose size is equal to or greater than the specified size.
+            - Use a negative value to find files equal to or less than the specified size.
+            - You can specify the size with a suffix of the byte type i.e. kilo = k, mega = m...
+            - Size is not evaluated for symbolic links.
+ type: str + use_regex: + description: + - Will set patterns to run as a regex check if set to C(yes). + type: bool + default: no +author: +- Jordan Borean (@jborean93) +''' + +EXAMPLES = r''' +- name: Find files in path + win_find: + paths: D:\Temp + +- name: Find hidden files in path + win_find: + paths: D:\Temp + hidden: yes + +- name: Find files in multiple paths + win_find: + paths: + - C:\Temp + - D:\Temp + +- name: Find files in directory while searching recursively + win_find: + paths: D:\Temp + recurse: yes + +- name: Find files in directory while following symlinks + win_find: + paths: D:\Temp + recurse: yes + follow: yes + +- name: Find files with .log and .out extension using powershell wildcards + win_find: + paths: D:\Temp + patterns: [ '*.log', '*.out' ] + +- name: Find files in path based on regex pattern + win_find: + paths: D:\Temp + patterns: out_\d{8}-\d{6}.log + +- name: Find files older than 1 day + win_find: + paths: D:\Temp + age: 86400 + +- name: Find files older than 1 day based on create time + win_find: + paths: D:\Temp + age: 86400 + age_stamp: ctime + +- name: Find files older than 1 day with unit syntax + win_find: + paths: D:\Temp + age: 1d + +- name: Find files newer than 1 hour + win_find: + paths: D:\Temp + age: -3600 + +- name: Find files newer than 1 hour with unit syntax + win_find: + paths: D:\Temp + age: -1h + +- name: Find files larger than 1MB + win_find: + paths: D:\Temp + size: 1048576 + +- name: Find files larger than 1GB with unit syntax + win_find: + paths: D:\Temp + size: 1g + +- name: Find files smaller than 1MB + win_find: + paths: D:\Temp + size: -1048576 + +- name: Find files smaller than 1GB with unit syntax + win_find: + paths: D:\Temp + size: -1g + +- name: Find folders/symlinks in multiple paths + win_find: + paths: + - C:\Temp + - D:\Temp + file_type: directory + +- name: Find files and return SHA256 checksum of files found + win_find: + paths: C:\Temp + get_checksum: yes + checksum_algorithm: sha256 + +- name: Find files and do not return the checksum + win_find: + paths: C:\Temp + get_checksum: no +''' + +RETURN = r''' +examined: + description: The number of files/folders that was checked. + returned: always + type: int + sample: 10 +matched: + description: The number of files/folders that match the criteria. + returned: always + type: int + sample: 2 +files: + description: Information on the files/folders that match the criteria returned as a list of dictionary elements + for each file matched. The entries are sorted by the path value alphabetically. + returned: success + type: complex + contains: + attributes: + description: attributes of the file at path in raw form. + returned: success, path exists + type: str + sample: "Archive, Hidden" + checksum: + description: The checksum of a file based on checksum_algorithm specified. + returned: success, path exists, path is a file, get_checksum == True + type: str + sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98 + creationtime: + description: The create time of the file represented in seconds since epoch. + returned: success, path exists + type: float + sample: 1477984205.15 + exists: + description: Whether the file exists, will always be true for M(win_find). + returned: success, path exists + type: bool + sample: true + extension: + description: The extension of the file at path. + returned: success, path exists, path is a file + type: str + sample: ".ps1" + filename: + description: The name of the file. 
+            returned: success, path exists
+            type: str
+            sample: temp
+        hlnk_targets:
+            description: List of other files pointing to the same file (hard links), excludes the current file.
+            returned: success, path exists
+            type: list
+            sample:
+            - C:\temp\file.txt
+            - C:\Windows\update.log
+        isarchive:
+            description: If the path is ready for archiving or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        isdir:
+            description: If the path is a directory or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        ishidden:
+            description: If the path is hidden or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        isjunction:
+            description: If the path is a junction point.
+            returned: success, path exists
+            type: bool
+            sample: true
+        islnk:
+            description: If the path is a symbolic link.
+            returned: success, path exists
+            type: bool
+            sample: true
+        isreadonly:
+            description: If the path is read only or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        isreg:
+            description: If the path is a regular file or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        isshared:
+            description: If the path is shared or not.
+            returned: success, path exists
+            type: bool
+            sample: true
+        lastaccesstime:
+            description: The last access time of the file represented in seconds since epoch.
+            returned: success, path exists
+            type: float
+            sample: 1477984205.15
+        lastwritetime:
+            description: The last modification time of the file represented in seconds since epoch.
+            returned: success, path exists
+            type: float
+            sample: 1477984205.15
+        lnk_source:
+            description: The target of the symlink normalized for the remote filesystem.
+            returned: success, path exists, path is a symbolic link or junction point
+            type: str
+            sample: C:\temp
+        lnk_target:
+            description: The target of the symlink. Note that relative paths remain relative, will return null if not a link.
+            returned: success, path exists, path is a symbolic link or junction point
+            type: str
+            sample: temp
+        nlink:
+            description: Number of links to the file (hard links)
+            returned: success, path exists
+            type: int
+            sample: 1
+        owner:
+            description: The owner of the file.
+            returned: success, path exists
+            type: str
+            sample: BUILTIN\Administrators
+        path:
+            description: The full absolute path to the file.
+            returned: success, path exists
+            type: str
+            sample: C:\temp\file.txt
+        sharename:
+            description: The name of share if folder is shared.
+            returned: success, path exists, path is a directory and isshared == True
+            type: str
+            sample: file-share
+        size:
+            description: The size in bytes of the file.
+ returned: success, path exists, path is a file + type: int + sample: 1024 +''' diff --git a/test/support/windows-integration/plugins/modules/win_format.ps1 b/test/support/windows-integration/plugins/modules/win_format.ps1 new file mode 100644 index 00000000..b5fd3ae0 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_format.ps1 @@ -0,0 +1,200 @@ +#!powershell + +# Copyright: (c) 2019, Varun Chopra (@chopraaa) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#AnsibleRequires -OSVersion 6.2 + +Set-StrictMode -Version 2 + +$ErrorActionPreference = "Stop" + +$spec = @{ + options = @{ + drive_letter = @{ type = "str" } + path = @{ type = "str" } + label = @{ type = "str" } + new_label = @{ type = "str" } + file_system = @{ type = "str"; choices = "ntfs", "refs", "exfat", "fat32", "fat" } + allocation_unit_size = @{ type = "int" } + large_frs = @{ type = "bool" } + full = @{ type = "bool"; default = $false } + compress = @{ type = "bool" } + integrity_streams = @{ type = "bool" } + force = @{ type = "bool"; default = $false } + } + mutually_exclusive = @( + ,@('drive_letter', 'path', 'label') + ) + required_one_of = @( + ,@('drive_letter', 'path', 'label') + ) + supports_check_mode = $true +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$drive_letter = $module.Params.drive_letter +$path = $module.Params.path +$label = $module.Params.label +$new_label = $module.Params.new_label +$file_system = $module.Params.file_system +$allocation_unit_size = $module.Params.allocation_unit_size +$large_frs = $module.Params.large_frs +$full_format = $module.Params.full +$compress_volume = $module.Params.compress +$integrity_streams = $module.Params.integrity_streams +$force_format = $module.Params.force + +# Some pre-checks +if ($null -ne $drive_letter -and $drive_letter -notmatch "^[a-zA-Z]$") { + $module.FailJson("The parameter drive_letter should be a single character A-Z") +} +if ($integrity_streams -eq $true -and $file_system -ne "refs") { + $module.FailJson("Integrity streams can be enabled only on ReFS volumes. You specified: $($file_system)") +} +if ($compress_volume -eq $true) { + if ($file_system -eq "ntfs") { + if ($null -ne $allocation_unit_size -and $allocation_unit_size -gt 4096) { + $module.FailJson("NTFS compression is not supported for allocation unit sizes above 4096") + } + } + else { + $module.FailJson("Compression can be enabled only on NTFS volumes. 
You specified: $($file_system)") + } +} + +function Get-AnsibleVolume { + param( + $DriveLetter, + $Path, + $Label + ) + + if ($null -ne $DriveLetter) { + try { + $volume = Get-Volume -DriveLetter $DriveLetter + } catch { + $module.FailJson("There was an error retrieving the volume using drive_letter $($DriveLetter): $($_.Exception.Message)", $_) + } + } + elseif ($null -ne $Path) { + try { + $volume = Get-Volume -Path $Path + } catch { + $module.FailJson("There was an error retrieving the volume using path $($Path): $($_.Exception.Message)", $_) + } + } + elseif ($null -ne $Label) { + try { + $volume = Get-Volume -FileSystemLabel $Label + } catch { + $module.FailJson("There was an error retrieving the volume using label $($Label): $($_.Exception.Message)", $_) + } + } + else { + $module.FailJson("Unable to locate volume: drive_letter, path and label were not specified") + } + + return $volume +} + +function Format-AnsibleVolume { + param( + $Path, + $Label, + $FileSystem, + $Full, + $UseLargeFRS, + $Compress, + $SetIntegrityStreams, + $AllocationUnitSize + ) + $parameters = @{ + Path = $Path + Full = $Full + } + if ($null -ne $UseLargeFRS) { + $parameters.Add("UseLargeFRS", $UseLargeFRS) + } + if ($null -ne $SetIntegrityStreams) { + $parameters.Add("SetIntegrityStreams", $SetIntegrityStreams) + } + if ($null -ne $Compress){ + $parameters.Add("Compress", $Compress) + } + if ($null -ne $Label) { + $parameters.Add("NewFileSystemLabel", $Label) + } + if ($null -ne $FileSystem) { + $parameters.Add("FileSystem", $FileSystem) + } + if ($null -ne $AllocationUnitSize) { + $parameters.Add("AllocationUnitSize", $AllocationUnitSize) + } + + Format-Volume @parameters -Confirm:$false | Out-Null + +} + +$ansible_volume = Get-AnsibleVolume -DriveLetter $drive_letter -Path $path -Label $label +$ansible_file_system = $ansible_volume.FileSystem +$ansible_volume_size = $ansible_volume.Size +$ansible_volume_alu = (Get-CimInstance -ClassName Win32_Volume -Filter "DeviceId = '$($ansible_volume.path.replace('\','\\'))'" -Property BlockSize).BlockSize + +$ansible_partition = Get-Partition -Volume $ansible_volume + +if (-not $force_format -and $null -ne $allocation_unit_size -and $ansible_volume_alu -ne 0 -and $null -ne $ansible_volume_alu -and $allocation_unit_size -ne $ansible_volume_alu) { + $module.FailJson("Force format must be specified since target allocation unit size: $($allocation_unit_size) is different from the current allocation unit size of the volume: $($ansible_volume_alu)") +} + +foreach ($access_path in $ansible_partition.AccessPaths) { + if ($access_path -ne $Path) { + if ($null -ne $file_system -and + -not [string]::IsNullOrEmpty($ansible_file_system) -and + $file_system -ne $ansible_file_system) + { + if (-not $force_format) + { + $no_files_in_volume = (Get-ChildItem -LiteralPath $access_path -ErrorAction SilentlyContinue | Measure-Object).Count -eq 0 + if($no_files_in_volume) + { + $module.FailJson("Force format must be specified since target file system: $($file_system) is different from the current file system of the volume: $($ansible_file_system.ToLower())") + } + else + { + $module.FailJson("Force format must be specified to format non-pristine volumes") + } + } + } + else + { + $pristine = -not $force_format + } + } +} + +if ($force_format) { + if (-not $module.CheckMode) { + Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume 
-AllocationUnitSize $allocation_unit_size + } + $module.Result.changed = $true +} +else { + if ($pristine) { + if ($null -eq $new_label) { + $new_label = $ansible_volume.FileSystemLabel + } + # Conditions for formatting + if ($ansible_volume_size -eq 0 -or + $ansible_volume.FileSystemLabel -ne $new_label) { + if (-not $module.CheckMode) { + Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume -AllocationUnitSize $allocation_unit_size + } + $module.Result.changed = $true + } + } +} + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_format.py b/test/support/windows-integration/plugins/modules/win_format.py new file mode 100644 index 00000000..f8f18ed7 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_format.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Varun Chopra (@chopraaa) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = r''' +module: win_format +version_added: '2.8' +short_description: Formats an existing volume or a new volume on an existing partition on Windows +description: + - The M(win_format) module formats an existing volume or a new volume on an existing partition on Windows +options: + drive_letter: + description: + - Used to specify the drive letter of the volume to be formatted. + type: str + path: + description: + - Used to specify the path to the volume to be formatted. + type: str + label: + description: + - Used to specify the label of the volume to be formatted. + type: str + new_label: + description: + - Used to specify the new file system label of the formatted volume. + type: str + file_system: + description: + - Used to specify the file system to be used when formatting the target volume. + type: str + choices: [ ntfs, refs, exfat, fat32, fat ] + allocation_unit_size: + description: + - Specifies the cluster size to use when formatting the volume. + - If no cluster size is specified when you format a partition, defaults are selected based on + the size of the partition. + - This value must be a multiple of the physical sector size of the disk. + type: int + large_frs: + description: + - Specifies that large File Record System (FRS) should be used. + type: bool + compress: + description: + - Enable compression on the resulting NTFS volume. + - NTFS compression is not supported where I(allocation_unit_size) is more than 4096. + type: bool + integrity_streams: + description: + - Enable integrity streams on the resulting ReFS volume. + type: bool + full: + description: + - A full format writes to every sector of the disk, takes much longer to perform than the + default (quick) format, and is not recommended on storage that is thinly provisioned. + - Specify C(true) for full format. + type: bool + force: + description: + - Specify if formatting should be forced for volumes that are not created from new partitions + or if the source and target file system are different. + type: bool +notes: + - Microsoft Windows Server 2012 or Microsoft Windows 8 or newer is required to use this module. To check if your system is compatible, see + U(https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version). 
+  - One of three parameters (I(drive_letter), I(path) and I(label)) is mandatory to identify the target
+    volume but more than one cannot be specified at the same time.
+  - This module is idempotent if I(force) is not specified and file system labels remain preserved.
+  - For more information, see U(https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/format-msft-volume)
+seealso:
+  - module: win_disk_facts
+  - module: win_partition
+author:
+  - Varun Chopra (@chopraaa)
+'''
+
+EXAMPLES = r'''
+- name: Create a partition with drive letter D and size 5 GiB
+  win_partition:
+    drive_letter: D
+    partition_size: 5 GiB
+    disk_number: 1
+
+- name: Full format the newly created partition as NTFS and label it
+  win_format:
+    drive_letter: D
+    file_system: NTFS
+    new_label: Formatted
+    full: True
+'''
+
+RETURN = r'''
+#
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_get_url.ps1 b/test/support/windows-integration/plugins/modules/win_get_url.ps1
new file mode 100644
index 00000000..1d8dd5a3
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_get_url.ps1
@@ -0,0 +1,274 @@
+#!powershell
+
+# Copyright: (c) 2015, Paul Durivage
+# Copyright: (c) 2015, Tal Auslander
+# Copyright: (c) 2017, Dag Wieers
+# Copyright: (c) 2019, Viktor Utkin
+# Copyright: (c) 2019, Uladzimir Klybik
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.FileUtil
+#Requires -Module Ansible.ModuleUtils.WebRequest
+
+$spec = @{
+    options = @{
+        url = @{ type="str"; required=$true }
+        dest = @{ type='path'; required=$true }
+        force = @{ type='bool'; default=$true }
+        checksum = @{ type='str' }
+        checksum_algorithm = @{ type='str'; default='sha1'; choices = @("md5", "sha1", "sha256", "sha384", "sha512") }
+        checksum_url = @{ type='str' }
+
+        # Defined for the alias backwards compatibility, remove once aliases are removed
+        url_username = @{
+            aliases = @("user", "username")
+            deprecated_aliases = @(
+                @{ name = "user"; version = "2.14" },
+                @{ name = "username"; version = "2.14" }
+            )
+        }
+        url_password = @{
+            aliases = @("password")
+            deprecated_aliases = @(
+                @{ name = "password"; version = "2.14" }
+            )
+        }
+    }
+    mutually_exclusive = @(
+        ,@('checksum', 'checksum_url')
+    )
+    supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+$url = $module.Params.url
+$dest = $module.Params.dest
+$force = $module.Params.force
+$checksum = $module.Params.checksum
+$checksum_algorithm = $module.Params.checksum_algorithm
+$checksum_url = $module.Params.checksum_url
+
+$module.Result.elapsed = 0
+$module.Result.url = $url
+
+Function Get-ChecksumFromUri {
+    param(
+        [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+        [Parameter(Mandatory=$true)][Uri]$Uri,
+        [Uri]$SourceUri
+    )
+
+    $script = {
+        param($Response, $Stream)
+
+        $read_stream = New-Object -TypeName System.IO.StreamReader -ArgumentList $Stream
+        $web_checksum = $read_stream.ReadToEnd()
+        $basename = (Split-Path -Path $SourceUri.LocalPath -Leaf)
+        $basename = [regex]::Escape($basename)
+        $web_checksum_str = $web_checksum -split '\r?\n' | Select-String -Pattern $("\s+\.?\/?\\?"
+ $basename + "\s*$") + if (-not $web_checksum_str) { + $Module.FailJson("Checksum record not found for file name '$basename' in file from url: '$Uri'") + } + + $web_checksum_str_splitted = $web_checksum_str[0].ToString().split(" ", 2) + $hash_from_file = $web_checksum_str_splitted[0].Trim() + # Remove any non-alphanumeric characters + $hash_from_file = $hash_from_file -replace '\W+', '' + + Write-Output -InputObject $hash_from_file + } + $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module + + try { + Invoke-WithWebRequest -Module $Module -Request $web_request -Script $script + } catch { + $Module.FailJson("Error when getting the remote checksum from '$Uri'. $($_.Exception.Message)", $_) + } +} + +Function Compare-ModifiedFile { + <# + .SYNOPSIS + Compares the remote URI resource against the local Dest resource. Will + return true if the LastWriteTime/LastModificationDate of the remote is + newer than the local resource date. + #> + param( + [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module, + [Parameter(Mandatory=$true)][Uri]$Uri, + [Parameter(Mandatory=$true)][String]$Dest + ) + + $dest_last_mod = (Get-AnsibleItem -Path $Dest).LastWriteTimeUtc + + # If the URI is a file we don't need to go through the whole WebRequest + if ($Uri.IsFile) { + $src_last_mod = (Get-AnsibleItem -Path $Uri.AbsolutePath).LastWriteTimeUtc + } else { + $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module + $web_request.Method = switch ($web_request.GetType().Name) { + FtpWebRequest { [System.Net.WebRequestMethods+Ftp]::GetDateTimestamp } + HttpWebRequest { [System.Net.WebRequestMethods+Http]::Head } + } + $script = { param($Response, $Stream); $Response.LastModified } + + try { + $src_last_mod = Invoke-WithWebRequest -Module $Module -Request $web_request -Script $script + } catch { + $Module.FailJson("Error when requesting 'Last-Modified' date from '$Uri'. $($_.Exception.Message)", $_) + } + } + + # Return $true if the Uri LastModification date is newer than the Dest LastModification date + ((Get-Date -Date $src_last_mod).ToUniversalTime() -gt $dest_last_mod) +} + +Function Get-Checksum { + param( + [Parameter(Mandatory=$true)][String]$Path, + [String]$Algorithm = "sha1" + ) + + switch ($Algorithm) { + 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider } + 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider } + 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider } + 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider } + 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider } + } + + $fs = [System.IO.File]::Open($Path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, + [System.IO.FileShare]::ReadWrite) + try { + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fs)).Replace("-", "").ToLower() + } finally { + $fs.Dispose() + } + return $hash +} + +Function Invoke-DownloadFile { + param( + [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module, + [Parameter(Mandatory=$true)][Uri]$Uri, + [Parameter(Mandatory=$true)][String]$Dest, + [String]$Checksum, + [String]$ChecksumAlgorithm + ) + + # Check $dest parent folder exists before attempting download, which avoids unhelpful generic error message. 
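+    # A quick sketch with a hypothetical path: Split-Path -LiteralPath 'C:\downloads\file.zip'
+    # yields 'C:\downloads'; failing early here is clearer than the raw IO exception that
+    # [System.IO.File]::Create would otherwise raise inside the download scriptblock.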
+ $dest_parent = Split-Path -LiteralPath $Dest + if (-not (Test-Path -LiteralPath $dest_parent -PathType Container)) { + $module.FailJson("The path '$dest_parent' does not exist for destination '$Dest', or is not visible to the current user. Ensure download destination folder exists (perhaps using win_file state=directory) before win_get_url runs.") + } + + $download_script = { + param($Response, $Stream) + + # Download the file to a temporary directory so we can compare it + $tmp_dest = Join-Path -Path $Module.Tmpdir -ChildPath ([System.IO.Path]::GetRandomFileName()) + $fs = [System.IO.File]::Create($tmp_dest) + try { + $Stream.CopyTo($fs) + $fs.Flush() + } finally { + $fs.Dispose() + } + $tmp_checksum = Get-Checksum -Path $tmp_dest -Algorithm $ChecksumAlgorithm + $Module.Result.checksum_src = $tmp_checksum + + # If the checksum has been set, verify the checksum of the remote against the input checksum. + if ($Checksum -and $Checksum -ne $tmp_checksum) { + $Module.FailJson(("The checksum for {0} did not match '{1}', it was '{2}'" -f $Uri, $Checksum, $tmp_checksum)) + } + + $download = $true + if (Test-Path -LiteralPath $Dest) { + # Validate the remote checksum against the existing downloaded file + $dest_checksum = Get-Checksum -Path $Dest -Algorithm $ChecksumAlgorithm + + # If we don't need to download anything, save the dest checksum so we don't waste time calculating it + # again at the end of the script + if ($dest_checksum -eq $tmp_checksum) { + $download = $false + $Module.Result.checksum_dest = $dest_checksum + $Module.Result.size = (Get-AnsibleItem -Path $Dest).Length + } + } + + if ($download) { + Copy-Item -LiteralPath $tmp_dest -Destination $Dest -Force -WhatIf:$Module.CheckMode > $null + $Module.Result.changed = $true + } + } + $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module + + try { + Invoke-WithWebRequest -Module $Module -Request $web_request -Script $download_script + } catch { + $Module.FailJson("Error downloading '$Uri' to '$Dest': $($_.Exception.Message)", $_) + } +} + +# Use last part of url for dest file name if a directory is supplied for $dest +if (Test-Path -LiteralPath $dest -PathType Container) { + $uri = [System.Uri]$url + $basename = Split-Path -Path $uri.LocalPath -Leaf + if ($uri.LocalPath -and $uri.LocalPath -ne '/' -and $basename) { + $url_basename = Split-Path -Path $uri.LocalPath -Leaf + $dest = Join-Path -Path $dest -ChildPath $url_basename + } else { + $dest = Join-Path -Path $dest -ChildPath $uri.Host + } + + # Ensure we have a string instead of a PS object to avoid serialization issues + $dest = $dest.ToString() +} elseif (([System.IO.Path]::GetFileName($dest)) -eq '') { + # We have a trailing path separator + $module.FailJson("The destination path '$dest' does not exist, or is not visible to the current user. Ensure download destination folder exists (perhaps using win_file state=directory) before win_get_url runs.") +} + +$module.Result.dest = $dest + +if ($checksum) { + $checksum = $checksum.Trim().ToLower() +} +if ($checksum_algorithm) { + $checksum_algorithm = $checksum_algorithm.Trim().ToLower() +} +if ($checksum_url) { + $checksum_url = $checksum_url.Trim() +} + +# Check for case $checksum variable contain url. 
If yes, get file data from url and replace original value in $checksum +if ($checksum_url) { + $checksum_uri = [System.Uri]$checksum_url + if ($checksum_uri.Scheme -notin @("file", "ftp", "http", "https")) { + $module.FailJson("Unsupported 'checksum_url' value for '$dest': '$checksum_url'") + } + + $checksum = Get-ChecksumFromUri -Module $Module -Uri $checksum_uri -SourceUri $url +} + +if ($force -or -not (Test-Path -LiteralPath $dest)) { + # force=yes or dest does not exist, download the file + # Note: Invoke-DownloadFile will compare the checksums internally if dest exists + Invoke-DownloadFile -Module $module -Uri $url -Dest $dest -Checksum $checksum ` + -ChecksumAlgorithm $checksum_algorithm +} else { + # force=no, we want to check the last modified dates and only download if they don't match + $is_modified = Compare-ModifiedFile -Module $module -Uri $url -Dest $dest + if ($is_modified) { + Invoke-DownloadFile -Module $module -Uri $url -Dest $dest -Checksum $checksum ` + -ChecksumAlgorithm $checksum_algorithm + } +} + +if ((-not $module.Result.ContainsKey("checksum_dest")) -and (Test-Path -LiteralPath $dest)) { + # Calculate the dest file checksum if it hasn't already been done + $module.Result.checksum_dest = Get-Checksum -Path $dest -Algorithm $checksum_algorithm + $module.Result.size = (Get-AnsibleItem -Path $dest).Length +} + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_get_url.py b/test/support/windows-integration/plugins/modules/win_get_url.py new file mode 100644 index 00000000..ef5b5f97 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_get_url.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Paul Durivage , and others +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# This is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_get_url +version_added: "1.7" +short_description: Downloads file from HTTP, HTTPS, or FTP to node +description: +- Downloads files from HTTP, HTTPS, or FTP to the remote server. +- The remote server I(must) have direct access to the remote resource. +- For non-Windows targets, use the M(get_url) module instead. +options: + url: + description: + - The full URL of a file to download. + type: str + required: yes + dest: + description: + - The location to save the file at the URL. + - Be sure to include a filename and extension as appropriate. + type: path + required: yes + force: + description: + - If C(yes), will download the file every time and replace the file if the contents change. If C(no), will only + download the file if it does not exist or the remote file has been + modified more recently than the local file. + - This works by sending an http HEAD request to retrieve last modified + time of the requested resource, so for this to work, the remote web + server must support HEAD requests. + type: bool + default: yes + version_added: "2.0" + checksum: + description: + - If a I(checksum) is passed to this parameter, the digest of the + destination file will be calculated after it is downloaded to ensure + its integrity and verify that the transfer completed successfully. + - This option cannot be set with I(checksum_url). 
+ type: str + version_added: "2.8" + checksum_algorithm: + description: + - Specifies the hashing algorithm used when calculating the checksum of + the remote and destination file. + type: str + choices: + - md5 + - sha1 + - sha256 + - sha384 + - sha512 + default: sha1 + version_added: "2.8" + checksum_url: + description: + - Specifies a URL that contains the checksum values for the resource at + I(url). + - Like C(checksum), this is used to verify the integrity of the remote + transfer. + - This option cannot be set with I(checksum). + type: str + version_added: "2.8" + url_username: + description: + - The username to use for authentication. + - The aliases I(user) and I(username) are deprecated and will be removed in + Ansible 2.14. + aliases: + - user + - username + url_password: + description: + - The password for I(url_username). + - The alias I(password) is deprecated and will be removed in Ansible 2.14. + aliases: + - password + proxy_url: + version_added: "2.0" + proxy_username: + version_added: "2.0" + proxy_password: + version_added: "2.0" + headers: + version_added: "2.4" + use_proxy: + version_added: "2.4" + follow_redirects: + version_added: "2.9" + maximum_redirection: + version_added: "2.9" + client_cert: + version_added: "2.9" + client_cert_password: + version_added: "2.9" + method: + description: + - This option is not for use with C(win_get_url) and should be ignored. + version_added: "2.9" +notes: +- If your URL includes an escaped slash character (%2F) this module will convert it to a real slash. + This is a result of the behaviour of the System.Uri class as described in + L(the documentation,https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/network/schemesettings-element-uri-settings#remarks). +- Since Ansible 2.8, the module will skip reporting a change if the remote + checksum is the same as the local local even when C(force=yes). This is to + better align with M(get_url). +extends_documentation_fragment: +- url_windows +seealso: +- module: get_url +- module: uri +- module: win_uri +author: +- Paul Durivage (@angstwad) +- Takeshi Kuramochi (@tksarah) +''' + +EXAMPLES = r''' +- name: Download earthrise.jpg to specified path + win_get_url: + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg + +- name: Download earthrise.jpg to specified path only if modified + win_get_url: + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg + force: no + +- name: Download earthrise.jpg to specified path through a proxy server. 
+ win_get_url: + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg + proxy_url: http://10.0.0.1:8080 + proxy_username: username + proxy_password: password + +- name: Download file from FTP with authentication + win_get_url: + url: ftp://server/file.txt + dest: '%TEMP%\ftp-file.txt' + url_username: ftp-user + url_password: ftp-password + +- name: Download src with sha256 checksum url + win_get_url: + url: http://www.example.com/earthrise.jpg + dest: C:\temp\earthrise.jpg + checksum_url: http://www.example.com/sha256sum.txt + checksum_algorithm: sha256 + force: True + +- name: Download src with sha256 checksum url + win_get_url: + url: http://www.example.com/earthrise.jpg + dest: C:\temp\earthrise.jpg + checksum: a97e6837f60cec6da4491bab387296bbcd72bdba + checksum_algorithm: sha1 + force: True +''' + +RETURN = r''' +dest: + description: destination file/path + returned: always + type: str + sample: C:\Users\RandomUser\earthrise.jpg +checksum_dest: + description: checksum of the file after the download + returned: success and dest has been downloaded + type: str + sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827 +checksum_src: + description: checksum of the remote resource + returned: force=yes or dest did not exist + type: str + sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827 +elapsed: + description: The elapsed seconds between the start of poll and the end of the module. + returned: always + type: float + sample: 2.1406487 +size: + description: size of the dest file + returned: success + type: int + sample: 1220 +url: + description: requested url + returned: always + type: str + sample: http://www.example.com/earthrise.jpg +msg: + description: Error message, or HTTP status message from web-server + returned: always + type: str + sample: OK +status_code: + description: HTTP status code + returned: always + type: int + sample: 200 +''' diff --git a/test/support/windows-integration/plugins/modules/win_lineinfile.ps1 b/test/support/windows-integration/plugins/modules/win_lineinfile.ps1 new file mode 100644 index 00000000..38dd8b8b --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_lineinfile.ps1 @@ -0,0 +1,450 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.Backup + +function WriteLines($outlines, $path, $linesep, $encodingobj, $validate, $check_mode) { + Try { + $temppath = [System.IO.Path]::GetTempFileName(); + } + Catch { + Fail-Json @{} "Cannot create temporary file! 
($($_.Exception.Message))"; + } + $joined = $outlines -join $linesep; + [System.IO.File]::WriteAllText($temppath, $joined, $encodingobj); + + If ($validate) { + + If (-not ($validate -like "*%s*")) { + Fail-Json @{} "validate must contain %s: $validate"; + } + + $validate = $validate.Replace("%s", $temppath); + + $parts = [System.Collections.ArrayList] $validate.Split(" "); + $cmdname = $parts[0]; + + $cmdargs = $validate.Substring($cmdname.Length + 1); + + $process = [Diagnostics.Process]::Start($cmdname, $cmdargs); + $process.WaitForExit(); + + If ($process.ExitCode -ne 0) { + [string] $output = $process.StandardOutput.ReadToEnd(); + [string] $error = $process.StandardError.ReadToEnd(); + Remove-Item $temppath -force; + Fail-Json @{} "failed to validate $cmdname $cmdargs with error: $output $error"; + } + + } + + # Commit changes to the path + $cleanpath = $path.Replace("/", "\"); + Try { + Copy-Item -Path $temppath -Destination $cleanpath -Force -WhatIf:$check_mode; + } + Catch { + Fail-Json @{} "Cannot write to: $cleanpath ($($_.Exception.Message))"; + } + + Try { + Remove-Item -Path $temppath -Force -WhatIf:$check_mode; + } + Catch { + Fail-Json @{} "Cannot remove temporary file: $temppath ($($_.Exception.Message))"; + } + + return $joined; + +} + + +# Implement the functionality for state == 'present' +function Present($path, $regex, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep, $check_mode, $diff_support) { + + # Note that we have to clean up the path because ansible wants to treat / and \ as + # interchangeable in windows pathnames, but .NET framework internals do not support that. + $cleanpath = $path.Replace("/", "\"); + + # Check if path exists. If it does not exist, either create it if create == "yes" + # was specified or fail with a reasonable error message. + If (-not (Test-Path -LiteralPath $path)) { + If (-not $create) { + Fail-Json @{} "Path $path does not exist !"; + } + # Create new empty file, using the specified encoding to write correct BOM + [System.IO.File]::WriteAllLines($cleanpath, "", $encodingobj); + } + + # Initialize result information + $result = @{ + backup = ""; + changed = $false; + msg = ""; + } + + # Read the dest file lines using the indicated encoding into a mutable ArrayList. 
+ $before = [System.IO.File]::ReadAllLines($cleanpath, $encodingobj) + If ($null -eq $before) { + $lines = New-Object System.Collections.ArrayList; + } + Else { + $lines = [System.Collections.ArrayList] $before; + } + + if ($diff_support) { + $result.diff = @{ + before = $before -join $linesep; + } + } + + # Compile the regex specified, if provided + $mre = $null; + If ($regex) { + $mre = New-Object Regex $regex, 'Compiled'; + } + + # Compile the regex for insertafter or insertbefore, if provided + $insre = $null; + If ($insertafter -and $insertafter -ne "BOF" -and $insertafter -ne "EOF") { + $insre = New-Object Regex $insertafter, 'Compiled'; + } + ElseIf ($insertbefore -and $insertbefore -ne "BOF") { + $insre = New-Object Regex $insertbefore, 'Compiled'; + } + + # index[0] is the line num where regex has been found + # index[1] is the line num where insertafter/insertbefore has been found + $index = -1, -1; + $lineno = 0; + + # The latest match object and matched line + $matched_line = ""; + + # Iterate through the lines in the file looking for matches + Foreach ($cur_line in $lines) { + If ($regex) { + $m = $mre.Match($cur_line); + $match_found = $m.Success; + If ($match_found) { + $matched_line = $cur_line; + } + } + Else { + $match_found = $line -ceq $cur_line; + } + If ($match_found) { + $index[0] = $lineno; + } + ElseIf ($insre -and $insre.Match($cur_line).Success) { + If ($insertafter) { + $index[1] = $lineno + 1; + } + If ($insertbefore) { + $index[1] = $lineno; + } + } + $lineno = $lineno + 1; + } + + If ($index[0] -ne -1) { + If ($backrefs) { + $new_line = [regex]::Replace($matched_line, $regex, $line); + } + Else { + $new_line = $line; + } + If ($lines[$index[0]] -cne $new_line) { + $lines[$index[0]] = $new_line; + $result.changed = $true; + $result.msg = "line replaced"; + } + } + ElseIf ($backrefs) { + # No matches - no-op + } + ElseIf ($insertbefore -eq "BOF" -or $insertafter -eq "BOF") { + $lines.Insert(0, $line); + $result.changed = $true; + $result.msg = "line added"; + } + ElseIf ($insertafter -eq "EOF" -or $index[1] -eq -1) { + $lines.Add($line) > $null; + $result.changed = $true; + $result.msg = "line added"; + } + Else { + $lines.Insert($index[1], $line); + $result.changed = $true; + $result.msg = "line added"; + } + + # Write changes to the path if changes were made + If ($result.changed) { + + # Write backup file if backup == "yes" + If ($backup) { + $result.backup_file = Backup-File -path $path -WhatIf:$check_mode + # Ensure backward compatibility (deprecate in future) + $result.backup = $result.backup_file + } + + $writelines_params = @{ + outlines = $lines + path = $path + linesep = $linesep + encodingobj = $encodingobj + validate = $validate + check_mode = $check_mode + } + $after = WriteLines @writelines_params; + + if ($diff_support) { + $result.diff.after = $after; + } + } + + $result.encoding = $encodingobj.WebName; + + Exit-Json $result; +} + + +# Implement the functionality for state == 'absent' +function Absent($path, $regex, $line, $backup, $validate, $encodingobj, $linesep, $check_mode, $diff_support) { + + # Check if path exists. If it does not exist, fail with a reasonable error message. + If (-not (Test-Path -LiteralPath $path)) { + Fail-Json @{} "Path $path does not exist !"; + } + + # Initialize result information + $result = @{ + backup = ""; + changed = $false; + msg = ""; + } + + # Read the dest file lines using the indicated encoding into a mutable ArrayList. 
Note + # that we have to clean up the path because ansible wants to treat / and \ as + # interchangeable in windows pathnames, but .NET framework internals do not support that. + $cleanpath = $path.Replace("/", "\"); + $before = [System.IO.File]::ReadAllLines($cleanpath, $encodingobj); + If ($null -eq $before) { + $lines = New-Object System.Collections.ArrayList; + } + Else { + $lines = [System.Collections.ArrayList] $before; + } + + if ($diff_support) { + $result.diff = @{ + before = $before -join $linesep; + } + } + + # Compile the regex specified, if provided + $cre = $null; + If ($regex) { + $cre = New-Object Regex $regex, 'Compiled'; + } + + $found = New-Object System.Collections.ArrayList; + $left = New-Object System.Collections.ArrayList; + + Foreach ($cur_line in $lines) { + If ($regex) { + $m = $cre.Match($cur_line); + $match_found = $m.Success; + } + Else { + $match_found = $line -ceq $cur_line; + } + If ($match_found) { + $found.Add($cur_line) > $null; + $result.changed = $true; + } + Else { + $left.Add($cur_line) > $null; + } + } + + # Write changes to the path if changes were made + If ($result.changed) { + + # Write backup file if backup == "yes" + If ($backup) { + $result.backup_file = Backup-File -path $path -WhatIf:$check_mode + # Ensure backward compatibility (deprecate in future) + $result.backup = $result.backup_file + } + + $writelines_params = @{ + outlines = $left + path = $path + linesep = $linesep + encodingobj = $encodingobj + validate = $validate + check_mode = $check_mode + } + $after = WriteLines @writelines_params; + + if ($diff_support) { + $result.diff.after = $after; + } + } + + $result.encoding = $encodingobj.WebName; + $result.found = $found.Count; + $result.msg = "$($found.Count) line(s) removed"; + + Exit-Json $result; +} + + +# Parse the parameters file dropped by the Ansible machinery +$params = Parse-Args $args -supports_check_mode $true; +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false; +$diff_support = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false; + +# Initialize defaults for input parameters. 
+$path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -aliases "dest","destfile","name"; +$regex = Get-AnsibleParam -obj $params -name "regex" -type "str" -aliases "regexp"; +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent"; +$line = Get-AnsibleParam -obj $params -name "line" -type "str"; +$backrefs = Get-AnsibleParam -obj $params -name "backrefs" -type "bool" -default $false; +$insertafter = Get-AnsibleParam -obj $params -name "insertafter" -type "str"; +$insertbefore = Get-AnsibleParam -obj $params -name "insertbefore" -type "str"; +$create = Get-AnsibleParam -obj $params -name "create" -type "bool" -default $false; +$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false; +$validate = Get-AnsibleParam -obj $params -name "validate" -type "str"; +$encoding = Get-AnsibleParam -obj $params -name "encoding" -type "str" -default "auto"; +$newline = Get-AnsibleParam -obj $params -name "newline" -type "str" -default "windows" -validateset "unix","windows"; + +# Fail if the path is not a file +If (Test-Path -LiteralPath $path -PathType "container") { + Fail-Json @{} "Path $path is a directory"; +} + +# Default to windows line separator - probably most common +$linesep = "`r`n" +If ($newline -eq "unix") { + $linesep = "`n"; +} + +# Figure out the proper encoding to use for reading / writing the target file. + +# The default encoding is UTF-8 without BOM +$encodingobj = [System.Text.UTF8Encoding] $false; + +# If an explicit encoding is specified, use that instead +If ($encoding -ne "auto") { + $encodingobj = [System.Text.Encoding]::GetEncoding($encoding); +} + +# Otherwise see if we can determine the current encoding of the target file. +# If the file doesn't exist yet (create == 'yes') we use the default or +# explicitly specified encoding set above. +ElseIf (Test-Path -LiteralPath $path) { + + # Get a sorted list of encodings with preambles, longest first + $max_preamble_len = 0; + $sortedlist = New-Object System.Collections.SortedList; + Foreach ($encodinginfo in [System.Text.Encoding]::GetEncodings()) { + $encoding = $encodinginfo.GetEncoding(); + $plen = $encoding.GetPreamble().Length; + If ($plen -gt $max_preamble_len) { + $max_preamble_len = $plen; + } + If ($plen -gt 0) { + $sortedlist.Add(-($plen * 1000000 + $encoding.CodePage), $encoding) > $null; + } + } + + # Get the first N bytes from the file, where N is the max preamble length we saw + [Byte[]]$bom = Get-Content -Encoding Byte -ReadCount $max_preamble_len -TotalCount $max_preamble_len -LiteralPath $path; + + # Iterate through the sorted encodings, looking for a full match. + $found = $false; + Foreach ($encoding in $sortedlist.GetValueList()) { + $preamble = $encoding.GetPreamble(); + If ($preamble -and $bom) { + Foreach ($i in 0..($preamble.Length - 1)) { + If ($i -ge $bom.Length) { + break; + } + If ($preamble[$i] -ne $bom[$i]) { + break; + } + ElseIf ($i + 1 -eq $preamble.Length) { + $encodingobj = $encoding; + $found = $true; + } + } + If ($found) { + break; + } + } + } +} + + +# Main dispatch - based on the value of 'state', perform argument validation and +# call the appropriate handler function. 
+If ($state -eq "present") { + + If ($backrefs -and -not $regex) { + Fail-Json @{} "regexp= is required with backrefs=true"; + } + + If (-not $line) { + Fail-Json @{} "line= is required with state=present"; + } + + If ($insertbefore -and $insertafter) { + Add-Warning $result "Both insertbefore and insertafter parameters found, ignoring `"insertafter=$insertafter`"" + } + + If (-not $insertbefore -and -not $insertafter) { + $insertafter = "EOF"; + } + + $present_params = @{ + path = $path + regex = $regex + line = $line + insertafter = $insertafter + insertbefore = $insertbefore + create = $create + backup = $backup + backrefs = $backrefs + validate = $validate + encodingobj = $encodingobj + linesep = $linesep + check_mode = $check_mode + diff_support = $diff_support + } + Present @present_params; + +} +ElseIf ($state -eq "absent") { + + If (-not $regex -and -not $line) { + Fail-Json @{} "one of line= or regexp= is required with state=absent"; + } + + $absent_params = @{ + path = $path + regex = $regex + line = $line + backup = $backup + validate = $validate + encodingobj = $encodingobj + linesep = $linesep + check_mode = $check_mode + diff_support = $diff_support + } + Absent @absent_params; +} diff --git a/test/support/windows-integration/plugins/modules/win_lineinfile.py b/test/support/windows-integration/plugins/modules/win_lineinfile.py new file mode 100644 index 00000000..f4fb7f5a --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_lineinfile.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_lineinfile +short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression +description: + - This module will search a file for a line, and ensure that it is present or absent. + - This is primarily useful when you want to change a single line in a file only. +version_added: "2.0" +options: + path: + description: + - The path of the file to modify. + - Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted. + - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name). + type: path + required: yes + aliases: [ dest, destfile, name ] + backup: + description: + - Determine whether a backup should be created. + - When set to C(yes), create a backup file including the timestamp information + so you can get the original file back if you somehow clobbered it incorrectly. + type: bool + default: no + regex: + description: + - The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found + will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; + see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx). + aliases: [ "regexp" ] + state: + description: + - Whether the line should be there or not. + type: str + choices: [ absent, present ] + default: present + line: + description: + - Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get + expanded with the C(regexp) capture groups if the regexp matches. 
+ - Be aware that the line is processed first on the controller and thus is dependent on yaml quoting rules. Any double quoted line + will have control characters, such as '\r\n', expanded. To print such characters literally, use single or no quotes. + type: str + backrefs: + description: + - Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp) + matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) + doesn't match anywhere in the file, the file will be left unchanged. + - If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter. + type: bool + default: no + insertafter: + description: + - Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is + available; C(EOF) for inserting the line at the end of the file. + - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). + type: str + choices: [ EOF, '*regex*' ] + default: EOF + insertbefore: + description: + - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; + C(BOF) for inserting the line at the beginning of the file. + - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs). + type: str + choices: [ BOF, '*regex*' ] + create: + description: + - Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing. + type: bool + default: no + validate: + description: + - Validation to run before copying into place. Use %s in the command to indicate the current file to validate. + - The command is passed securely so shell features like expansion and pipes won't work. + type: str + encoding: + description: + - Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause + the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding. + - An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - + see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx). + - This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a + specific encoding, the default encoding (UTF-8, no BOM) will be used. + type: str + default: auto + newline: + description: + - Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated + line separator will be used for file output regardless of the original line separator that appears in the input file. + type: str + choices: [ unix, windows ] + default: windows +notes: + - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. 
+seealso: +- module: assemble +- module: lineinfile +author: +- Brian Lloyd (@brianlloyd) +''' + +EXAMPLES = r''' +# Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path' +- name: Insert path without converting \r\n + win_lineinfile: + path: c:\file.txt + line: c:\return\new + +- win_lineinfile: + path: C:\Temp\example.conf + regex: '^name=' + line: 'name=JohnDoe' + +- win_lineinfile: + path: C:\Temp\example.conf + regex: '^name=' + state: absent + +- win_lineinfile: + path: C:\Temp\example.conf + regex: '^127\.0\.0\.1' + line: '127.0.0.1 localhost' + +- win_lineinfile: + path: C:\Temp\httpd.conf + regex: '^Listen ' + insertafter: '^#Listen ' + line: Listen 8080 + +- win_lineinfile: + path: C:\Temp\services + regex: '^# port for http' + insertbefore: '^www.*80/tcp' + line: '# port for http by default' + +- name: Create file if it doesn't exist with a specific encoding + win_lineinfile: + path: C:\Temp\utf16.txt + create: yes + encoding: utf-16 + line: This is a utf-16 encoded file + +- name: Add a line to a file and ensure the resulting file uses unix line separators + win_lineinfile: + path: C:\Temp\testfile.txt + line: Line added to file + newline: unix + +- name: Update a line using backrefs + win_lineinfile: + path: C:\Temp\example.conf + backrefs: yes + regex: '(^name=)' + line: '$1JohnDoe' +''' + +RETURN = r''' +backup: + description: + - Name of the backup file that was created. + - This is now deprecated, use C(backup_file) instead. + returned: if backup=yes + type: str + sample: C:\Path\To\File.txt.11540.20150212-220915.bak +backup_file: + description: Name of the backup file that was created. + returned: if backup=yes + type: str + sample: C:\Path\To\File.txt.11540.20150212-220915.bak +''' diff --git a/test/support/windows-integration/plugins/modules/win_path.ps1 b/test/support/windows-integration/plugins/modules/win_path.ps1 new file mode 100644 index 00000000..04eb41a3 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_path.ps1 @@ -0,0 +1,145 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy + +Set-StrictMode -Version 2 +$ErrorActionPreference = "Stop" + +$system_path = "System\CurrentControlSet\Control\Session Manager\Environment" +$user_path = "Environment" + +# list/arraylist methods don't allow IEqualityComparer override for case/backslash/quote-insensitivity, roll our own search +Function Get-IndexOfPathElement ($list, [string]$value) { + $idx = 0 + $value = $value.Trim('"').Trim('\') + ForEach($el in $list) { + If ([string]$el.Trim('"').Trim('\') -ieq $value) { + return $idx + } + + $idx++ + } + + return -1 +} + +# alters list in place, returns true if at least one element was added +Function Add-Elements ($existing_elements, $elements_to_add) { + $last_idx = -1 + $changed = $false + + ForEach($el in $elements_to_add) { + $idx = Get-IndexOfPathElement $existing_elements $el + + # add missing elements at the end + If ($idx -eq -1) { + $last_idx = $existing_elements.Add($el) + $changed = $true + } + ElseIf ($idx -lt $last_idx) { + $existing_elements.RemoveAt($idx) | Out-Null + $existing_elements.Add($el) | Out-Null + $last_idx = $existing_elements.Count - 1 + $changed = $true + } + Else { + $last_idx = $idx + } + } + + return $changed +} + +# alters list in place, returns true if at least one element was removed +Function Remove-Elements ($existing_elements, $elements_to_remove) { + $count = 
$existing_elements.Count + + ForEach($el in $elements_to_remove) { + $idx = Get-IndexOfPathElement $existing_elements $el + $result.removed_idx = $idx + If ($idx -gt -1) { + $existing_elements.RemoveAt($idx) + } + } + + return $count -ne $existing_elements.Count +} + +# PS registry provider doesn't allow access to unexpanded REG_EXPAND_SZ; fall back to .NET +Function Get-RawPathVar ($scope) { + If ($scope -eq "user") { + $env_key = [Microsoft.Win32.Registry]::CurrentUser.OpenSubKey($user_path) + } + ElseIf ($scope -eq "machine") { + $env_key = [Microsoft.Win32.Registry]::LocalMachine.OpenSubKey($system_path) + } + + return $env_key.GetValue($var_name, "", [Microsoft.Win32.RegistryValueOptions]::DoNotExpandEnvironmentNames) +} + +Function Set-RawPathVar($path_value, $scope) { + If ($scope -eq "user") { + $var_path = "HKCU:\" + $user_path + } + ElseIf ($scope -eq "machine") { + $var_path = "HKLM:\" + $system_path + } + + Set-ItemProperty $var_path -Name $var_name -Value $path_value -Type ExpandString | Out-Null + + return $path_value +} + +$parsed_args = Parse-Args $args -supports_check_mode $true + +$result = @{changed=$false} + +$var_name = Get-AnsibleParam $parsed_args "name" -Default "PATH" +$elements = Get-AnsibleParam $parsed_args "elements" -FailIfEmpty $result +$state = Get-AnsibleParam $parsed_args "state" -Default "present" -ValidateSet "present","absent" +$scope = Get-AnsibleParam $parsed_args "scope" -Default "machine" -ValidateSet "machine","user" + +$check_mode = Get-AnsibleParam $parsed_args "_ansible_check_mode" -Default $false + +If ($elements -is [string]) { + $elements = @($elements) +} + +If ($elements -isnot [Array]) { + Fail-Json $result "elements must be a string or list of path strings" +} + +$current_value = Get-RawPathVar $scope +$result.path_value = $current_value + +# TODO: test case-canonicalization on wacky unicode values (eg turkish i) +# TODO: detect and warn/fail on unparseable path? (eg, unbalanced quotes, invalid path chars) +# TODO: detect and warn/fail if system path and Powershell isn't on it? + +$existing_elements = New-Object System.Collections.ArrayList + +# split on semicolons, accounting for quoted values with embedded semicolons (which may or may not be wrapped in whitespace) +$pathsplit_re = [regex] '((?\s*"[^"]+"\s*)|(?[^;]+))(;$|$|;)' + +ForEach ($m in $pathsplit_re.Matches($current_value)) { + $existing_elements.Add($m.Groups['q'].Value) | Out-Null +} + +If ($state -eq "absent") { + $result.changed = Remove-Elements $existing_elements $elements +} +ElseIf ($state -eq "present") { + $result.changed = Add-Elements $existing_elements $elements +} + +# calculate the new path value from the existing elements +$path_value = [String]::Join(";", $existing_elements.ToArray()) +$result.path_value = $path_value + +If ($result.changed -and -not $check_mode) { + Set-RawPathVar $path_value $scope | Out-Null +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_path.py b/test/support/windows-integration/plugins/modules/win_path.py new file mode 100644 index 00000000..6404504f --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_path.py @@ -0,0 +1,79 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# This is a windows documentation stub. 
Actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_path +version_added: "2.3" +short_description: Manage Windows path environment variables +description: + - Allows element-based ordering, addition, and removal of Windows path environment variables. +options: + name: + description: + - Target path environment variable name. + type: str + default: PATH + elements: + description: + - A single path element, or a list of path elements (ie, directories) to add or remove. + - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order + in the resultant path value. + - Variable expansions (eg, C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element. + - Any existing path elements not mentioned in C(elements) are always preserved in their current order. + - New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering. + - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing + backslashes in YAML require quotes. + type: list + required: yes + state: + description: + - Whether the path elements specified in C(elements) should be present or absent. + type: str + choices: [ absent, present ] + scope: + description: + - The level at which the environment variable specified by C(name) should be managed (either for the current user or global machine scope). + type: str + choices: [ machine, user ] + default: machine +notes: + - This module is for modifying individual elements of path-like + environment variables. For general-purpose management of other + environment vars, use the M(win_environment) module. + - This module does not broadcast change events. + This means that the minority of windows applications which can have + their environment changed without restarting will not be notified and + therefore will need restarting to pick up new environment settings. + - User level environment variables will require an interactive user to + log out and in again before they become available. 
+seealso: +- module: win_environment +author: +- Matt Davis (@nitzmahone) +''' + +EXAMPLES = r''' +- name: Ensure that system32 and Powershell are present on the global system path, and in the specified order + win_path: + elements: + - '%SystemRoot%\system32' + - '%SystemRoot%\system32\WindowsPowerShell\v1.0' + +- name: Ensure that C:\Program Files\MyJavaThing is not on the current user's CLASSPATH + win_path: + name: CLASSPATH + elements: C:\Program Files\MyJavaThing + scope: user + state: absent +''' diff --git a/test/support/windows-integration/plugins/modules/win_ping.ps1 b/test/support/windows-integration/plugins/modules/win_ping.ps1 new file mode 100644 index 00000000..c848b912 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_ping.ps1 @@ -0,0 +1,21 @@ +#!powershell + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic + +$spec = @{ + options = @{ + data = @{ type = "str"; default = "pong" } + } + supports_check_mode = $true +} +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) +$data = $module.Params.data + +if ($data -eq "crash") { + throw "boom" +} + +$module.Result.ping = $data +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_ping.py b/test/support/windows-integration/plugins/modules/win_ping.py new file mode 100644 index 00000000..6d35f379 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_ping.py @@ -0,0 +1,55 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2012, Michael DeHaan , and others +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_ping +version_added: "1.7" +short_description: A windows version of the classic ping module +description: + - Checks management connectivity of a windows host. + - This is NOT ICMP ping, this is just a trivial test module. + - For non-Windows targets, use the M(ping) module instead. + - For Network targets, use the M(net_ping) module instead. +options: + data: + description: + - Alternate data to return instead of 'pong'. + - If this parameter is set to C(crash), the module will cause an exception. + type: str + default: pong +seealso: +- module: ping +author: +- Chris Church (@cchurch) +''' + +EXAMPLES = r''' +# Test connectivity to a windows host +# ansible winserver -m win_ping + +- name: Example from an Ansible Playbook + win_ping: + +- name: Induce an exception to see what happens + win_ping: + data: crash +''' + +RETURN = r''' +ping: + description: Value provided with the data parameter. 
+ returned: success + type: str + sample: pong +''' diff --git a/test/support/windows-integration/plugins/modules/win_psexec.ps1 b/test/support/windows-integration/plugins/modules/win_psexec.ps1 new file mode 100644 index 00000000..04a51270 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_psexec.ps1 @@ -0,0 +1,152 @@ +#!powershell + +# Copyright: (c) 2017, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.ArgvParser +#Requires -Module Ansible.ModuleUtils.CommandUtil + +# See also: https://technet.microsoft.com/en-us/sysinternals/pxexec.aspx + +$spec = @{ + options = @{ + command = @{ type='str'; required=$true } + executable = @{ type='path'; default='psexec.exe' } + hostnames = @{ type='list' } + username = @{ type='str' } + password = @{ type='str'; no_log=$true } + chdir = @{ type='path' } + wait = @{ type='bool'; default=$true } + nobanner = @{ type='bool'; default=$false } + noprofile = @{ type='bool'; default=$false } + elevated = @{ type='bool'; default=$false } + limited = @{ type='bool'; default=$false } + system = @{ type='bool'; default=$false } + interactive = @{ type='bool'; default=$false } + session = @{ type='int' } + priority = @{ type='str'; choices=@( 'background', 'low', 'belownormal', 'abovenormal', 'high', 'realtime' ) } + timeout = @{ type='int' } + } +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$command = $module.Params.command +$executable = $module.Params.executable +$hostnames = $module.Params.hostnames +$username = $module.Params.username +$password = $module.Params.password +$chdir = $module.Params.chdir +$wait = $module.Params.wait +$nobanner = $module.Params.nobanner +$noprofile = $module.Params.noprofile +$elevated = $module.Params.elevated +$limited = $module.Params.limited +$system = $module.Params.system +$interactive = $module.Params.interactive +$session = $module.Params.session +$priority = $module.Params.Priority +$timeout = $module.Params.timeout + +$module.Result.changed = $true + +If (-Not (Get-Command $executable -ErrorAction SilentlyContinue)) { + $module.FailJson("Executable '$executable' was not found.") +} + +$arguments = [System.Collections.Generic.List`1[String]]@($executable) + +If ($nobanner -eq $true) { + $arguments.Add("-nobanner") +} + +# Support running on local system if no hostname is specified +If ($hostnames) { + $hostname_argument = ($hostnames | sort -Unique) -join ',' + $arguments.Add("\\$hostname_argument") +} + +# Username is optional +If ($null -ne $username) { + $arguments.Add("-u") + $arguments.Add($username) +} + +# Password is optional +If ($null -ne $password) { + $arguments.Add("-p") + $arguments.Add($password) +} + +If ($null -ne $chdir) { + $arguments.Add("-w") + $arguments.Add($chdir) +} + +If ($wait -eq $false) { + $arguments.Add("-d") +} + +If ($noprofile -eq $true) { + $arguments.Add("-e") +} + +If ($elevated -eq $true) { + $arguments.Add("-h") +} + +If ($system -eq $true) { + $arguments.Add("-s") +} + +If ($interactive -eq $true) { + $arguments.Add("-i") + If ($null -ne $session) { + $arguments.Add($session) + } +} + +If ($limited -eq $true) { + $arguments.Add("-l") +} + +If ($null -ne $priority) { + $arguments.Add("-$priority") +} + +If ($null -ne $timeout) { + $arguments.Add("-n") + $arguments.Add($timeout) +} + +$arguments.Add("-accepteula") + +$argument_string = Argv-ToString -arguments $arguments + +# Add the 
command at the end of the argument string, we don't want to escape +# that as psexec doesn't expect it to be one arg +$argument_string += " $command" + +$start_datetime = [DateTime]::UtcNow +$module.Result.psexec_command = $argument_string + +$command_result = Run-Command -command $argument_string + +$end_datetime = [DateTime]::UtcNow + +$module.Result.stdout = $command_result.stdout +$module.Result.stderr = $command_result.stderr + +If ($wait -eq $true) { + $module.Result.rc = $command_result.rc +} else { + $module.Result.rc = 0 + $module.Result.pid = $command_result.rc +} + +$module.Result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$module.Result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$module.Result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff") + +$module.ExitJson() diff --git a/test/support/windows-integration/plugins/modules/win_psexec.py b/test/support/windows-integration/plugins/modules/win_psexec.py new file mode 100644 index 00000000..c3fc37e4 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_psexec.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: 2017, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_psexec +version_added: '2.3' +short_description: Runs commands (remotely) as another (privileged) user +description: +- Run commands (remotely) through the PsExec service. +- Run commands as another (domain) user (with elevated privileges). +requirements: +- Microsoft PsExec +options: + command: + description: + - The command line to run through PsExec (limited to 260 characters). + type: str + required: yes + executable: + description: + - The location of the PsExec utility (in case it is not located in your PATH). + type: path + default: psexec.exe + hostnames: + description: + - The hostnames to run the command. + - If not provided, the command is run locally. + type: list + username: + description: + - The (remote) user to run the command as. + - If not provided, the current user is used. + type: str + password: + description: + - The password for the (remote) user to run the command as. + - This is mandatory in order authenticate yourself. + type: str + chdir: + description: + - Run the command from this (remote) directory. + type: path + nobanner: + description: + - Do not display the startup banner and copyright message. + - This only works for specific versions of the PsExec binary. + type: bool + default: no + version_added: '2.4' + noprofile: + description: + - Run the command without loading the account's profile. + type: bool + default: no + elevated: + description: + - Run the command with elevated privileges. + type: bool + default: no + interactive: + description: + - Run the program so that it interacts with the desktop on the remote system. + type: bool + default: no + session: + description: + - Specifies the session ID to use. + - This parameter works in conjunction with I(interactive). + - It has no effect when I(interactive) is set to C(no). + type: int + version_added: '2.7' + limited: + description: + - Run the command as limited user (strips the Administrators group and allows only privileges assigned to the Users group). + type: bool + default: no + system: + description: + - Run the remote command in the System account. 
+ type: bool + default: no + priority: + description: + - Used to run the command at a different priority. + choices: [ abovenormal, background, belownormal, high, low, realtime ] + timeout: + description: + - The connection timeout in seconds + type: int + wait: + description: + - Wait for the application to terminate. + - Only use for non-interactive applications. + type: bool + default: yes +notes: +- More information related to Microsoft PsExec is available from + U(https://technet.microsoft.com/en-us/sysinternals/bb897553.aspx) +seealso: +- module: psexec +- module: raw +- module: win_command +- module: win_shell +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Test the PsExec connection to the local system (target node) with your user + win_psexec: + command: whoami.exe + +- name: Run regedit.exe locally (on target node) as SYSTEM and interactively + win_psexec: + command: regedit.exe + interactive: yes + system: yes + +- name: Run the setup.exe installer on multiple servers using the Domain Administrator + win_psexec: + command: E:\setup.exe /i /IACCEPTEULA + hostnames: + - remote_server1 + - remote_server2 + username: DOMAIN\Administrator + password: some_password + priority: high + +- name: Run PsExec from custom location C:\Program Files\sysinternals\ + win_psexec: + command: netsh advfirewall set allprofiles state off + executable: C:\Program Files\sysinternals\psexec.exe + hostnames: [ remote_server ] + password: some_password + priority: low +''' + +RETURN = r''' +cmd: + description: The complete command line used by the module, including PsExec call and additional options. + returned: always + type: str + sample: psexec.exe -nobanner \\remote_server -u "DOMAIN\Administrator" -p "some_password" -accepteula E:\setup.exe +pid: + description: The PID of the async process created by PsExec. + returned: when C(wait=False) + type: int + sample: 1532 +rc: + description: The return code for the command. + returned: always + type: int + sample: 0 +stdout: + description: The standard output from the command. + returned: always + type: str + sample: Success. +stderr: + description: The error output from the command. + returned: always + type: str + sample: Error 15 running E:\setup.exe +''' diff --git a/test/support/windows-integration/plugins/modules/win_reboot.py b/test/support/windows-integration/plugins/modules/win_reboot.py new file mode 100644 index 00000000..14318041 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_reboot.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_reboot +short_description: Reboot a windows machine +description: +- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands. +- For non-Windows targets, use the M(reboot) module instead. +version_added: '2.1' +options: + pre_reboot_delay: + description: + - Seconds to wait before reboot. Passed as a parameter to the reboot command. + type: int + default: 2 + aliases: [ pre_reboot_delay_sec ] + post_reboot_delay: + description: + - Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully. + - This is useful if you want wait for something to settle despite your connection already working. 
+ type: int + default: 0 + version_added: '2.4' + aliases: [ post_reboot_delay_sec ] + shutdown_timeout: + description: + - Maximum seconds to wait for shutdown to occur. + - Increase this timeout for very slow hardware, large update applications, etc. + - This option has been removed since Ansible 2.5 as the win_reboot behavior has changed. + type: int + default: 600 + aliases: [ shutdown_timeout_sec ] + reboot_timeout: + description: + - Maximum seconds to wait for machine to re-appear on the network and respond to a test command. + - This timeout is evaluated separately for both reboot verification and test command success so maximum clock time is actually twice this value. + type: int + default: 600 + aliases: [ reboot_timeout_sec ] + connect_timeout: + description: + - Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again. + type: int + default: 5 + aliases: [ connect_timeout_sec ] + test_command: + description: + - Command to expect success for to determine the machine is ready for management. + type: str + default: whoami + msg: + description: + - Message to display to users. + type: str + default: Reboot initiated by Ansible + boot_time_command: + description: + - Command to run that returns a unique string indicating the last time the system was booted. + - Setting this to a command that has different output each time it is run will cause the task to fail. + type: str + default: '(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime' + version_added: '2.10' +notes: +- If a shutdown was already scheduled on the system, C(win_reboot) will abort the scheduled shutdown and enforce its own shutdown. +- Beware that when C(win_reboot) returns, the Windows system may not have settled yet and some base services could be in limbo. + This can result in unexpected behavior. Check the examples for ways to mitigate this. +- The connection user must have the C(SeRemoteShutdownPrivilege) privilege enabled, see + U(https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/force-shutdown-from-a-remote-system) + for more information. +seealso: +- module: reboot +author: +- Matt Davis (@nitzmahone) +''' + +EXAMPLES = r''' +- name: Reboot the machine with all defaults + win_reboot: + +- name: Reboot a slow machine that might have lots of updates to apply + win_reboot: + reboot_timeout: 3600 + +# Install a Windows feature and reboot if necessary +- name: Install IIS Web-Server + win_feature: + name: Web-Server + register: iis_install + +- name: Reboot when Web-Server feature requires it + win_reboot: + when: iis_install.reboot_required + +# One way to ensure the system is reliable, is to set WinRM to a delayed startup +- name: Ensure WinRM starts when the system has settled and is ready to work reliably + win_service: + name: WinRM + start_mode: delayed + + +# Additionally, you can add a delay before running the next task +- name: Reboot a machine that takes time to settle after being booted + win_reboot: + post_reboot_delay: 120 + +# Or you can make win_reboot validate exactly what you need to work before running the next task +- name: Validate that the netlogon service has started, before running the next task + win_reboot: + test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"' +''' + +RETURN = r''' +rebooted: + description: True if the machine was rebooted. 
+ returned: always + type: bool + sample: true +elapsed: + description: The number of seconds that elapsed waiting for the system to be rebooted. + returned: always + type: float + sample: 23.2 +''' diff --git a/test/support/windows-integration/plugins/modules/win_regedit.ps1 b/test/support/windows-integration/plugins/modules/win_regedit.ps1 new file mode 100644 index 00000000..c56b4833 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_regedit.ps1 @@ -0,0 +1,495 @@ +#!powershell + +# Copyright: (c) 2015, Adam Keech +# Copyright: (c) 2015, Josh Ludwig +# Copyright: (c) 2017, Jordan Borean +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.PrivilegeUtil + +$params = Parse-Args -arguments $args -supports_check_mode $true +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false +$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false +$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP + +$path = Get-AnsibleParam -obj $params -name "path" -type "str" -failifempty $true -aliases "key" +$name = Get-AnsibleParam -obj $params -name "name" -type "str" -aliases "entry","value" +$data = Get-AnsibleParam -obj $params -name "data" +$type = Get-AnsibleParam -obj $params -name "type" -type "str" -default "string" -validateset "none","binary","dword","expandstring","multistring","string","qword" -aliases "datatype" +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent" +$delete_key = Get-AnsibleParam -obj $params -name "delete_key" -type "bool" -default $true +$hive = Get-AnsibleParam -obj $params -name "hive" -type "path" + +$result = @{ + changed = $false + data_changed = $false + data_type_changed = $false +} + +if ($diff_mode) { + $result.diff = @{ + before = "" + after = "" + } +} + +$registry_util = @' +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace Ansible.WinRegedit +{ + internal class NativeMethods + { + [DllImport("advapi32.dll", CharSet = CharSet.Unicode)] + public static extern int RegLoadKeyW( + UInt32 hKey, + string lpSubKey, + string lpFile); + + [DllImport("advapi32.dll", CharSet = CharSet.Unicode)] + public static extern int RegUnLoadKeyW( + UInt32 hKey, + string lpSubKey); + } + + public class Win32Exception : System.ComponentModel.Win32Exception + { + private string _msg; + public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { } + public Win32Exception(int errorCode, string message) : base(errorCode) + { + _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode); + } + public override string Message { get { return _msg; } } + public static explicit operator Win32Exception(string message) { return new Win32Exception(message); } + } + + public class Hive : IDisposable + { + private const UInt32 SCOPE = 0x80000002; // HKLM + private string hiveKey; + private bool loaded = false; + + public Hive(string hiveKey, string hivePath) + { + this.hiveKey = hiveKey; + int ret = NativeMethods.RegLoadKeyW(SCOPE, hiveKey, hivePath); + if (ret != 0) + throw new Win32Exception(ret, String.Format("Failed to load registry hive at {0}", hivePath)); + loaded = true; + } + + public static void UnloadHive(string hiveKey) + { + int ret = 
NativeMethods.RegUnLoadKeyW(SCOPE, hiveKey);
+            if (ret != 0)
+                throw new Win32Exception(ret, String.Format("Failed to unload registry hive at {0}", hiveKey));
+        }
+
+        public void Dispose()
+        {
+            if (loaded)
+            {
+                // Make sure the garbage collector disposes all unused handles and waits until it is complete
+                GC.Collect();
+                GC.WaitForPendingFinalizers();
+
+                UnloadHive(hiveKey);
+                loaded = false;
+            }
+            GC.SuppressFinalize(this);
+        }
+        ~Hive() { this.Dispose(); }
+    }
+}
+'@
+
+# fire a warning if the property name isn't specified, the (Default) key ($null) can only be a string
+if ($null -eq $name -and $type -ne "string") {
+    Add-Warning -obj $result -message "the data type when name is not specified can only be 'string', the type has automatically been converted"
+    $type = "string"
+}
+
+# Check that the registry path is in PSDrive format: HKCC, HKCR, HKCU, HKLM, HKU
+if ($path -notmatch "^HK(CC|CR|CU|LM|U):\\") {
+    Fail-Json $result "path: $path is not a valid powershell path, see module documentation for examples."
+}
+
+# Add a warning if the path does not contain a \ and is not the leaf path
+$registry_path = (Split-Path -Path $path -NoQualifier).Substring(1) # removes the hive: and leading \
+$registry_leaf = Split-Path -Path $path -Leaf
+if ($registry_path -ne $registry_leaf -and -not $registry_path.Contains('\')) {
+    $msg = "path is not using '\' as a separator, support for '/' as a separator will be removed in a future Ansible version"
+    Add-DeprecationWarning -obj $result -message $msg -version 2.12
+    $registry_path = $registry_path.Replace('/', '\')
+}
+
+# Simplified version of Convert-HexStringToByteArray from
+# https://cyber-defense.sans.org/blog/2010/02/11/powershell-byte-array-hex-convert
+# Expects a hex in the format you get when you run reg.exe export,
+# and converts to a byte array so powershell can modify binary registry entries
+# import format is like 'hex:be,ef,be,ef,be,ef,be,ef,be,ef'
+Function Convert-RegExportHexStringToByteArray($string) {
+    # Remove 'hex:' from the front of the string if present
+    $string = $string.ToLower() -replace '^hex\:',''
+
+    # Remove whitespace and any other non-hex crud.
+    $string = $string -replace '[^a-f0-9\\,x\-\:]',''
+
+    # Turn commas into colons
+    $string = $string -replace ',',':'
+
+    # Maybe there's nothing left over to convert...
+    if ($string.Length -eq 0) {
+        return ,@()
+    }
+
+    # Split string with or without colon delimiters.
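+    # For example (hypothetical inputs, shown for clarity; nothing here is called at this point):
+    #   Convert-RegExportHexStringToByteArray -string 'hex:be,ef'  # -> [byte[]]@(0xbe, 0xef)
+    #   Convert-RegExportHexStringToByteArray -string 'deadbeef'   # -> [byte[]]@(0xde, 0xad, 0xbe, 0xef)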
+ if ($string.Length -eq 1) { + return ,@([System.Convert]::ToByte($string,16)) + } elseif (($string.Length % 2 -eq 0) -and ($string.IndexOf(":") -eq -1)) { + return ,@($string -split '([a-f0-9]{2})' | foreach-object { if ($_) {[System.Convert]::ToByte($_,16)}}) + } elseif ($string.IndexOf(":") -ne -1) { + return ,@($string -split ':+' | foreach-object {[System.Convert]::ToByte($_,16)}) + } else { + return ,@() + } +} + +Function Compare-RegistryProperties($existing, $new) { + # Outputs $true if the property values don't match + if ($existing -is [Array]) { + (Compare-Object -ReferenceObject $existing -DifferenceObject $new -SyncWindow 0).Length -ne 0 + } else { + $existing -cne $new + } +} + +Function Get-DiffValue { + param( + [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryValueKind]$Type, + [Parameter(Mandatory=$true)][Object]$Value + ) + + $diff = @{ type = $Type.ToString(); value = $Value } + + $enum = [Microsoft.Win32.RegistryValueKind] + if ($Type -in @($enum::Binary, $enum::None)) { + $diff.value = [System.Collections.Generic.List`1[String]]@() + foreach ($dec_value in $Value) { + $diff.value.Add("0x{0:x2}" -f $dec_value) + } + } elseif ($Type -eq $enum::DWord) { + $diff.value = "0x{0:x8}" -f $Value + } elseif ($Type -eq $enum::QWord) { + $diff.value = "0x{0:x16}" -f $Value + } + + return $diff +} + +Function Set-StateAbsent { + param( + # Used for diffs and exception messages to match up against Ansible input + [Parameter(Mandatory=$true)][String]$PrintPath, + [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryKey]$Hive, + [Parameter(Mandatory=$true)][String]$Path, + [String]$Name, + [Switch]$DeleteKey + ) + + $key = $Hive.OpenSubKey($Path, $true) + if ($null -eq $key) { + # Key does not exist, no need to delete anything + return + } + + try { + if ($DeleteKey -and -not $Name) { + # delete_key=yes is set and name is null/empty, so delete the entire key + $key.Dispose() + $key = $null + if (-not $check_mode) { + try { + $Hive.DeleteSubKeyTree($Path, $false) + } catch { + Fail-Json -obj $result -message "failed to delete registry key at $($PrintPath): $($_.Exception.Message)" + } + } + $result.changed = $true + + if ($diff_mode) { + $result.diff.before = @{$PrintPath = @{}} + $result.diff.after = @{} + } + } else { + # delete_key=no or name is not null/empty, delete the property not the full key + $property = $key.GetValue($Name) + if ($null -eq $property) { + # property does not exist + return + } + $property_type = $key.GetValueKind($Name) # used for the diff + + if (-not $check_mode) { + try { + $key.DeleteValue($Name) + } catch { + Fail-Json -obj $result -message "failed to delete registry property '$Name' at $($PrintPath): $($_.Exception.Message)" + } + } + + $result.changed = $true + if ($diff_mode) { + $diff_value = Get-DiffValue -Type $property_type -Value $property + $result.diff.before = @{ $PrintPath = @{ $Name = $diff_value } } + $result.diff.after = @{ $PrintPath = @{} } + } + } + } finally { + if ($key) { + $key.Dispose() + } + } +} + +Function Set-StatePresent { + param( + [Parameter(Mandatory=$true)][String]$PrintPath, + [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryKey]$Hive, + [Parameter(Mandatory=$true)][String]$Path, + [String]$Name, + [Object]$Data, + [Microsoft.Win32.RegistryValueKind]$Type + ) + + $key = $Hive.OpenSubKey($Path, $true) + try { + if ($null -eq $key) { + # the key does not exist, create it so the next steps work + if (-not $check_mode) { + try { + $key = $Hive.CreateSubKey($Path) + } catch { + Fail-Json -obj $result -message 
"failed to create registry key at $($PrintPath): $($_.Exception.Message)" + } + } + $result.changed = $true + + if ($diff_mode) { + $result.diff.before = @{} + $result.diff.after = @{$PrintPath = @{}} + } + } elseif ($diff_mode) { + # Make sure the diff is in an expected state for the key + $result.diff.before = @{$PrintPath = @{}} + $result.diff.after = @{$PrintPath = @{}} + } + + if ($null -eq $key -or $null -eq $Data) { + # Check mode and key was created above, we cannot do any more work, or $Data is $null which happens when + # we create a new key but haven't explicitly set the data + return + } + + $property = $key.GetValue($Name, $null, [Microsoft.Win32.RegistryValueOptions]::DoNotExpandEnvironmentNames) + if ($null -ne $property) { + # property exists, need to compare the values and type + $existing_type = $key.GetValueKind($name) + $change_value = $false + + if ($Type -ne $existing_type) { + $change_value = $true + $result.data_type_changed = $true + $data_mismatch = Compare-RegistryProperties -existing $property -new $Data + if ($data_mismatch) { + $result.data_changed = $true + } + } else { + $data_mismatch = Compare-RegistryProperties -existing $property -new $Data + if ($data_mismatch) { + $change_value = $true + $result.data_changed = $true + } + } + + if ($change_value) { + if (-not $check_mode) { + try { + $key.SetValue($Name, $Data, $Type) + } catch { + Fail-Json -obj $result -message "failed to change registry property '$Name' at $($PrintPath): $($_.Exception.Message)" + } + } + $result.changed = $true + + if ($diff_mode) { + $result.diff.before.$PrintPath.$Name = Get-DiffValue -Type $existing_type -Value $property + $result.diff.after.$PrintPath.$Name = Get-DiffValue -Type $Type -Value $Data + } + } elseif ($diff_mode) { + $diff_value = Get-DiffValue -Type $existing_type -Value $property + $result.diff.before.$PrintPath.$Name = $diff_value + $result.diff.after.$PrintPath.$Name = $diff_value + } + } else { + # property doesn't exist just create a new one + if (-not $check_mode) { + try { + $key.SetValue($Name, $Data, $Type) + } catch { + Fail-Json -obj $result -message "failed to create registry property '$Name' at $($PrintPath): $($_.Exception.Message)" + } + } + $result.changed = $true + + if ($diff_mode) { + $result.diff.after.$PrintPath.$Name = Get-DiffValue -Type $Type -Value $Data + } + } + } finally { + if ($key) { + $key.Dispose() + } + } +} + +# convert property names "" to $null as "" refers to (Default) +if ($name -eq "") { + $name = $null +} + +# convert the data to the required format +if ($type -in @("binary", "none")) { + if ($null -eq $data) { + $data = "" + } + + # convert the data from string to byte array if in hex: format + if ($data -is [String]) { + $data = [byte[]](Convert-RegExportHexStringToByteArray -string $data) + } elseif ($data -is [Int]) { + if ($data -gt 255) { + Fail-Json $result "cannot convert binary data '$data' to byte array, please specify this value as a yaml byte array or a comma separated hex value string" + } + $data = [byte[]]@([byte]$data) + } elseif ($data -is [Array]) { + $data = [byte[]]$data + } +} elseif ($type -in @("dword", "qword")) { + # dword's and dword's don't allow null values, set to 0 + if ($null -eq $data) { + $data = 0 + } + + if ($data -is [String]) { + # if the data is a string we need to convert it to an unsigned int64 + # it needs to be unsigned as Ansible passes in an unsigned value while + # powershell uses a signed data type. 
The value will then be converted + # below + $data = [UInt64]$data + } + + if ($type -eq "dword") { + if ($data -gt [UInt32]::MaxValue) { + Fail-Json $result "data cannot be larger than 0xffffffff when type is dword" + } elseif ($data -gt [Int32]::MaxValue) { + # when dealing with larger int32 (> 2147483647 or 0x7FFFFFFF) powershell + # automatically converts it to a signed int64. We need to convert this to + # signed int32 by parsing the hex string value. + $data = "0x$("{0:x}" -f $data)" + } + $data = [Int32]$data + } else { + if ($data -gt [UInt64]::MaxValue) { + Fail-Json $result "data cannot be larger than 0xffffffffffffffff when type is qword" + } elseif ($data -gt [Int64]::MaxValue) { + $data = "0x$("{0:x}" -f $data)" + } + $data = [Int64]$data + } +} elseif ($type -in @("string", "expandstring") -and $name) { + # a null string or expandstring must be empty quotes + # Only do this if $name has been defined (not the default key) + if ($null -eq $data) { + $data = "" + } +} elseif ($type -eq "multistring") { + # convert the data for a multistring to a String[] array + if ($null -eq $data) { + $data = [String[]]@() + } elseif ($data -isnot [Array]) { + $new_data = New-Object -TypeName String[] -ArgumentList 1 + $new_data[0] = $data.ToString([CultureInfo]::InvariantCulture) + $data = $new_data + } else { + $new_data = New-Object -TypeName String[] -ArgumentList $data.Count + foreach ($entry in $data) { + $new_data[$data.IndexOf($entry)] = $entry.ToString([CultureInfo]::InvariantCulture) + } + $data = $new_data + } +} + +# convert the type string to the .NET class +$type = [System.Enum]::Parse([Microsoft.Win32.RegistryValueKind], $type, $true) + +$registry_hive = switch(Split-Path -Path $path -Qualifier) { + "HKCR:" { [Microsoft.Win32.Registry]::ClassesRoot } + "HKCC:" { [Microsoft.Win32.Registry]::CurrentConfig } + "HKCU:" { [Microsoft.Win32.Registry]::CurrentUser } + "HKLM:" { [Microsoft.Win32.Registry]::LocalMachine } + "HKU:" { [Microsoft.Win32.Registry]::Users } +} +$loaded_hive = $null +try { + if ($hive) { + if (-not (Test-Path -LiteralPath $hive)) { + Fail-Json -obj $result -message "hive at path '$hive' is not valid or accessible, cannot load hive" + } + + $original_tmp = $env:TMP + $env:TMP = $_remote_tmp + Add-Type -TypeDefinition $registry_util + $env:TMP = $original_tmp + + try { + Set-AnsiblePrivilege -Name SeBackupPrivilege -Value $true + Set-AnsiblePrivilege -Name SeRestorePrivilege -Value $true + } catch [System.ComponentModel.Win32Exception] { + Fail-Json -obj $result -message "failed to enable SeBackupPrivilege and SeRestorePrivilege for the current process: $($_.Exception.Message)" + } + + if (Test-Path -Path HKLM:\ANSIBLE) { + Add-Warning -obj $result -message "hive already loaded at HKLM:\ANSIBLE, had to unload hive for win_regedit to continue" + try { + [Ansible.WinRegedit.Hive]::UnloadHive("ANSIBLE") + } catch [System.ComponentModel.Win32Exception] { + Fail-Json -obj $result -message "failed to unload registry hive HKLM:\ANSIBLE from $($hive): $($_.Exception.Message)" + } + } + + try { + $loaded_hive = New-Object -TypeName Ansible.WinRegedit.Hive -ArgumentList "ANSIBLE", $hive + } catch [System.ComponentModel.Win32Exception] { + Fail-Json -obj $result -message "failed to load registry hive from '$hive' to HKLM:\ANSIBLE: $($_.Exception.Message)" + } + } + + if ($state -eq "present") { + Set-StatePresent -PrintPath $path -Hive $registry_hive -Path $registry_path -Name $name -Data $data -Type $type + } else { + Set-StateAbsent -PrintPath $path -Hive $registry_hive 
-Path $registry_path -Name $name -DeleteKey:$delete_key + } +} finally { + $registry_hive.Dispose() + if ($loaded_hive) { + $loaded_hive.Dispose() + } +} + +Exit-Json $result + diff --git a/test/support/windows-integration/plugins/modules/win_regedit.py b/test/support/windows-integration/plugins/modules/win_regedit.py new file mode 100644 index 00000000..2c0fff71 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_regedit.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Adam Keech +# Copyright: (c) 2015, Josh Ludwig +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + + +DOCUMENTATION = r''' +--- +module: win_regedit +version_added: '2.0' +short_description: Add, change, or remove registry keys and values +description: +- Add, modify or remove registry keys and values. +- More information about the windows registry from Wikipedia + U(https://en.wikipedia.org/wiki/Windows_Registry). +options: + path: + description: + - Name of the registry path. + - 'Should be in one of the following registry hives: HKCC, HKCR, HKCU, + HKLM, HKU.' + type: str + required: yes + aliases: [ key ] + name: + description: + - Name of the registry entry in the above C(path) parameters. + - If not provided, or empty then the '(Default)' property for the key will + be used. + type: str + aliases: [ entry, value ] + data: + description: + - Value of the registry entry C(name) in C(path). + - If not specified then the value for the property will be null for the + corresponding C(type). + - Binary and None data should be expressed in a yaml byte array or as comma + separated hex values. + - An easy way to generate this is to run C(regedit.exe) and use the + I(export) option to save the registry values to a file. + - In the exported file, binary value will look like C(hex:be,ef,be,ef), the + C(hex:) prefix is optional. + - DWORD and QWORD values should either be represented as a decimal number + or a hex value. + - Multistring values should be passed in as a list. + - See the examples for more details on how to format this data. + type: str + type: + description: + - The registry value data type. + type: str + choices: [ binary, dword, expandstring, multistring, string, qword ] + default: string + aliases: [ datatype ] + state: + description: + - The state of the registry entry. + type: str + choices: [ absent, present ] + default: present + delete_key: + description: + - When C(state) is 'absent' then this will delete the entire key. + - If C(no) then it will only clear out the '(Default)' property for + that key. + type: bool + default: yes + version_added: '2.4' + hive: + description: + - A path to a hive key like C:\Users\Default\NTUSER.DAT to load in the + registry. + - This hive is loaded under the HKLM:\ANSIBLE key which can then be used + in I(name) like any other path. + - This can be used to load the default user profile registry hive or any + other hive saved as a file. + - Using this function requires the user to have the C(SeRestorePrivilege) + and C(SeBackupPrivilege) privileges enabled. + type: path + version_added: '2.5' +notes: +- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before + applying changes. 
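+# For example, both flags can be combined in a single ad-hoc run (hypothetical host and values):
+#   ansible winhost -m win_regedit -a "path=HKCU:\Software\MyCompany name=hello data=world" --check --diff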
+- Beware that some registry hives (C(HKEY_USERS) in particular) do not allow creating new registry paths in the root folder.
+- Since Ansible 2.4, when checking if a string registry value has changed, a case-sensitive test is used. Previously the test was case-insensitive.
+seealso:
+- module: win_reg_stat
+- module: win_regmerge
+author:
+- Adam Keech (@smadam813)
+- Josh Ludwig (@joshludwig)
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Create registry path MyCompany
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+
+- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: world
+
+- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 1337 as the decimal value
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: 1337
+    type: dword
+
+- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 0xff2500ae as the hex value
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: 0xff2500ae
+    type: dword
+
+- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in hex-string format
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
+    type: binary
+
+- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in yaml format
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
+    type: binary
+
+- name: Add or update registry path MyCompany, with expand string entry 'hello'
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: '%appdata%\local'
+    type: expandstring
+
+- name: Add or update registry path MyCompany, with multi string entry 'hello'
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    data: ['hello', 'world']
+    type: multistring
+
+- name: Disable keyboard layout hotkey for all users (changes existing)
+  win_regedit:
+    path: HKU:\.DEFAULT\Keyboard Layout\Toggle
+    name: Layout Hotkey
+    data: 3
+    type: dword
+
+- name: Disable language hotkey for the current user (adds new)
+  win_regedit:
+    path: HKCU:\Keyboard Layout\Toggle
+    name: Language Hotkey
+    data: 3
+    type: dword
+
+- name: Remove registry path MyCompany (including all entries it contains)
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    state: absent
+    delete_key: yes
+
+- name: Clear the existing (Default) entry at path MyCompany
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    state: absent
+    delete_key: no
+
+- name: Remove entry 'hello' from registry path MyCompany
+  win_regedit:
+    path: HKCU:\Software\MyCompany
+    name: hello
+    state: absent
+
+- name: Change default mouse trailing settings for new users
+  win_regedit:
+    path: HKLM:\ANSIBLE\Control Panel\Mouse
+    name: MouseTrails
+    data: 10
+    type: string
+    state: present
+    hive: C:\Users\Default\NTUSER.dat
+'''
+
+RETURN = r'''
+data_changed:
+  description: Whether this invocation changed the data in the registry value.
+  returned: success
+  type: bool
+  sample: false
+data_type_changed:
+  description: Whether this invocation changed the datatype of the registry value.
+ returned: success + type: bool + sample: true +''' diff --git a/test/support/windows-integration/plugins/modules/win_security_policy.ps1 b/test/support/windows-integration/plugins/modules/win_security_policy.ps1 new file mode 100644 index 00000000..274204b6 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_security_policy.ps1 @@ -0,0 +1,196 @@ +#!powershell + +# Copyright: (c) 2017, Jordan Borean +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy + +$ErrorActionPreference = 'Stop' + +$params = Parse-Args $args -supports_check_mode $true +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false +$diff_mode = Get-AnsibleParam -obj $Params -name "_ansible_diff" -type "bool" -default $false + +$section = Get-AnsibleParam -obj $params -name "section" -type "str" -failifempty $true +$key = Get-AnsibleParam -obj $params -name "key" -type "str" -failifempty $true +$value = Get-AnsibleParam -obj $params -name "value" -failifempty $true + +$result = @{ + changed = $false + section = $section + key = $key + value = $value +} + +if ($diff_mode) { + $result.diff = @{} +} + +Function Run-SecEdit($arguments) { + $stdout = $null + $stderr = $null + $log_path = [IO.Path]::GetTempFileName() + $arguments = $arguments + @("/log", $log_path) + + try { + $stdout = &SecEdit.exe $arguments | Out-String + } catch { + $stderr = $_.Exception.Message + } + $log = Get-Content -Path $log_path + Remove-Item -Path $log_path -Force + + $return = @{ + log = ($log -join "`n").Trim() + stdout = $stdout + stderr = $stderr + rc = $LASTEXITCODE + } + + return $return +} + +Function Export-SecEdit() { + $secedit_ini_path = [IO.Path]::GetTempFileName() + # while this will technically make a change to the system in check mode by + # creating a new file, we need these values to be able to do anything + # substantial in check mode + $export_result = Run-SecEdit -arguments @("/export", "/cfg", $secedit_ini_path, "/quiet") + + # check the return code and if the file has been populated, otherwise error out + if (($export_result.rc -ne 0) -or ((Get-Item -Path $secedit_ini_path).Length -eq 0)) { + Remove-Item -Path $secedit_ini_path -Force + $result.rc = $export_result.rc + $result.stdout = $export_result.stdout + $result.stderr = $export_result.stderr + Fail-Json $result "Failed to export secedit.ini file to $($secedit_ini_path)" + } + $secedit_ini = ConvertFrom-Ini -file_path $secedit_ini_path + + return $secedit_ini +} + +Function Import-SecEdit($ini) { + $secedit_ini_path = [IO.Path]::GetTempFileName() + $secedit_db_path = [IO.Path]::GetTempFileName() + Remove-Item -Path $secedit_db_path -Force # needs to be deleted for SecEdit.exe /import to work + + $ini_contents = ConvertTo-Ini -ini $ini + Set-Content -Path $secedit_ini_path -Value $ini_contents + $result.changed = $true + + $import_result = Run-SecEdit -arguments @("/configure", "/db", $secedit_db_path, "/cfg", $secedit_ini_path, "/quiet") + $result.import_log = $import_result.log + Remove-Item -Path $secedit_ini_path -Force + if ($import_result.rc -ne 0) { + $result.rc = $import_result.rc + $result.stdout = $import_result.stdout + $result.stderr = $import_result.stderr + Fail-Json $result "Failed to import secedit.ini file from $($secedit_ini_path)" + } +} + +Function ConvertTo-Ini($ini) { + $content = @() + foreach ($key in $ini.GetEnumerator()) { + $section = $key.Name + $values = $key.Value + + $content += "[$section]" 
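+        # The loop below emits one "key = value" line per entry under the
+        # section header added above, e.g. (hypothetical values):
+        #   [System Access]
+        #   MaximumPasswordAge = 15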
+ foreach ($value in $values.GetEnumerator()) { + $value_key = $value.Name + $value_value = $value.Value + + if ($null -ne $value_value) { + $content += "$value_key = $value_value" + } + } + } + + return $content -join "`r`n" +} + +Function ConvertFrom-Ini($file_path) { + $ini = @{} + switch -Regex -File $file_path { + "^\[(.+)\]" { + $section = $matches[1] + $ini.$section = @{} + } + "(.+?)\s*=(.*)" { + $name = $matches[1].Trim() + $value = $matches[2].Trim() + if ($value -match "^\d+$") { + $value = [int]$value + } elseif ($value.StartsWith('"') -and $value.EndsWith('"')) { + $value = $value.Substring(1, $value.Length - 2) + } + + $ini.$section.$name = $value + } + } + + return $ini +} + +if ($section -eq "Privilege Rights") { + Add-Warning -obj $result -message "Using this module to edit rights and privileges is error-prone, use the win_user_right module instead" +} + +$will_change = $false +$secedit_ini = Export-SecEdit +if (-not ($secedit_ini.ContainsKey($section))) { + Fail-Json $result "The section '$section' does not exist in SecEdit.exe output ini" +} + +if ($secedit_ini.$section.ContainsKey($key)) { + $current_value = $secedit_ini.$section.$key + + if ($current_value -cne $value) { + if ($diff_mode) { + $result.diff.prepared = @" +[$section] +-$key = $current_value ++$key = $value +"@ + } + + $secedit_ini.$section.$key = $value + $will_change = $true + } +} elseif ([string]$value -eq "") { + # Value is requested to be removed, and has already been removed, do nothing +} else { + if ($diff_mode) { + $result.diff.prepared = @" +[$section] ++$key = $value +"@ + } + $secedit_ini.$section.$key = $value + $will_change = $true +} + +if ($will_change -eq $true) { + $result.changed = $true + if (-not $check_mode) { + Import-SecEdit -ini $secedit_ini + + # secedit doesn't error out on improper entries, re-export and verify + # the changes occurred + $verification_ini = Export-SecEdit + $new_section_values = $verification_ini.$section + if ($new_section_values.ContainsKey($key)) { + $new_value = $new_section_values.$key + if ($new_value -cne $value) { + Fail-Json $result "Failed to change the value for key '$key' in section '$section', the value is still $new_value" + } + } elseif ([string]$value -eq "") { + # Value was empty, so OK if no longer in the result + } else { + Fail-Json $result "The key '$key' in section '$section' is not a valid key, cannot set this value" + } + } +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_security_policy.py b/test/support/windows-integration/plugins/modules/win_security_policy.py new file mode 100644 index 00000000..d582a532 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_security_policy.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub, actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_security_policy +version_added: '2.4' +short_description: Change local security policy settings +description: +- Allows you to set the local security policies that are configured by + SecEdit.exe. +options: + section: + description: + - The ini section the key exists in. + - If the section does not exist then the module will return an error. 
+    - Example sections to use are 'Account Policies', 'Local Policies',
+      'Event Log', 'Restricted Groups', 'System Services', 'Registry' and
+      'File System'.
+    - If you want to edit the C(Privilege Rights) section, use the
+      M(win_user_right) module instead.
+    type: str
+    required: yes
+  key:
+    description:
+    - The ini key of the section or policy name to modify.
+    - The module will return an error if this key is invalid.
+    type: str
+    required: yes
+  value:
+    description:
+    - The value for the ini key or policy name.
+    - If the key takes in a boolean value then 0 = False and 1 = True.
+    type: str
+    required: yes
+notes:
+- This module uses the SecEdit.exe tool to configure the values; more details
+  of the areas and keys that can be configured can be found at
+  U(https://msdn.microsoft.com/en-us/library/bb742512.aspx).
+- If you are in a domain environment these policies may be set by a GPO policy;
+  this module can temporarily change these values, but the GPO will override
+  them if the value differs.
+- You can also run C(SecEdit.exe /export /cfg C:\temp\output.ini) to view the
+  current policies set on your system.
+- When assigning user rights, use the M(win_user_right) module instead.
+seealso:
+- module: win_user_right
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Change the guest account name
+  win_security_policy:
+    section: System Access
+    key: NewGuestName
+    value: Guest Account
+
+- name: Set the maximum password age
+  win_security_policy:
+    section: System Access
+    key: MaximumPasswordAge
+    value: 15
+
+- name: Do not store passwords using reversible encryption
+  win_security_policy:
+    section: System Access
+    key: ClearTextPassword
+    value: 0
+
+- name: Enable system events
+  win_security_policy:
+    section: Event Audit
+    key: AuditSystemEvents
+    value: 1
+'''
+
+RETURN = r'''
+rc:
+  description: The return code after a failure when running SecEdit.exe.
+  returned: failure with secedit calls
+  type: int
+  sample: -1
+stdout:
+  description: The output of the STDOUT buffer after a failure when running
+    SecEdit.exe.
+  returned: failure with secedit calls
+  type: str
+  sample: check log for error details
+stderr:
+  description: The output of the STDERR buffer after a failure when running
+    SecEdit.exe.
+  returned: failure with secedit calls
+  type: str
+  sample: failed to import security policy
+import_log:
+  description: The log of the SecEdit.exe /configure job that configured the
+    local policies. This is used for debugging purposes on failures.
+  returned: secedit.exe /import run and change occurred
+  type: str
+  sample: Completed 6 percent (0/15) \tProcess Privilege Rights area.
+key:
+  description: The key in the section passed to the module to modify.
+  returned: success
+  type: str
+  sample: NewGuestName
+section:
+  description: The section passed to the module to modify.
+  returned: success
+  type: str
+  sample: System Access
+value:
+  description: The value passed to the module to set.
+  returned: success
+  type: str
+  sample: Guest Account
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_shell.ps1 b/test/support/windows-integration/plugins/modules/win_shell.ps1
new file mode 100644
index 00000000..54aef8de
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_shell.ps1
@@ -0,0 +1,138 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+# Cleanse CLIXML from stderr (sift out error stream data, discard others for now)
+Function Cleanse-Stderr($raw_stderr) {
+    Try {
+        # NB: this regex isn't perfect, but is decent at finding CLIXML amongst other stderr noise
+        If($raw_stderr -match "(?s)(?<prenoise1>.*)#< CLIXML(?<prenoise2>.*)(?<clixml><Objs.+</Objs>)(?<postnoise>.*)") {
+            $clixml = [xml]$matches["clixml"]
+
+            $merged_stderr = "{0}{1}{2}{3}" -f @(
+                $matches["prenoise1"],
+                $matches["prenoise2"],
+                # filter out just the Error-tagged strings for now, and zap embedded CRLF chars
+                ($clixml.Objs.ChildNodes | Where-Object { $_.Name -eq 'S' } | Where-Object { $_.S -eq 'Error' } | ForEach-Object { $_.'#text'.Replace('_x000D__x000A_','') } | Out-String),
+                $matches["postnoise"]) | Out-String
+
+            return $merged_stderr.Trim()
+
+            # FUTURE: parse/return other streams
+        }
+        Else {
+            $raw_stderr
+        }
+    }
+    Catch {
+        "***EXCEPTION PARSING CLIXML: $_***"
+        $raw_stderr
+    }
+}
+
+$params = Parse-Args $args -supports_check_mode $false
+
+$raw_command_line = Get-AnsibleParam -obj $params -name "_raw_params" -type "str" -failifempty $true
+$chdir = Get-AnsibleParam -obj $params -name "chdir" -type "path"
+$executable = Get-AnsibleParam -obj $params -name "executable" -type "path"
+$creates = Get-AnsibleParam -obj $params -name "creates" -type "path"
+$removes = Get-AnsibleParam -obj $params -name "removes" -type "path"
+$stdin = Get-AnsibleParam -obj $params -name "stdin" -type "str"
+$no_profile = Get-AnsibleParam -obj $params -name "no_profile" -type "bool" -default $false
+$output_encoding_override = Get-AnsibleParam -obj $params -name "output_encoding_override" -type "str"
+
+$raw_command_line = $raw_command_line.Trim()
+
+$result = @{
+    changed = $true
+    cmd = $raw_command_line
+}
+
+if ($creates -and $(Test-AnsiblePath -Path $creates)) {
+    Exit-Json @{msg="skipped, since $creates exists";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+if ($removes -and -not $(Test-AnsiblePath -Path $removes)) {
+    Exit-Json @{msg="skipped, since $removes does not exist";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+$exec_args = $null
+If(-not $executable -or $executable -eq "powershell") {
+    $exec_application = "powershell.exe"
+
+    # force input encoding to preamble-free UTF8 so PS sub-processes (eg, Start-Job) don't blow up
+    $raw_command_line = "[Console]::InputEncoding = New-Object Text.UTF8Encoding `$false; " + $raw_command_line
+
+    # Base64 encode the command so we don't have to worry about the various levels of escaping
+    $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_command_line))
+
+    if ($stdin) {
+        $exec_args = "-encodedcommand $encoded_command"
+    } else {
+        $exec_args = "-noninteractive -encodedcommand $encoded_command"
+    }
+
+    if ($no_profile) {
+        $exec_args = "-noprofile $exec_args"
+    }
+}
+Else { + # FUTURE: support arg translation from executable (or executable_args?) to process arguments for arbitrary interpreter? + $exec_application = $executable + if (-not ($exec_application.EndsWith(".exe"))) { + $exec_application = "$($exec_application).exe" + } + $exec_args = "/c $raw_command_line" +} + +$command = "`"$exec_application`" $exec_args" +$run_command_arg = @{ + command = $command +} +if ($chdir) { + $run_command_arg['working_directory'] = $chdir +} +if ($stdin) { + $run_command_arg['stdin'] = $stdin +} +if ($output_encoding_override) { + $run_command_arg['output_encoding_override'] = $output_encoding_override +} + +$start_datetime = [DateTime]::UtcNow +try { + $command_result = Run-Command @run_command_arg +} catch { + $result.changed = $false + try { + $result.rc = $_.Exception.NativeErrorCode + } catch { + $result.rc = 2 + } + Fail-Json -obj $result -message $_.Exception.Message +} + +# TODO: decode CLIXML stderr output (and other streams?) +$result.stdout = $command_result.stdout +$result.stderr = Cleanse-Stderr $command_result.stderr +$result.rc = $command_result.rc + +$end_datetime = [DateTime]::UtcNow +$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff") + +If ($result.rc -ne 0) { + Fail-Json -obj $result -message "non-zero return code" +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_shell.py b/test/support/windows-integration/plugins/modules/win_shell.py new file mode 100644 index 00000000..ee2cd762 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_shell.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Ansible, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_shell +short_description: Execute shell commands on target hosts +version_added: 2.2 +description: + - The C(win_shell) module takes the command name followed by a list of space-delimited arguments. + It is similar to the M(win_command) module, but runs + the command via a shell (defaults to PowerShell) on the target host. + - For non-Windows targets, use the M(shell) module instead. +options: + free_form: + description: + - The C(win_shell) module takes a free form command to run. + - There is no parameter actually named 'free form'. See the examples! + type: str + required: yes + creates: + description: + - A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped. + type: path + removes: + description: + - A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped. + type: path + chdir: + description: + - Set the specified path as the current working directory before executing a command + type: path + executable: + description: + - Change the shell used to execute the command (eg, C(cmd)). + - The target shell must accept a C(/c) parameter followed by the raw command line to be executed. + type: path + stdin: + description: + - Set the stdin of the command directly to the specified value. + type: str + version_added: '2.5' + no_profile: + description: + - Do not load the user profile before running a command. 
This is only valid
+      when using PowerShell as the executable.
+    type: bool
+    default: no
+    version_added: '2.8'
+  output_encoding_override:
+    description:
+    - This option overrides the encoding of stdout/stderr output.
+    - You can use this option when you need to run a command which ignores the console's codepage.
+    - You should only need to use this option in very rare circumstances.
+    - This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
+      See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
+    type: str
+    version_added: '2.10'
+notes:
+  - If you want to run an executable securely and predictably, it may be
+    better to use the M(win_command) module instead. Best practices when writing
+    playbooks will follow the trend of using M(win_command) unless C(win_shell) is
+    explicitly required. When running ad-hoc commands, use your best judgement.
+  - WinRM will not return from a command execution until all child processes created have exited.
+    Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
+    Consider creating a Windows service for managing background processes.
+seealso:
+- module: psexec
+- module: raw
+- module: script
+- module: shell
+- module: win_command
+- module: win_psexec
+author:
+  - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+# Execute a command in the remote shell; stdout goes to the specified
+# file on the remote.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+
+# Change the working directory to somedir/ before executing the command.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
+
+# You can also use the 'args' form to provide the options. This command
+# will change the working directory to somedir/ and will only run when
+# somedir/somelog.txt doesn't exist.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+  args:
+    chdir: C:\somedir
+    creates: C:\somelog.txt
+
+# Run a command under a non-Powershell interpreter (cmd in this case)
+- win_shell: echo %HOMEDIR%
+  args:
+    executable: cmd
+  register: homedir_out
+
+- name: Run multi-lined shell commands
+  win_shell: |
+    $value = Test-Path -Path C:\temp
+    if ($value) {
+        Remove-Item -Path C:\temp -Force
+    }
+    New-Item -Path C:\temp -ItemType Directory
+
+- name: Retrieve the input based on stdin
+  win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
+  args:
+    stdin: Input message
+'''
+
+RETURN = r'''
+msg:
+    description: Changed.
+    returned: always
+    type: bool
+    sample: true
+start:
+    description: The command execution start time.
+    returned: always
+    type: str
+    sample: '2016-02-25 09:18:26.429568'
+end:
+    description: The command execution end time.
+    returned: always
+    type: str
+    sample: '2016-02-25 09:18:26.755339'
+delta:
+    description: The command execution delta time.
+    returned: always
+    type: str
+    sample: '0:00:00.325771'
+stdout:
+    description: The command standard output.
+    returned: always
+    type: str
+    sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+    description: The command standard error.
+    returned: always
+    type: str
+    sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+    description: The command executed by the task.
+    returned: always
+    type: str
+    sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+    description: The command return code (0 means success).
+    returned: always
+    type: int
+    sample: 0
+stdout_lines:
+    description: The command standard output split in lines.
+ returned: always + type: list + sample: [u'Clustering node rabbit@slave1 with rabbit@master ...'] +''' diff --git a/test/support/windows-integration/plugins/modules/win_stat.ps1 b/test/support/windows-integration/plugins/modules/win_stat.ps1 new file mode 100644 index 00000000..071eb11c --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_stat.ps1 @@ -0,0 +1,186 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.Basic +#Requires -Module Ansible.ModuleUtils.FileUtil +#Requires -Module Ansible.ModuleUtils.LinkUtil + +function ConvertTo-Timestamp($start_date, $end_date) { + if ($start_date -and $end_date) { + return (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds + } +} + +function Get-FileChecksum($path, $algorithm) { + switch ($algorithm) { + 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider } + 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider } + 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider } + 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider } + 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider } + default { Fail-Json -obj $result -message "Unsupported hash algorithm supplied '$algorithm'" } + } + + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite) + try { + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower() + } finally { + $fp.Dispose() + } + + return $hash +} + +function Get-FileInfo { + param([String]$Path, [Switch]$Follow) + + $info = Get-AnsibleItem -Path $Path -ErrorAction SilentlyContinue + $link_info = $null + if ($null -ne $info) { + try { + $link_info = Get-Link -link_path $info.FullName + } catch { + $module.Warn("Failed to check/get link info for file: $($_.Exception.Message)") + } + + # If follow=true we want to follow the link all the way back to root object + if ($Follow -and $null -ne $link_info -and $link_info.Type -in @("SymbolicLink", "JunctionPoint")) { + $info, $link_info = Get-FileInfo -Path $link_info.AbsolutePath -Follow + } + } + + return $info, $link_info +} + +$spec = @{ + options = @{ + path = @{ type='path'; required=$true; aliases=@( 'dest', 'name' ) } + get_checksum = @{ type='bool'; default=$true } + checksum_algorithm = @{ type='str'; default='sha1'; choices=@( 'md5', 'sha1', 'sha256', 'sha384', 'sha512' ) } + follow = @{ type='bool'; default=$false } + } + supports_check_mode = $true +} + +$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + +$path = $module.Params.path +$get_checksum = $module.Params.get_checksum +$checksum_algorithm = $module.Params.checksum_algorithm +$follow = $module.Params.follow + +$module.Result.stat = @{ exists=$false } + +Load-LinkUtils +$info, $link_info = Get-FileInfo -Path $path -Follow:$follow +If ($null -ne $info) { + $epoch_date = Get-Date -Date "01/01/1970" + $attributes = @() + foreach ($attribute in ($info.Attributes -split ',')) { + $attributes += $attribute.Trim() + } + + # default values that are always set, specific values are set below this + # but are kept commented for easier readability + $stat = @{ + exists = $true + attributes = $info.Attributes.ToString() + isarchive = ($attributes -contains 
"Archive") + isdir = $false + ishidden = ($attributes -contains "Hidden") + isjunction = $false + islnk = $false + isreadonly = ($attributes -contains "ReadOnly") + isreg = $false + isshared = $false + nlink = 1 # Number of links to the file (hard links), overriden below if islnk + # lnk_target = islnk or isjunction Target of the symlink. Note that relative paths remain relative + # lnk_source = islnk os isjunction Target of the symlink normalized for the remote filesystem + hlnk_targets = @() + creationtime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.CreationTime) + lastaccesstime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastAccessTime) + lastwritetime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastWriteTime) + # size = a file and directory - calculated below + path = $info.FullName + filename = $info.Name + # extension = a file + # owner = set outsite this dict in case it fails + # sharename = a directory and isshared is True + # checksum = a file and get_checksum: True + } + try { + $stat.owner = $info.GetAccessControl().Owner + } catch { + # may not have rights, historical behaviour was to just set to $null + # due to ErrorActionPreference being set to "Continue" + $stat.owner = $null + } + + # values that are set according to the type of file + if ($info.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) { + $stat.isdir = $true + $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($stat.path -replace '\\', '\\')'" + if ($null -ne $share_info) { + $stat.isshared = $true + $stat.sharename = $share_info.Name + } + + try { + $size = 0 + foreach ($file in $info.EnumerateFiles("*", [System.IO.SearchOption]::AllDirectories)) { + $size += $file.Length + } + $stat.size = $size + } catch { + $stat.size = 0 + } + } else { + $stat.extension = $info.Extension + $stat.isreg = $true + $stat.size = $info.Length + + if ($get_checksum) { + try { + $stat.checksum = Get-FileChecksum -path $path -algorithm $checksum_algorithm + } catch { + $module.FailJson("Failed to get hash of file, set get_checksum to False to ignore this error: $($_.Exception.Message)", $_) + } + } + } + + # Get symbolic link, junction point, hard link info + if ($null -ne $link_info) { + switch ($link_info.Type) { + "SymbolicLink" { + $stat.islnk = $true + $stat.isreg = $false + $stat.lnk_target = $link_info.TargetPath + $stat.lnk_source = $link_info.AbsolutePath + break + } + "JunctionPoint" { + $stat.isjunction = $true + $stat.isreg = $false + $stat.lnk_target = $link_info.TargetPath + $stat.lnk_source = $link_info.AbsolutePath + break + } + "HardLink" { + $stat.lnk_type = "hard" + $stat.nlink = $link_info.HardTargets.Count + + # remove current path from the targets + $hlnk_targets = $link_info.HardTargets | Where-Object { $_ -ne $stat.path } + $stat.hlnk_targets = @($hlnk_targets) + break + } + } + } + + $module.Result.stat = $stat +} + +$module.ExitJson() + diff --git a/test/support/windows-integration/plugins/modules/win_stat.py b/test/support/windows-integration/plugins/modules/win_stat.py new file mode 100644 index 00000000..0676b5b2 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_stat.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. 
actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_stat
+version_added: "1.7"
+short_description: Get information about Windows files
+description:
+  - Returns information about a Windows file.
+  - For non-Windows targets, use the M(stat) module instead.
+options:
+  path:
+    description:
+      - The full path of the file/object to get the facts of; both forward and
+        back slashes are accepted.
+    type: path
+    required: yes
+    aliases: [ dest, name ]
+  get_checksum:
+    description:
+      - Whether to return a checksum of the file (default sha1).
+    type: bool
+    default: yes
+    version_added: "2.1"
+  checksum_algorithm:
+    description:
+      - Algorithm to determine checksum of file.
+      - Will throw an error if the host is unable to use specified algorithm.
+    type: str
+    default: sha1
+    choices: [ md5, sha1, sha256, sha384, sha512 ]
+    version_added: "2.3"
+  follow:
+    description:
+      - Whether to follow symlinks or junction points.
+      - In the case of C(path) pointing to another link, that will
+        be followed until no more links are found.
+    type: bool
+    default: no
+    version_added: "2.8"
+seealso:
+- module: stat
+- module: win_acl
+- module: win_file
+- module: win_owner
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+- name: Obtain information about a file
+  win_stat:
+    path: C:\foo.ini
+  register: file_info
+
+- name: Obtain information about a folder
+  win_stat:
+    path: C:\bar
+  register: folder_info
+
+- name: Get MD5 checksum of a file
+  win_stat:
+    path: C:\foo.ini
+    get_checksum: yes
+    checksum_algorithm: md5
+  register: md5_checksum
+
+- debug:
+    var: md5_checksum.stat.checksum
+
+- name: Get SHA1 checksum of file
+  win_stat:
+    path: C:\foo.ini
+    get_checksum: yes
+  register: sha1_checksum
+
+- debug:
+    var: sha1_checksum.stat.checksum
+
+- name: Get SHA256 checksum of file
+  win_stat:
+    path: C:\foo.ini
+    get_checksum: yes
+    checksum_algorithm: sha256
+  register: sha256_checksum
+
+- debug:
+    var: sha256_checksum.stat.checksum
+'''
+
+RETURN = r'''
+changed:
+    description: Whether anything was changed.
+    returned: always
+    type: bool
+    sample: true
+stat:
+    description: Dictionary containing all the stat data.
+    returned: success
+    type: complex
+    contains:
+        attributes:
+            description: Attributes of the file at path in raw form.
+            returned: success, path exists
+            type: str
+            sample: "Archive, Hidden"
+        checksum:
+            description: The checksum of a file based on checksum_algorithm specified.
+            returned: success, path exists, path is a file, get_checksum == True,
+                checksum_algorithm specified is supported
+            type: str
+            sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
+        creationtime:
+            description: The create time of the file represented in seconds since epoch.
+            returned: success, path exists
+            type: float
+            sample: 1477984205.15
+        exists:
+            description: If the path exists or not.
+            returned: success
+            type: bool
+            sample: true
+        extension:
+            description: The extension of the file at path.
+            returned: success, path exists, path is a file
+            type: str
+            sample: ".ps1"
+        filename:
+            description: The name of the file (without path).
+            returned: success, path exists, path is a file
+            type: str
+            sample: foo.ini
+        hlnk_targets:
+            description: List of other files pointing to the same file (hard links), excludes the current file.
+ returned: success, path exists + type: list + sample: + - C:\temp\file.txt + - C:\Windows\update.log + isarchive: + description: If the path is ready for archiving or not. + returned: success, path exists + type: bool + sample: true + isdir: + description: If the path is a directory or not. + returned: success, path exists + type: bool + sample: true + ishidden: + description: If the path is hidden or not. + returned: success, path exists + type: bool + sample: true + isjunction: + description: If the path is a junction point or not. + returned: success, path exists + type: bool + sample: true + islnk: + description: If the path is a symbolic link or not. + returned: success, path exists + type: bool + sample: true + isreadonly: + description: If the path is read only or not. + returned: success, path exists + type: bool + sample: true + isreg: + description: If the path is a regular file. + returned: success, path exists + type: bool + sample: true + isshared: + description: If the path is shared or not. + returned: success, path exists + type: bool + sample: true + lastaccesstime: + description: The last access time of the file represented in seconds since epoch. + returned: success, path exists + type: float + sample: 1477984205.15 + lastwritetime: + description: The last modification time of the file represented in seconds since epoch. + returned: success, path exists + type: float + sample: 1477984205.15 + lnk_source: + description: Target of the symlink normalized for the remote filesystem. + returned: success, path exists and the path is a symbolic link or junction point + type: str + sample: C:\temp\link + lnk_target: + description: Target of the symlink. Note that relative paths remain relative. + returned: success, path exists and the path is a symbolic link or junction point + type: str + sample: ..\link + nlink: + description: Number of links to the file (hard links). + returned: success, path exists + type: int + sample: 1 + owner: + description: The owner of the file. + returned: success, path exists + type: str + sample: BUILTIN\Administrators + path: + description: The full absolute path to the file. + returned: success, path exists, file exists + type: str + sample: C:\foo.ini + sharename: + description: The name of share if folder is shared. + returned: success, path exists, file is a directory and isshared == True + type: str + sample: file-share + size: + description: The size in bytes of a file or folder. 
+  returned: success, path exists, file is not a link
+  type: int
+  sample: 1024
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_tempfile.ps1 b/test/support/windows-integration/plugins/modules/win_tempfile.ps1
new file mode 100644
index 00000000..9a1a7174
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_tempfile.ps1
@@ -0,0 +1,72 @@
+#!powershell
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+Function New-TempFile {
+    Param ([string]$path, [string]$prefix, [string]$suffix, [string]$type, [bool]$checkmode)
+    $temppath = $null
+    $curerror = $null
+    $attempt = 0
+
+    # Since we don't know if the file already exists, we try 5 times with a random name
+    do {
+        $attempt += 1
+        $randomname = [System.IO.Path]::GetRandomFileName()
+        $temppath = (Join-Path -Path $path -ChildPath "$prefix$randomname$suffix")
+        Try {
+            $file = New-Item -Path $temppath -ItemType $type -WhatIf:$checkmode
+            # Makes sure we get the full absolute path of the created temp file and not a relative or DOS 8.3 dir
+            if (-not $checkmode) {
+                $temppath = $file.FullName
+            } else {
+                # Just rely on GetFullPath for check mode
+                $temppath = [System.IO.Path]::GetFullPath($temppath)
+            }
+        } Catch {
+            $temppath = $null
+            $curerror = $_
+        }
+    } until (($null -ne $temppath) -or ($attempt -ge 5))
+
+    # If it fails 5 times, something is wrong and we have to report the details
+    if ($null -eq $temppath) {
+        $module.FailJson("No random temporary file worked in $attempt attempts. Error: $($curerror.Exception.Message)", $curerror)
+    }
+
+    return $temppath.ToString()
+}
+
+$spec = @{
+    options = @{
+        path = @{ type='path'; default='%TEMP%'; aliases=@( 'dest' ) }
+        state = @{ type='str'; default='file'; choices=@( 'directory', 'file') }
+        prefix = @{ type='str'; default='ansible.' }
+        suffix = @{ type='str' }
+    }
+    supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$path = $module.Params.path
+$state = $module.Params.state
+$prefix = $module.Params.prefix
+$suffix = $module.Params.suffix
+
+# Expand environment variables on non-path types
+if ($null -ne $prefix) {
+    $prefix = [System.Environment]::ExpandEnvironmentVariables($prefix)
+}
+if ($null -ne $suffix) {
+    $suffix = [System.Environment]::ExpandEnvironmentVariables($suffix)
+}
+
+$module.Result.changed = $true
+$module.Result.state = $state
+
+$module.Result.path = New-TempFile -Path $path -Prefix $prefix -Suffix $suffix -Type $state -CheckMode $module.CheckMode
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_tempfile.py b/test/support/windows-integration/plugins/modules/win_tempfile.py
new file mode 100644
index 00000000..58dd6501
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_tempfile.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_tempfile
+version_added: "2.3"
+short_description: Creates temporary files and directories
+description:
+  - Creates temporary files and directories.
+  - For non-Windows targets, please use the M(tempfile) module instead.
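+# The .ps1 half builds the name as "<prefix><random><suffix>" (see New-TempFile),
+# so with the defaults below the result resembles %TEMP%\ansible.k3vq2d1x.abc
+# (the random portion shown is a hypothetical sample and differs every run).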
+options: + state: + description: + - Whether to create file or directory. + type: str + choices: [ directory, file ] + default: file + path: + description: + - Location where temporary file or directory should be created. + - If path is not specified, the default system temporary directory (%TEMP%) will be used. + type: path + default: '%TEMP%' + aliases: [ dest ] + prefix: + description: + - Prefix of file/directory name created by module. + type: str + default: ansible. + suffix: + description: + - Suffix of file/directory name created by module. + type: str + default: '' +seealso: +- module: tempfile +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r""" +- name: Create temporary build directory + win_tempfile: + state: directory + suffix: build + +- name: Create temporary file + win_tempfile: + state: file + suffix: temp +""" + +RETURN = r''' +path: + description: The absolute path to the created file or directory. + returned: success + type: str + sample: C:\Users\Administrator\AppData\Local\Temp\ansible.bMlvdk +''' diff --git a/test/support/windows-integration/plugins/modules/win_template.py b/test/support/windows-integration/plugins/modules/win_template.py new file mode 100644 index 00000000..bd8b2492 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_template.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a virtual module that is entirely implemented server side + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_template +version_added: "1.9.2" +short_description: Template a file out to a remote server +options: + backup: + description: + - Determine whether a backup should be created. + - When set to C(yes), create a backup file including the timestamp information + so you can get the original file back if you somehow clobbered it incorrectly. + type: bool + default: no + version_added: '2.8' + newline_sequence: + default: '\r\n' + force: + version_added: '2.4' +notes: +- Beware fetching files from Windows machines when creating templates because certain tools, such as PowerShell ISE, + and regedit's export facility add a Byte Order Mark as the first character of the file, which can cause tracebacks. +- You can use the M(win_copy) module with the C(content:) option if you prefer the template inline, as part of the + playbook. +- For Linux you can use M(template) which uses '\\n' as C(newline_sequence) by default. +seealso: +- module: win_copy +- module: copy +- module: template +author: +- Jon Hawkesworth (@jhawkesworth) +extends_documentation_fragment: +- template_common +''' + +EXAMPLES = r''' +- name: Create a file from a Jinja2 template + win_template: + src: /mytemplates/file.conf.j2 + dest: C:\Temp\file.conf + +- name: Create a Unix-style file from a Jinja2 template + win_template: + src: unix/config.conf.j2 + dest: C:\share\unix\config.conf + newline_sequence: '\n' + backup: yes +''' + +RETURN = r''' +backup_file: + description: Name of the backup file that was created.
+ returned: if backup=yes + type: str + sample: C:\Path\To\File.txt.11540.20150212-220915.bak +''' diff --git a/test/support/windows-integration/plugins/modules/win_user.ps1 b/test/support/windows-integration/plugins/modules/win_user.ps1 new file mode 100644 index 00000000..54905cb2 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_user.ps1 @@ -0,0 +1,273 @@ +#!powershell + +# Copyright: (c) 2014, Paul Durivage +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#AnsibleRequires -CSharpUtil Ansible.AccessToken +#Requires -Module Ansible.ModuleUtils.Legacy + +######## +$ADS_UF_PASSWD_CANT_CHANGE = 64 +$ADS_UF_DONT_EXPIRE_PASSWD = 65536 + +$adsi = [ADSI]"WinNT://$env:COMPUTERNAME" + +function Get-User($user) { + $adsi.Children | Where-Object {$_.SchemaClassName -eq 'user' -and $_.Name -eq $user } + return +} + +function Get-UserFlag($user, $flag) { + If ($user.UserFlags[0] -band $flag) { + $true + } + Else { + $false + } +} + +function Set-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BOR $flag) +} + +function Clear-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BXOR $flag) +} + +function Get-Group($grp) { + $adsi.Children | Where-Object { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } + return +} + +Function Test-LocalCredential { + param([String]$Username, [String]$Password) + + try { + $handle = [Ansible.AccessToken.TokenUtil]::LogonUser($Username, $null, $Password, "Network", "Default") + $handle.Dispose() + $valid_credentials = $true + } catch [Ansible.AccessToken.Win32Exception] { + # following errors indicate the creds are correct but the user was + # unable to log on for other reasons, which we don't care about + $success_codes = @( + 0x0000052F, # ERROR_ACCOUNT_RESTRICTION + 0x00000530, # ERROR_INVALID_LOGON_HOURS + 0x00000531, # ERROR_INVALID_WORKSTATION + 0x00000569 # ERROR_LOGON_TYPE_GRANTED + ) + + if ($_.Exception.NativeErrorCode -eq 0x0000052E) { + # ERROR_LOGON_FAILURE - the user or pass was incorrect + $valid_credentials = $false + } elseif ($_.Exception.NativeErrorCode -in $success_codes) { + $valid_credentials = $true + } else { + # an unknown failure, reraise exception + throw $_ + } + } + return $valid_credentials +} + +######## + +$params = Parse-Args $args; + +$result = @{ + changed = $false +}; + +$username = Get-AnsibleParam -obj $params -name "name" -type "str" -failifempty $true +$fullname = Get-AnsibleParam -obj $params -name "fullname" -type "str" +$description = Get-AnsibleParam -obj $params -name "description" -type "str" +$password = Get-AnsibleParam -obj $params -name "password" -type "str" +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent","query" +$update_password = Get-AnsibleParam -obj $params -name "update_password" -type "str" -default "always" -validateset "always","on_create" +$password_expired = Get-AnsibleParam -obj $params -name "password_expired" -type "bool" +$password_never_expires = Get-AnsibleParam -obj $params -name "password_never_expires" -type "bool" +$user_cannot_change_password = Get-AnsibleParam -obj $params -name "user_cannot_change_password" -type "bool" +$account_disabled = Get-AnsibleParam -obj $params -name "account_disabled" -type "bool" +$account_locked = Get-AnsibleParam -obj $params -name "account_locked" -type "bool" +$groups = Get-AnsibleParam -obj $params -name "groups" +$groups_action = Get-AnsibleParam -obj $params -name "groups_action" -type 
"str" -default "replace" -validateset "add","remove","replace" + +If ($null -ne $account_locked -and $account_locked) { + Fail-Json $result "account_locked must be set to 'no' if provided" +} + +If ($null -ne $groups) { + If ($groups -is [System.String]) { + [string[]]$groups = $groups.Split(",") + } + ElseIf ($groups -isnot [System.Collections.IList]) { + Fail-Json $result "groups must be a string or array" + } + $groups = $groups | ForEach-Object { ([string]$_).Trim() } | Where-Object { $_ } + If ($null -eq $groups) { + $groups = @() + } +} + +$user_obj = Get-User $username + +If ($state -eq 'present') { + # Add or update user + try { + If (-not $user_obj) { + $user_obj = $adsi.Create("User", $username) + If ($null -ne $password) { + $user_obj.SetPassword($password) + } + $user_obj.SetInfo() + $result.changed = $true + } + ElseIf (($null -ne $password) -and ($update_password -eq 'always')) { + # ValidateCredentials will fail if either of these are true- just force update... + If($user_obj.AccountDisabled -or $user_obj.PasswordExpired) { + $password_match = $false + } + Else { + try { + $password_match = Test-LocalCredential -Username $username -Password $password + } catch [System.ComponentModel.Win32Exception] { + Fail-Json -obj $result -message "Failed to validate the user's credentials: $($_.Exception.Message)" + } + } + + If (-not $password_match) { + $user_obj.SetPassword($password) + $result.changed = $true + } + } + If (($null -ne $fullname) -and ($fullname -ne $user_obj.FullName[0])) { + $user_obj.FullName = $fullname + $result.changed = $true + } + If (($null -ne $description) -and ($description -ne $user_obj.Description[0])) { + $user_obj.Description = $description + $result.changed = $true + } + If (($null -ne $password_expired) -and ($password_expired -ne ($user_obj.PasswordExpired | ConvertTo-Bool))) { + $user_obj.PasswordExpired = If ($password_expired) { 1 } Else { 0 } + $result.changed = $true + } + If (($null -ne $password_never_expires) -and ($password_never_expires -ne (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD))) { + If ($password_never_expires) { + Set-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + Else { + Clear-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + $result.changed = $true + } + If (($null -ne $user_cannot_change_password) -and ($user_cannot_change_password -ne (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE))) { + If ($user_cannot_change_password) { + Set-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + Else { + Clear-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + $result.changed = $true + } + If (($null -ne $account_disabled) -and ($account_disabled -ne $user_obj.AccountDisabled)) { + $user_obj.AccountDisabled = $account_disabled + $result.changed = $true + } + If (($null -ne $account_locked) -and ($account_locked -ne $user_obj.IsAccountLocked)) { + $user_obj.IsAccountLocked = $account_locked + $result.changed = $true + } + If ($result.changed) { + $user_obj.SetInfo() + } + If ($null -ne $groups) { + [string[]]$current_groups = $user_obj.Groups() | ForEach-Object { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } + If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { + ForEach ($grp in $current_groups) { + If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { + $group_obj = Get-Group $grp + If ($group_obj) { + $group_obj.Remove($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result 
"group '$grp' not found" + } + } + } + } + If (($groups_action -eq "add") -or ($groups_action -eq "replace")) { + ForEach ($grp in $groups) { + If ($current_groups -notcontains $grp) { + $group_obj = Get-Group $grp + If ($group_obj) { + $group_obj.Add($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result "group '$grp' not found" + } + } + } + } + } + } + catch { + Fail-Json $result $_.Exception.Message + } +} +ElseIf ($state -eq 'absent') { + # Remove user + try { + If ($user_obj) { + $username = $user_obj.Name.Value + $adsi.delete("User", $user_obj.Name.Value) + $result.changed = $true + $result.msg = "User '$username' deleted successfully" + $user_obj = $null + } else { + $result.msg = "User '$username' was not found" + } + } + catch { + Fail-Json $result $_.Exception.Message + } +} + +try { + If ($user_obj -and $user_obj -is [System.DirectoryServices.DirectoryEntry]) { + $user_obj.RefreshCache() + $result.name = $user_obj.Name[0] + $result.fullname = $user_obj.FullName[0] + $result.path = $user_obj.Path + $result.description = $user_obj.Description[0] + $result.password_expired = ($user_obj.PasswordExpired | ConvertTo-Bool) + $result.password_never_expires = (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD) + $result.user_cannot_change_password = (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE) + $result.account_disabled = $user_obj.AccountDisabled + $result.account_locked = $user_obj.IsAccountLocked + $result.sid = (New-Object System.Security.Principal.SecurityIdentifier($user_obj.ObjectSid.Value, 0)).Value + $user_groups = @() + ForEach ($grp in $user_obj.Groups()) { + $group_result = @{ + name = $grp.GetType().InvokeMember("Name", "GetProperty", $null, $grp, $null) + path = $grp.GetType().InvokeMember("ADsPath", "GetProperty", $null, $grp, $null) + } + $user_groups += $group_result; + } + $result.groups = $user_groups + $result.state = "present" + } + Else { + $result.name = $username + if ($state -eq 'query') { + $result.msg = "User '$username' was not found" + } + $result.state = "absent" + } +} +catch { + Fail-Json $result $_.Exception.Message +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_user.py b/test/support/windows-integration/plugins/modules/win_user.py new file mode 100644 index 00000000..5fc0633d --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_user.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Matt Martz , and others +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = r''' +--- +module: win_user +version_added: "1.7" +short_description: Manages local Windows user accounts +description: + - Manages local Windows user accounts. + - For non-Windows targets, use the M(user) module instead. +options: + name: + description: + - Name of the user to create, remove or modify. + type: str + required: yes + fullname: + description: + - Full name of the user. + type: str + version_added: "1.9" + description: + description: + - Description of the user. + type: str + version_added: "1.9" + password: + description: + - Optionally set the user's password to this (plain text) value. + type: str + update_password: + description: + - C(always) will update passwords if they differ. 
C(on_create) will + only set the password for newly created users. + type: str + choices: [ always, on_create ] + default: always + version_added: "1.9" + password_expired: + description: + - C(yes) will require the user to change their password at next login. + - C(no) will clear the expired password flag. + type: bool + version_added: "1.9" + password_never_expires: + description: + - C(yes) will set the password to never expire. + - C(no) will allow the password to expire. + type: bool + version_added: "1.9" + user_cannot_change_password: + description: + - C(yes) will prevent the user from changing their password. + - C(no) will allow the user to change their password. + type: bool + version_added: "1.9" + account_disabled: + description: + - C(yes) will disable the user account. + - C(no) will clear the disabled flag. + type: bool + version_added: "1.9" + account_locked: + description: + - C(no) will unlock the user account if locked. + choices: [ 'no' ] + version_added: "1.9" + groups: + description: + - Adds or removes the user from this comma-separated list of groups, + depending on the value of I(groups_action). + - When I(groups_action) is C(replace) and I(groups) is set to the empty + string ('groups='), the user is removed from all groups. + version_added: "1.9" + groups_action: + description: + - If C(add), the user is added to each group in I(groups) where not + already a member. + - If C(replace), the user is added as a member of each group in + I(groups) and removed from any other groups. + - If C(remove), the user is removed from each group in I(groups). + type: str + choices: [ add, replace, remove ] + default: replace + version_added: "1.9" + state: + description: + - When C(absent), removes the user account if it exists. + - When C(present), creates or updates the user account. + - When C(query) (new in 1.9), retrieves the user account details + without making any changes. + type: str + choices: [ absent, present, query ] + default: present +seealso: +- module: user +- module: win_domain_membership +- module: win_domain_user +- module: win_group +- module: win_group_membership +- module: win_user_profile +author: + - Paul Durivage (@angstwad) + - Chris Church (@cchurch) +''' + +EXAMPLES = r''' +- name: Ensure user bob is present + win_user: + name: bob + password: B0bP4ssw0rd + state: present + groups: + - Users + +- name: Ensure user bob is absent + win_user: + name: bob + state: absent +''' + +RETURN = r''' +account_disabled: + description: Whether the user is disabled. + returned: user exists + type: bool + sample: false +account_locked: + description: Whether the user is locked. + returned: user exists + type: bool + sample: false +description: + description: The description set for the user. + returned: user exists + type: str + sample: Username for test +fullname: + description: The full name set for the user. + returned: user exists + type: str + sample: Test Username +groups: + description: A list of groups and their ADSI path the user is a member of. + returned: user exists + type: list + sample: [ + { + "name": "Administrators", + "path": "WinNT://WORKGROUP/USER-PC/Administrators" + } + ] +name: + description: The name of the user + returned: always + type: str + sample: username +password_expired: + description: Whether the password is expired. + returned: user exists + type: bool + sample: false +password_never_expires: + description: Whether the password is set to never expire. 
+ returned: user exists + type: bool + sample: true +path: + description: The ADSI path for the user. + returned: user exists + type: str + sample: "WinNT://WORKGROUP/USER-PC/username" +sid: + description: The SID for the user. + returned: user exists + type: str + sample: S-1-5-21-3322259488-2828151810-3939402796-1001 +user_cannot_change_password: + description: Whether the user can change their own password. + returned: user exists + type: bool + sample: false +''' diff --git a/test/support/windows-integration/plugins/modules/win_user_right.ps1 b/test/support/windows-integration/plugins/modules/win_user_right.ps1 new file mode 100644 index 00000000..3fac52a8 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_user_right.ps1 @@ -0,0 +1,349 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.SID + +$ErrorActionPreference = 'Stop' + +$params = Parse-Args $args -supports_check_mode $true +$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false +$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false +$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP + +$name = Get-AnsibleParam -obj $params -name "name" -type "str" -failifempty $true +$users = Get-AnsibleParam -obj $params -name "users" -type "list" -failifempty $true +$action = Get-AnsibleParam -obj $params -name "action" -type "str" -default "set" -validateset "add","remove","set" + +$result = @{ + changed = $false + added = @() + removed = @() +} + +if ($diff_mode) { + $result.diff = @{} +} + +$sec_helper_util = @" +using System; +using System.ComponentModel; +using System.Runtime.InteropServices; +using System.Security.Principal; + +namespace Ansible +{ + public class LsaRightHelper : IDisposable + { + // Code modified from https://gallery.technet.microsoft.com/scriptcenter/Grant-Revoke-Query-user-26e259b0 + + enum Access : int + { + POLICY_READ = 0x20006, + POLICY_ALL_ACCESS = 0x00F0FFF, + POLICY_EXECUTE = 0X20801, + POLICY_WRITE = 0X207F8 + } + + IntPtr lsaHandle; + + const string LSA_DLL = "advapi32.dll"; + const CharSet DEFAULT_CHAR_SET = CharSet.Unicode; + + const uint STATUS_NO_MORE_ENTRIES = 0x8000001a; + const uint STATUS_NO_SUCH_PRIVILEGE = 0xc0000060; + + internal sealed class Sid : IDisposable + { + public IntPtr pSid = IntPtr.Zero; + public SecurityIdentifier sid = null; + + public Sid(string sidString) + { + try + { + sid = new SecurityIdentifier(sidString); + } catch + { + throw new ArgumentException(String.Format("SID string {0} could not be converted to SecurityIdentifier", sidString)); + } + + Byte[] buffer = new Byte[sid.BinaryLength]; + sid.GetBinaryForm(buffer, 0); + + pSid = Marshal.AllocHGlobal(sid.BinaryLength); + Marshal.Copy(buffer, 0, pSid, sid.BinaryLength); + } + + public void Dispose() + { + if (pSid != IntPtr.Zero) + { + Marshal.FreeHGlobal(pSid); + pSid = IntPtr.Zero; + } + GC.SuppressFinalize(this); + } + ~Sid() { Dispose(); } + } + + [StructLayout(LayoutKind.Sequential)] + private struct LSA_OBJECT_ATTRIBUTES + { + public int Length; + public IntPtr RootDirectory; + public IntPtr ObjectName; + public int Attributes; + public IntPtr SecurityDescriptor; + public IntPtr SecurityQualityOfService; + } + + [StructLayout(LayoutKind.Sequential, CharSet = DEFAULT_CHAR_SET)] + private 
struct LSA_UNICODE_STRING + { + public ushort Length; + public ushort MaximumLength; + [MarshalAs(UnmanagedType.LPWStr)] + public string Buffer; + } + + [StructLayout(LayoutKind.Sequential)] + private struct LSA_ENUMERATION_INFORMATION + { + public IntPtr Sid; + } + + [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)] + private static extern uint LsaOpenPolicy( + LSA_UNICODE_STRING[] SystemName, + ref LSA_OBJECT_ATTRIBUTES ObjectAttributes, + int AccessMask, + out IntPtr PolicyHandle + ); + + [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)] + private static extern uint LsaAddAccountRights( + IntPtr PolicyHandle, + IntPtr pSID, + LSA_UNICODE_STRING[] UserRights, + int CountOfRights + ); + + [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)] + private static extern uint LsaRemoveAccountRights( + IntPtr PolicyHandle, + IntPtr pSID, + bool AllRights, + LSA_UNICODE_STRING[] UserRights, + int CountOfRights + ); + + [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)] + private static extern uint LsaEnumerateAccountsWithUserRight( + IntPtr PolicyHandle, + LSA_UNICODE_STRING[] UserRights, + out IntPtr EnumerationBuffer, + out ulong CountReturned + ); + + [DllImport(LSA_DLL)] + private static extern int LsaNtStatusToWinError(int NTSTATUS); + + [DllImport(LSA_DLL)] + private static extern int LsaClose(IntPtr PolicyHandle); + + [DllImport(LSA_DLL)] + private static extern int LsaFreeMemory(IntPtr Buffer); + + public LsaRightHelper() + { + LSA_OBJECT_ATTRIBUTES lsaAttr; + lsaAttr.RootDirectory = IntPtr.Zero; + lsaAttr.ObjectName = IntPtr.Zero; + lsaAttr.Attributes = 0; + lsaAttr.SecurityDescriptor = IntPtr.Zero; + lsaAttr.SecurityQualityOfService = IntPtr.Zero; + lsaAttr.Length = Marshal.SizeOf(typeof(LSA_OBJECT_ATTRIBUTES)); + + lsaHandle = IntPtr.Zero; + + LSA_UNICODE_STRING[] system = new LSA_UNICODE_STRING[1]; + system[0] = InitLsaString(""); + + uint ret = LsaOpenPolicy(system, ref lsaAttr, (int)Access.POLICY_ALL_ACCESS, out lsaHandle); + if (ret != 0) + throw new Win32Exception(LsaNtStatusToWinError((int)ret)); + } + + public void AddPrivilege(string sidString, string privilege) + { + uint ret = 0; + using (Sid sid = new Sid(sidString)) + { + LSA_UNICODE_STRING[] privileges = new LSA_UNICODE_STRING[1]; + privileges[0] = InitLsaString(privilege); + ret = LsaAddAccountRights(lsaHandle, sid.pSid, privileges, 1); + } + if (ret != 0) + throw new Win32Exception(LsaNtStatusToWinError((int)ret)); + } + + public void RemovePrivilege(string sidString, string privilege) + { + uint ret = 0; + using (Sid sid = new Sid(sidString)) + { + LSA_UNICODE_STRING[] privileges = new LSA_UNICODE_STRING[1]; + privileges[0] = InitLsaString(privilege); + ret = LsaRemoveAccountRights(lsaHandle, sid.pSid, false, privileges, 1); + } + if (ret != 0) + throw new Win32Exception(LsaNtStatusToWinError((int)ret)); + } + + public string[] EnumerateAccountsWithUserRight(string privilege) + { + uint ret = 0; + ulong count = 0; + LSA_UNICODE_STRING[] rights = new LSA_UNICODE_STRING[1]; + rights[0] = InitLsaString(privilege); + IntPtr buffer = IntPtr.Zero; + + ret = LsaEnumerateAccountsWithUserRight(lsaHandle, rights, out buffer, out count); + switch (ret) + { + case 0: + string[] accounts = new string[count]; + for (int i = 0; i < (int)count; i++) + { + LSA_ENUMERATION_INFORMATION LsaInfo = (LSA_ENUMERATION_INFORMATION)Marshal.PtrToStructure( + IntPtr.Add(buffer, i * Marshal.SizeOf(typeof(LSA_ENUMERATION_INFORMATION))), + typeof(LSA_ENUMERATION_INFORMATION)); 
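+ // LsaInfo.Sid is a raw PSID pointer; SecurityIdentifier renders it as an SDDL string (for example S-1-5-32-544).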
+ + accounts[i] = new SecurityIdentifier(LsaInfo.Sid).ToString(); + } + LsaFreeMemory(buffer); + return accounts; + + case STATUS_NO_MORE_ENTRIES: + return new string[0]; + + case STATUS_NO_SUCH_PRIVILEGE: + throw new ArgumentException(String.Format("Invalid privilege {0} not found in LSA database", privilege)); + + default: + throw new Win32Exception(LsaNtStatusToWinError((int)ret)); + } + } + + static LSA_UNICODE_STRING InitLsaString(string s) + { + // Unicode strings max. 32KB + if (s.Length > 0x7ffe) + throw new ArgumentException("String too long"); + + LSA_UNICODE_STRING lus = new LSA_UNICODE_STRING(); + lus.Buffer = s; + lus.Length = (ushort)(s.Length * sizeof(char)); + lus.MaximumLength = (ushort)(lus.Length + sizeof(char)); + + return lus; + } + + public void Dispose() + { + if (lsaHandle != IntPtr.Zero) + { + LsaClose(lsaHandle); + lsaHandle = IntPtr.Zero; + } + GC.SuppressFinalize(this); + } + ~LsaRightHelper() { Dispose(); } + } +} +"@ + +$original_tmp = $env:TMP +$env:TMP = $_remote_tmp +Add-Type -TypeDefinition $sec_helper_util +$env:TMP = $original_tmp + +Function Compare-UserList($existing_users, $new_users) { + $added_users = [String[]]@() + $removed_users = [String[]]@() + if ($action -eq "add") { + $added_users = [Linq.Enumerable]::Except($new_users, $existing_users) + } elseif ($action -eq "remove") { + $removed_users = [Linq.Enumerable]::Intersect($new_users, $existing_users) + } else { + $added_users = [Linq.Enumerable]::Except($new_users, $existing_users) + $removed_users = [Linq.Enumerable]::Except($existing_users, $new_users) + } + + $change_result = @{ + added = $added_users + removed = $removed_users + } + + return $change_result +} + +# C# class we can use to enumerate/add/remove rights +$lsa_helper = New-Object -TypeName Ansible.LsaRightHelper + +$new_users = [System.Collections.ArrayList]@() +foreach ($user in $users) { + $user_sid = Convert-ToSID -account_name $user + $new_users.Add($user_sid) > $null +} +$new_users = [String[]]$new_users.ToArray() +try { + $existing_users = $lsa_helper.EnumerateAccountsWithUserRight($name) +} catch [ArgumentException] { + Fail-Json -obj $result -message "the specified right $name is not a valid right" +} catch { + Fail-Json -obj $result -message "failed to enumerate existing accounts with right: $($_.Exception.Message)" +} + +$change_result = Compare-UserList -existing_users $existing_users -new_users $new_users +if (($change_result.added.Length -gt 0) -or ($change_result.removed.Length -gt 0)) { + $result.changed = $true + $diff_text = "[$name]`n" + + # used in diff mode calculation + $new_user_list = [System.Collections.ArrayList]$existing_users + foreach ($user in $change_result.removed) { + if (-not $check_mode) { + $lsa_helper.RemovePrivilege($user, $name) + } + $user_name = Convert-FromSID -sid $user + $result.removed += $user_name + $diff_text += "-$user_name`n" + $new_user_list.Remove($user) > $null + } + foreach ($user in $change_result.added) { + if (-not $check_mode) { + $lsa_helper.AddPrivilege($user, $name) + } + $user_name = Convert-FromSID -sid $user + $result.added += $user_name + $diff_text += "+$user_name`n" + $new_user_list.Add($user) > $null + } + + if ($diff_mode) { + if ($new_user_list.Count -eq 0) { + $diff_text = "-$diff_text" + } else { + if ($existing_users.Count -eq 0) { + $diff_text = "+$diff_text" + } + } + $result.diff.prepared = $diff_text + } +} + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_user_right.py
b/test/support/windows-integration/plugins/modules/win_user_right.py new file mode 100644 index 00000000..55882083 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_user_right.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_user_right +version_added: '2.4' +short_description: Manage Windows User Rights +description: +- Add, remove or set User Rights for users or groups. +- You can set user rights for both local and domain accounts. +options: + name: + description: + - The name of the User Right as shown by the C(Constant Name) value from + U(https://technet.microsoft.com/en-us/library/dd349804.aspx). + - The module will return an error if the right is invalid. + type: str + required: yes + users: + description: + - A list of users or groups to add/remove on the User Right. + - These can be in the form DOMAIN\user-group, user-group@DOMAIN.COM for + domain users/groups. + - For local users/groups it can be in the form user-group, .\user-group, + SERVERNAME\user-group where SERVERNAME is the name of the remote server. + - You can also add special local accounts like SYSTEM and others. + - Can be set to an empty list with I(action=set) to remove all accounts + from the right. + type: list + required: yes + action: + description: + - C(add) will add the users/groups to the existing right. + - C(remove) will remove the users/groups from the existing right. + - C(set) will replace the users/groups of the existing right. + type: str + default: set + choices: [ add, remove, set ] +notes: +- If the server is domain joined this module can change a right but if a GPO + governs this right then the changes won't last. +seealso: +- module: win_group +- module: win_group_membership +- module: win_user +author: +- Jordan Borean (@jborean93) +''' + +EXAMPLES = r''' +--- +- name: Replace the entries of Deny log on locally + win_user_right: + name: SeDenyInteractiveLogonRight + users: + - Guest + - Users + action: set + +- name: Add account to Log on as a service + win_user_right: + name: SeServiceLogonRight + users: + - .\Administrator + - '{{ansible_hostname}}\local-user' + action: add + +- name: Remove accounts who can create Symbolic links + win_user_right: + name: SeCreateSymbolicLinkPrivilege + users: + - SYSTEM + - Administrators + - DOMAIN\User + - group@DOMAIN.COM + action: remove + +- name: Remove all accounts who cannot log on remote interactively + win_user_right: + name: SeDenyRemoteInteractiveLogonRight + users: [] +''' + +RETURN = r''' +added: + description: A list of accounts that were added to the right; this is empty + if no accounts were added. + returned: success + type: list + sample: ["NT AUTHORITY\\SYSTEM", "DOMAIN\\User"] +removed: + description: A list of accounts that were removed from the right; this is + empty if no accounts were removed.
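+# Both lists hold account names translated back from SIDs by Convert-FromSID in the matching .ps1, hence the DOMAIN\name form shown in the samples.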
+ returned: success + type: list + sample: ["SERVERNAME\\Administrator", "BUILTIN\\Administrators"] +''' diff --git a/test/support/windows-integration/plugins/modules/win_wait_for.ps1 b/test/support/windows-integration/plugins/modules/win_wait_for.ps1 new file mode 100644 index 00000000..e0a9a720 --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_wait_for.ps1 @@ -0,0 +1,259 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.FileUtil + +$ErrorActionPreference = "Stop" + +$params = Parse-Args -arguments $args -supports_check_mode $true + +$connect_timeout = Get-AnsibleParam -obj $params -name "connect_timeout" -type "int" -default 5 +$delay = Get-AnsibleParam -obj $params -name "delay" -type "int" +$exclude_hosts = Get-AnsibleParam -obj $params -name "exclude_hosts" -type "list" +$hostname = Get-AnsibleParam -obj $params -name "host" -type "str" -default "127.0.0.1" +$path = Get-AnsibleParam -obj $params -name "path" -type "path" +$port = Get-AnsibleParam -obj $params -name "port" -type "int" +$regex = Get-AnsibleParam -obj $params -name "regex" -type "str" -aliases "search_regex","regexp" +$sleep = Get-AnsibleParam -obj $params -name "sleep" -type "int" -default 1 +$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "started" -validateset "present","started","stopped","absent","drained" +$timeout = Get-AnsibleParam -obj $params -name "timeout" -type "int" -default 300 + +$result = @{ + changed = $false + elapsed = 0 +} + +# validate the input with the various options +if ($null -ne $port -and $null -ne $path) { + Fail-Json $result "port and path parameters cannot both be passed to win_wait_for" +} +if ($null -ne $exclude_hosts -and $state -ne "drained") { + Fail-Json $result "exclude_hosts should only be used with state=drained" +} +if ($null -ne $path) { + if ($state -in @("stopped","drained")) { + Fail-Json $result "state=$state should only be used for checking a port in the win_wait_for module" + } + + if ($null -ne $exclude_hosts) { + Fail-Json $result "exclude_hosts should only be used when checking a port and state=drained in the win_wait_for module" + } +} + +if ($null -ne $port) { + if ($null -ne $regex) { + Fail-Json $result "regex should only be used when checking a string in a file in the win_wait_for module" + } + + if ($null -ne $exclude_hosts -and $state -ne "drained") { + Fail-Json $result "exclude_hosts should only be used when state=drained in the win_wait_for module" + } +} + +Function Test-Port($hostname, $port) { + $timeout = $connect_timeout * 1000 + $socket = New-Object -TypeName System.Net.Sockets.TcpClient + $connect = $socket.BeginConnect($hostname, $port, $null, $null) + $wait = $connect.AsyncWaitHandle.WaitOne($timeout, $false) + + if ($wait) { + try { + $socket.EndConnect($connect) | Out-Null + $valid = $true + } catch { + $valid = $false + } + } else { + $valid = $false + } + + $socket.Close() + $socket.Dispose() + + $valid +} + +Function Get-PortConnections($hostname, $port) { + $connections = @() + + $conn_info = [Net.NetworkInformation.IPGlobalProperties]::GetIPGlobalProperties() + if ($hostname -eq "0.0.0.0") { + $active_connections = $conn_info.GetActiveTcpConnections() | Where-Object { $_.LocalEndPoint.Port -eq $port } + } else { + $active_connections = $conn_info.GetActiveTcpConnections() | Where-Object { $_.LocalEndPoint.Address -eq
$hostname -and $_.LocalEndPoint.Port -eq $port } + } + + if ($null -ne $active_connections) { + foreach ($active_connection in $active_connections) { + $connections += $active_connection.RemoteEndPoint.Address + } + } + + $connections +} + +$module_start = Get-Date + +if ($null -ne $delay) { + Start-Sleep -Seconds $delay +} + +$attempts = 0 +if ($null -eq $path -and $null -eq $port -and $state -ne "drained") { + Start-Sleep -Seconds $timeout +} elseif ($null -ne $path) { + if ($state -in @("present", "started")) { + # check if the file exists or string exists in file + $start_time = Get-Date + $complete = $false + while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) { + $attempts += 1 + if (Test-AnsiblePath -Path $path) { + if ($null -eq $regex) { + $complete = $true + break + } else { + $file_contents = Get-Content -Path $path -Raw + if ($file_contents -match $regex) { + $complete = $true + break + } + } + } + Start-Sleep -Seconds $sleep + } + + if ($complete -eq $false) { + $result.elapsed = ((Get-Date) - $module_start).TotalSeconds + $result.wait_attempts = $attempts + if ($null -eq $regex) { + Fail-Json $result "timeout while waiting for file $path to be present" + } else { + Fail-Json $result "timeout while waiting for string regex $regex in file $path to match" + } + } + } elseif ($state -in @("absent")) { + # check if the file is deleted or string doesn't exist in file + $start_time = Get-Date + $complete = $false + while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) { + $attempts += 1 + if (Test-AnsiblePath -Path $path) { + if ($null -ne $regex) { + $file_contents = Get-Content -Path $path -Raw + if ($file_contents -notmatch $regex) { + $complete = $true + break + } + } + } else { + $complete = $true + break + } + + Start-Sleep -Seconds $sleep + } + + if ($complete -eq $false) { + $result.elapsed = ((Get-Date) - $module_start).TotalSeconds + $result.wait_attempts = $attempts + if ($null -eq $regex) { + Fail-Json $result "timeout while waiting for file $path to be absent" + } else { + Fail-Json $result "timeout while waiting for string regex $regex in file $path to not match" + } + } + } +} elseif ($null -ne $port) { + if ($state -in @("started","present")) { + # check that the port is online and is listening + $start_time = Get-Date + $complete = $false + while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) { + $attempts += 1 + $port_result = Test-Port -hostname $hostname -port $port + if ($port_result -eq $true) { + $complete = $true + break + } + + Start-Sleep -Seconds $sleep + } + + if ($complete -eq $false) { + $result.elapsed = ((Get-Date) - $module_start).TotalSeconds + $result.wait_attempts = $attempts + Fail-Json $result "timeout while waiting for $($hostname):$port to start listening" + } + } elseif ($state -in @("stopped","absent")) { + # check that the port is offline and is not listening + $start_time = Get-Date + $complete = $false + while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) { + $attempts += 1 + $port_result = Test-Port -hostname $hostname -port $port + if ($port_result -eq $false) { + $complete = $true + break + } + + Start-Sleep -Seconds $sleep + } + + if ($complete -eq $false) { + $result.elapsed = ((Get-Date) - $module_start).TotalSeconds + $result.wait_attempts = $attempts + Fail-Json $result "timeout while waiting for $($hostname):$port to stop listening" + } + } elseif ($state -eq "drained") { + # check that the local port is online but has no active connections + $start_time = Get-Date + $complete = $false + while 
(((Get-Date) - $start_time).TotalSeconds -lt $timeout) { + $attempts += 1 + $active_connections = Get-PortConnections -hostname $hostname -port $port + if ($null -eq $active_connections) { + $complete = $true + break + } elseif ($active_connections.Count -eq 0) { + # no connections on port + $complete = $true + break + } else { + # there are listeners, check if we should ignore any hosts + if ($null -ne $exclude_hosts) { + $connection_info = $active_connections + foreach ($exclude_host in $exclude_hosts) { + try { + $exclude_ips = [System.Net.Dns]::GetHostAddresses($exclude_host) | ForEach-Object { Write-Output $_.IPAddressToString } + $connection_info = $connection_info | Where-Object { $_ -notin $exclude_ips } + } catch { # ignore invalid hostnames + Add-Warning -obj $result -message "Invalid hostname specified $exclude_host" + } + } + + if ($connection_info.Count -eq 0) { + $complete = $true + break + } + } + } + + Start-Sleep -Seconds $sleep + } + + if ($complete -eq $false) { + $result.elapsed = ((Get-Date) - $module_start).TotalSeconds + $result.wait_attempts = $attempts + Fail-Json $result "timeout while waiting for $($hostname):$port to drain" + } + } +} + +$result.elapsed = ((Get-Date) - $module_start).TotalSeconds +$result.wait_attempts = $attempts + +Exit-Json $result diff --git a/test/support/windows-integration/plugins/modules/win_wait_for.py b/test/support/windows-integration/plugins/modules/win_wait_for.py new file mode 100644 index 00000000..85721e7d --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_wait_for.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# this is a windows documentation stub, actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = r''' +--- +module: win_wait_for +version_added: '2.4' +short_description: Waits for a condition before continuing +description: +- You can wait for a set amount of time C(timeout), this is the default if + nothing is specified. +- Waiting for a port to become available is useful for when services are not + immediately available after their init scripts return which is true of + certain Java application servers. +- You can wait for a file to exist or not exist on the filesystem. +- This module can also be used to wait for a regex match string to be present + in a file. +- You can wait for active connections to be closed before continuing on a + local port. +options: + connect_timeout: + description: + - The maximum number of seconds to wait for a connection to happen before + closing and retrying. + type: int + default: 5 + delay: + description: + - The number of seconds to wait before starting to poll. + type: int + exclude_hosts: + description: + - The list of hosts or IPs to ignore when looking for active TCP + connections when C(state=drained). + type: list + host: + description: + - A resolvable hostname or IP address to wait for. + - If C(state=drained) then it will only check for connections on the IP + specified, you can use '0.0.0.0' to use all host IPs. + type: str + default: '127.0.0.1' + path: + description: + - The path to a file on the filesystem to check. + - If C(state) is present or started then it will wait until the file + exists. + - If C(state) is absent then it will wait until the file does not exist. 
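+ # Note: path and port are mutually exclusive; the module fails early if both are supplied.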
+ type: path + port: + description: + - The port number to poll on C(host). + type: int + regex: + description: + - Can be used to match a string in a file. + - If C(state) is present or started then it will wait until the regex + matches. + - If C(state) is absent then it will wait until the regex does not match. + - Defaults to a multiline regex. + type: str + aliases: [ "search_regex", "regexp" ] + sleep: + description: + - Number of seconds to sleep between checks. + type: int + default: 1 + state: + description: + - When checking a port, C(started) will ensure the port is open, C(stopped) + will check that it is closed, and C(drained) will check for active + connections. + - When checking for a file or a search string, C(present) or C(started) will + ensure that the file or string is present, C(absent) will check that the + file or search string is absent or removed. + type: str + choices: [ absent, drained, present, started, stopped ] + default: started + timeout: + description: + - The maximum number of seconds to wait for. + type: int + default: 300 +seealso: +- module: wait_for +- module: win_wait_for_process +author: +- Jordan Borean (@jborean93) +''' + +EXAMPLES = r''' +- name: Wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds + win_wait_for: + port: 8000 + delay: 10 + +- name: Wait 150 seconds for port 8000 of any IP to close active connections + win_wait_for: + host: 0.0.0.0 + port: 8000 + state: drained + timeout: 150 + +- name: Wait for port 8000 of any IP to close active connection, ignoring certain hosts + win_wait_for: + host: 0.0.0.0 + port: 8000 + state: drained + exclude_hosts: ['10.2.1.2', '10.2.1.3'] + +- name: Wait for file C:\temp\log.txt to exist before continuing + win_wait_for: + path: C:\temp\log.txt + +- name: Wait until process complete is in the file before continuing + win_wait_for: + path: C:\temp\log.txt + regex: process complete + +- name: Wait until file is removed + win_wait_for: + path: C:\temp\log.txt + state: absent + +- name: Wait until port 1234 is offline but try every 10 seconds + win_wait_for: + port: 1234 + state: absent + sleep: 10 +''' + +RETURN = r''' +wait_attempts: + description: The number of attempts to poll the file or port before the module + finishes. + returned: always + type: int + sample: 1 +elapsed: + description: The elapsed seconds between the start of poll and the end of the + module. This includes the delay if the option is set.
+ returned: always + type: float + sample: 2.1406487 +''' diff --git a/test/support/windows-integration/plugins/modules/win_whoami.ps1 b/test/support/windows-integration/plugins/modules/win_whoami.ps1 new file mode 100644 index 00000000..6c9965af --- /dev/null +++ b/test/support/windows-integration/plugins/modules/win_whoami.ps1 @@ -0,0 +1,837 @@ +#!powershell + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#Requires -Module Ansible.ModuleUtils.Legacy +#Requires -Module Ansible.ModuleUtils.CamelConversion + +$ErrorActionPreference = "Stop" + +$params = Parse-Args $args -supports_check_mode $true +$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP + +$session_util = @' +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.InteropServices; +using System.Security.Principal; +using System.Text; + +namespace Ansible +{ + public class SessionInfo + { + // SECURITY_LOGON_SESSION_DATA + public UInt64 LogonId { get; internal set; } + public Sid Account { get; internal set; } + public string LoginDomain { get; internal set; } + public string AuthenticationPackage { get; internal set; } + public SECURITY_LOGON_TYPE LogonType { get; internal set; } + public string LoginTime { get; internal set; } + public string LogonServer { get; internal set; } + public string DnsDomainName { get; internal set; } + public string Upn { get; internal set; } + public ArrayList UserFlags { get; internal set; } + + // TOKEN_STATISTICS + public SECURITY_IMPERSONATION_LEVEL ImpersonationLevel { get; internal set; } + public TOKEN_TYPE TokenType { get; internal set; } + + // TOKEN_GROUPS + public ArrayList Groups { get; internal set; } + public ArrayList Rights { get; internal set; } + + // TOKEN_MANDATORY_LABEL + public Sid Label { get; internal set; } + + // TOKEN_PRIVILEGES + public Hashtable Privileges { get; internal set; } + } + + public class Win32Exception : System.ComponentModel.Win32Exception + { + private string _msg; + public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { } + public Win32Exception(int errorCode, string message) : base(errorCode) + { + _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode); + } + public override string Message { get { return _msg; } } + public static explicit operator Win32Exception(string message) { return new Win32Exception(message); } + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct LSA_UNICODE_STRING + { + public UInt16 Length; + public UInt16 MaximumLength; + public IntPtr buffer; + } + + [StructLayout(LayoutKind.Sequential)] + public struct LUID + { + public UInt32 LowPart; + public Int32 HighPart; + } + + [StructLayout(LayoutKind.Sequential)] + public struct SECURITY_LOGON_SESSION_DATA + { + public UInt32 Size; + public LUID LogonId; + public LSA_UNICODE_STRING Username; + public LSA_UNICODE_STRING LoginDomain; + public LSA_UNICODE_STRING AuthenticationPackage; + public SECURITY_LOGON_TYPE LogonType; + public UInt32 Session; + public IntPtr Sid; + public UInt64 LoginTime; + public LSA_UNICODE_STRING LogonServer; + public LSA_UNICODE_STRING DnsDomainName; + public LSA_UNICODE_STRING Upn; + public UInt32 UserFlags; + public LSA_LAST_INTER_LOGON_INFO LastLogonInfo; + public LSA_UNICODE_STRING LogonScript; + public LSA_UNICODE_STRING ProfilePath; + public LSA_UNICODE_STRING 
HomeDirectory; + public LSA_UNICODE_STRING HomeDirectoryDrive; + public UInt64 LogoffTime; + public UInt64 KickOffTime; + public UInt64 PasswordLastSet; + public UInt64 PasswordCanChange; + public UInt64 PasswordMustChange; + } + + [StructLayout(LayoutKind.Sequential)] + public struct LSA_LAST_INTER_LOGON_INFO + { + public UInt64 LastSuccessfulLogon; + public UInt64 LastFailedLogon; + public UInt32 FailedAttemptCountSinceLastSuccessfulLogon; + } + + public enum TOKEN_TYPE + { + TokenPrimary = 1, + TokenImpersonation + } + + public enum SECURITY_IMPERSONATION_LEVEL + { + SecurityAnonymous, + SecurityIdentification, + SecurityImpersonation, + SecurityDelegation + } + + public enum SECURITY_LOGON_TYPE + { + System = 0, // Used only by the System account + Interactive = 2, + Network, + Batch, + Service, + Proxy, + Unlock, + NetworkCleartext, + NewCredentials, + RemoteInteractive, + CachedInteractive, + CachedRemoteInteractive, + CachedUnlock + } + + [Flags] + public enum TokenGroupAttributes : uint + { + SE_GROUP_ENABLED = 0x00000004, + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002, + SE_GROUP_INTEGRITY = 0x00000020, + SE_GROUP_INTEGRITY_ENABLED = 0x00000040, + SE_GROUP_LOGON_ID = 0xC0000000, + SE_GROUP_MANDATORY = 0x00000001, + SE_GROUP_OWNER = 0x00000008, + SE_GROUP_RESOURCE = 0x20000000, + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010, + } + + [Flags] + public enum UserFlags : uint + { + LOGON_OPTIMIZED = 0x4000, + LOGON_WINLOGON = 0x8000, + LOGON_PKINIT = 0x10000, + LOGON_NOT_OPTMIZED = 0x20000, + } + + [StructLayout(LayoutKind.Sequential)] + public struct SID_AND_ATTRIBUTES + { + public IntPtr Sid; + public UInt32 Attributes; + } + + [StructLayout(LayoutKind.Sequential)] + public struct LUID_AND_ATTRIBUTES + { + public LUID Luid; + public UInt32 Attributes; + } + + [StructLayout(LayoutKind.Sequential)] + public struct TOKEN_GROUPS + { + public UInt32 GroupCount; + [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)] + public SID_AND_ATTRIBUTES[] Groups; + } + + [StructLayout(LayoutKind.Sequential)] + public struct TOKEN_MANDATORY_LABEL + { + public SID_AND_ATTRIBUTES Label; + } + + [StructLayout(LayoutKind.Sequential)] + public struct TOKEN_STATISTICS + { + public LUID TokenId; + public LUID AuthenticationId; + public UInt64 ExpirationTime; + public TOKEN_TYPE TokenType; + public SECURITY_IMPERSONATION_LEVEL ImpersonationLevel; + public UInt32 DynamicCharged; + public UInt32 DynamicAvailable; + public UInt32 GroupCount; + public UInt32 PrivilegeCount; + public LUID ModifiedId; + } + + [StructLayout(LayoutKind.Sequential)] + public struct TOKEN_PRIVILEGES + { + public UInt32 PrivilegeCount; + [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)] + public LUID_AND_ATTRIBUTES[] Privileges; + } + + public class AccessToken : IDisposable + { + public enum TOKEN_INFORMATION_CLASS + { + TokenUser = 1, + TokenGroups, + TokenPrivileges, + TokenOwner, + TokenPrimaryGroup, + TokenDefaultDacl, + TokenSource, + TokenType, + TokenImpersonationLevel, + TokenStatistics, + TokenRestrictedSids, + TokenSessionId, + TokenGroupsAndPrivileges, + TokenSessionReference, + TokenSandBoxInert, + TokenAuditPolicy, + TokenOrigin, + TokenElevationType, + TokenLinkedToken, + TokenElevation, + TokenHasRestrictions, + TokenAccessInformation, + TokenVirtualizationAllowed, + TokenVirtualizationEnabled, + TokenIntegrityLevel, + TokenUIAccess, + TokenMandatoryPolicy, + TokenLogonSid, + TokenIsAppContainer, + TokenCapabilities, + TokenAppContainerSid, + TokenAppContainerNumber, + TokenUserClaimAttributes, + TokenDeviceClaimAttributes, +
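// (Values mirror the native TOKEN_INFORMATION_CLASS in winnt.h; GetSessionInfo below queries only TokenPrivileges, TokenGroups, TokenIntegrityLevel and TokenStatistics.) +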
TokenRestrictedUserClaimAttributes, + TokenRestrictedDeviceClaimAttributes, + TokenDeviceGroups, + TokenRestrictedDeviceGroups, + TokenSecurityAttributes, + TokenIsRestricted, + MaxTokenInfoClass + } + + public IntPtr hToken = IntPtr.Zero; + + [DllImport("kernel32.dll")] + private static extern IntPtr GetCurrentProcess(); + + [DllImport("advapi32.dll", SetLastError = true)] + private static extern bool OpenProcessToken( + IntPtr ProcessHandle, + TokenAccessLevels DesiredAccess, + out IntPtr TokenHandle); + + [DllImport("advapi32.dll", SetLastError = true)] + private static extern bool GetTokenInformation( + IntPtr TokenHandle, + TOKEN_INFORMATION_CLASS TokenInformationClass, + IntPtr TokenInformation, + UInt32 TokenInformationLength, + out UInt32 ReturnLength); + + public AccessToken(TokenAccessLevels tokenAccessLevels) + { + IntPtr currentProcess = GetCurrentProcess(); + if (!OpenProcessToken(currentProcess, tokenAccessLevels, out hToken)) + throw new Win32Exception("OpenProcessToken() for current process failed"); + } + + public IntPtr GetTokenInformation<T>(out T tokenInformation, TOKEN_INFORMATION_CLASS tokenClass) + { + UInt32 tokenLength = 0; + GetTokenInformation(hToken, tokenClass, IntPtr.Zero, 0, out tokenLength); + + IntPtr infoPtr = Marshal.AllocHGlobal((int)tokenLength); + + if (!GetTokenInformation(hToken, tokenClass, infoPtr, tokenLength, out tokenLength)) + throw new Win32Exception(String.Format("GetTokenInformation() data for {0} failed", tokenClass.ToString())); + + tokenInformation = (T)Marshal.PtrToStructure(infoPtr, typeof(T)); + return infoPtr; + } + + public void Dispose() + { + GC.SuppressFinalize(this); + } + + ~AccessToken() { Dispose(); } + } + + public class LsaHandle : IDisposable + { + [Flags] + public enum DesiredAccess : uint + { + POLICY_VIEW_LOCAL_INFORMATION = 0x00000001, + POLICY_VIEW_AUDIT_INFORMATION = 0x00000002, + POLICY_GET_PRIVATE_INFORMATION = 0x00000004, + POLICY_TRUST_ADMIN = 0x00000008, + POLICY_CREATE_ACCOUNT = 0x00000010, + POLICY_CREATE_SECRET = 0x00000020, + POLICY_CREATE_PRIVILEGE = 0x00000040, + POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080, + POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100, + POLICY_AUDIT_LOG_ADMIN = 0x00000200, + POLICY_SERVER_ADMIN = 0x00000400, + POLICY_LOOKUP_NAMES = 0x00000800, + POLICY_NOTIFICATION = 0x00001000 + } + + public IntPtr handle = IntPtr.Zero; + + [DllImport("advapi32.dll", SetLastError = true)] + private static extern uint LsaOpenPolicy( + LSA_UNICODE_STRING[] SystemName, + ref LSA_OBJECT_ATTRIBUTES ObjectAttributes, + DesiredAccess AccessMask, + out IntPtr PolicyHandle); + + [DllImport("advapi32.dll", SetLastError = true)] + private static extern uint LsaClose( + IntPtr ObjectHandle); + + [DllImport("advapi32.dll", SetLastError = false)] + private static extern int LsaNtStatusToWinError( + uint Status); + + [StructLayout(LayoutKind.Sequential)] + public struct LSA_OBJECT_ATTRIBUTES + { + public int Length; + public IntPtr RootDirectory; + public IntPtr ObjectName; + public int Attributes; + public IntPtr SecurityDescriptor; + public IntPtr SecurityQualityOfService; + } + + public LsaHandle(DesiredAccess desiredAccess) + { + LSA_OBJECT_ATTRIBUTES lsaAttr; + lsaAttr.RootDirectory = IntPtr.Zero; + lsaAttr.ObjectName = IntPtr.Zero; + lsaAttr.Attributes = 0; + lsaAttr.SecurityDescriptor = IntPtr.Zero; + lsaAttr.SecurityQualityOfService = IntPtr.Zero; + lsaAttr.Length = Marshal.SizeOf(typeof(LSA_OBJECT_ATTRIBUTES)); + LSA_UNICODE_STRING[] system = new LSA_UNICODE_STRING[1]; + system[0].buffer = IntPtr.Zero; + + uint
res = LsaOpenPolicy(system, ref lsaAttr, desiredAccess, out handle); + if (res != 0) + throw new Win32Exception(LsaNtStatusToWinError(res), "LsaOpenPolicy() failed"); + } + + public void Dispose() + { + if (handle != IntPtr.Zero) + { + LsaClose(handle); + handle = IntPtr.Zero; + } + GC.SuppressFinalize(this); + } + + ~LsaHandle() { Dispose(); } + } + + public class Sid + { + public string SidString { get; internal set; } + public string DomainName { get; internal set; } + public string AccountName { get; internal set; } + public SID_NAME_USE SidType { get; internal set; } + + public enum SID_NAME_USE + { + SidTypeUser = 1, + SidTypeGroup, + SidTypeDomain, + SidTypeAlias, + SidTypeWellKnownGroup, + SidTypeDeletedAccount, + SidTypeInvalid, + SidTypeUnknown, + SidTypeComputer, + SidTypeLabel, + SidTypeLogon, + } + + [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)] + private static extern bool LookupAccountSid( + string lpSystemName, + [MarshalAs(UnmanagedType.LPArray)] + byte[] Sid, + StringBuilder lpName, + ref UInt32 cchName, + StringBuilder ReferencedDomainName, + ref UInt32 cchReferencedDomainName, + out SID_NAME_USE peUse); + + public Sid(IntPtr sidPtr) + { + SecurityIdentifier sid; + try + { + sid = new SecurityIdentifier(sidPtr); + } + catch (Exception e) + { + throw new ArgumentException(String.Format("Failed to cast IntPtr to SecurityIdentifier: {0}", e)); + } + + SetSidInfo(sid); + } + + public Sid(SecurityIdentifier sid) + { + SetSidInfo(sid); + } + + public override string ToString() + { + return SidString; + } + + private void SetSidInfo(SecurityIdentifier sid) + { + byte[] sidBytes = new byte[sid.BinaryLength]; + sid.GetBinaryForm(sidBytes, 0); + + StringBuilder lpName = new StringBuilder(); + UInt32 cchName = 0; + StringBuilder referencedDomainName = new StringBuilder(); + UInt32 cchReferencedDomainName = 0; + SID_NAME_USE peUse; + LookupAccountSid(null, sidBytes, lpName, ref cchName, referencedDomainName, ref cchReferencedDomainName, out peUse); + + lpName.EnsureCapacity((int)cchName); + referencedDomainName.EnsureCapacity((int)cchReferencedDomainName); + + SidString = sid.ToString(); + if (!LookupAccountSid(null, sidBytes, lpName, ref cchName, referencedDomainName, ref cchReferencedDomainName, out peUse)) + { + int lastError = Marshal.GetLastWin32Error(); + + if (lastError != 1332 && lastError != 1789) // Fails to lookup Logon Sid + { + throw new Win32Exception(lastError, String.Format("LookupAccountSid() failed for SID: {0} {1}", sid.ToString(), lastError)); + } + else if (SidString.StartsWith("S-1-5-5-")) + { + AccountName = String.Format("LogonSessionId_{0}", SidString.Substring(8)); + DomainName = "NT AUTHORITY"; + SidType = SID_NAME_USE.SidTypeLogon; + } + else + { + AccountName = null; + DomainName = null; + SidType = SID_NAME_USE.SidTypeUnknown; + } + } + else + { + AccountName = lpName.ToString(); + DomainName = referencedDomainName.ToString(); + SidType = peUse; + } + } + } + + public class SessionUtil + { + [DllImport("secur32.dll", SetLastError = false)] + private static extern uint LsaFreeReturnBuffer( + IntPtr Buffer); + + [DllImport("secur32.dll", SetLastError = false)] + private static extern uint LsaEnumerateLogonSessions( + out UInt64 LogonSessionCount, + out IntPtr LogonSessionList); + + [DllImport("secur32.dll", SetLastError = false)] + private static extern uint LsaGetLogonSessionData( + IntPtr LogonId, + out IntPtr ppLogonSessionData); + + [DllImport("advapi32.dll", SetLastError = false)] + private static extern int 
LsaNtStatusToWinError( + uint Status); + + [DllImport("advapi32", SetLastError = true)] + private static extern uint LsaEnumerateAccountRights( + IntPtr PolicyHandle, + IntPtr AccountSid, + out IntPtr UserRights, + out UInt64 CountOfRights); + + [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)] + private static extern bool LookupPrivilegeName( + string lpSystemName, + ref LUID lpLuid, + StringBuilder lpName, + ref UInt32 cchName); + + private const UInt32 SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001; + private const UInt32 SE_PRIVILEGE_ENABLED = 0x00000002; + private const UInt32 STATUS_OBJECT_NAME_NOT_FOUND = 0xC0000034; + private const UInt32 STATUS_ACCESS_DENIED = 0xC0000022; + + public static SessionInfo GetSessionInfo() + { + AccessToken accessToken = new AccessToken(TokenAccessLevels.Query); + + // Get Privileges + Hashtable privilegeInfo = new Hashtable(); + TOKEN_PRIVILEGES privileges; + IntPtr privilegesPtr = accessToken.GetTokenInformation(out privileges, AccessToken.TOKEN_INFORMATION_CLASS.TokenPrivileges); + LUID_AND_ATTRIBUTES[] luidAndAttributes = new LUID_AND_ATTRIBUTES[privileges.PrivilegeCount]; + try + { + PtrToStructureArray(luidAndAttributes, privilegesPtr.ToInt64() + Marshal.SizeOf(privileges.PrivilegeCount)); + } + finally + { + Marshal.FreeHGlobal(privilegesPtr); + } + foreach (LUID_AND_ATTRIBUTES luidAndAttribute in luidAndAttributes) + { + LUID privLuid = luidAndAttribute.Luid; + UInt32 privNameLen = 0; + StringBuilder privName = new StringBuilder(); + LookupPrivilegeName(null, ref privLuid, null, ref privNameLen); + privName.EnsureCapacity((int)(privNameLen + 1)); + if (!LookupPrivilegeName(null, ref privLuid, privName, ref privNameLen)) + throw new Win32Exception("LookupPrivilegeName() failed"); + + string state = "disabled"; + if ((luidAndAttribute.Attributes & SE_PRIVILEGE_ENABLED) == SE_PRIVILEGE_ENABLED) + state = "enabled"; + if ((luidAndAttribute.Attributes & SE_PRIVILEGE_ENABLED_BY_DEFAULT) == SE_PRIVILEGE_ENABLED_BY_DEFAULT) + state = "enabled-by-default"; + privilegeInfo.Add(privName.ToString(), state); + } + + // Get Current Process LogonSID, User Rights and Groups + ArrayList userRights = new ArrayList(); + ArrayList userGroups = new ArrayList(); + TOKEN_GROUPS groups; + IntPtr groupsPtr = accessToken.GetTokenInformation(out groups, AccessToken.TOKEN_INFORMATION_CLASS.TokenGroups); + SID_AND_ATTRIBUTES[] sidAndAttributes = new SID_AND_ATTRIBUTES[groups.GroupCount]; + LsaHandle lsaHandle = null; + // We can only get rights if we are an admin + if (new WindowsPrincipal(WindowsIdentity.GetCurrent()).IsInRole(WindowsBuiltInRole.Administrator)) + lsaHandle = new LsaHandle(LsaHandle.DesiredAccess.POLICY_LOOKUP_NAMES); + try + { + PtrToStructureArray(sidAndAttributes, groupsPtr.ToInt64() + IntPtr.Size); + foreach (SID_AND_ATTRIBUTES sidAndAttribute in sidAndAttributes) + { + TokenGroupAttributes attributes = (TokenGroupAttributes)sidAndAttribute.Attributes; + if (attributes.HasFlag(TokenGroupAttributes.SE_GROUP_ENABLED) && lsaHandle != null) + { + ArrayList rights = GetAccountRights(lsaHandle.handle, sidAndAttribute.Sid); + foreach (string right in rights) + { + // Includes both Privileges and Account Rights, only add the ones with Logon in the name + // https://msdn.microsoft.com/en-us/library/windows/desktop/bb545671(v=vs.85).aspx + if (!userRights.Contains(right) && right.Contains("Logon")) + userRights.Add(right); + } + } + // Do not include the Logon SID in the groups category + if 
(!attributes.HasFlag(TokenGroupAttributes.SE_GROUP_LOGON_ID))
+                    {
+                        Hashtable groupInfo = new Hashtable();
+                        Sid group = new Sid(sidAndAttribute.Sid);
+                        ArrayList groupAttributes = new ArrayList();
+                        foreach (TokenGroupAttributes attribute in Enum.GetValues(typeof(TokenGroupAttributes)))
+                        {
+                            if (attributes.HasFlag(attribute))
+                            {
+                                string attributeName = attribute.ToString().Substring(9);
+                                attributeName = attributeName.Replace('_', ' ');
+                                attributeName = attributeName.First().ToString().ToUpper() + attributeName.Substring(1).ToLower();
+                                groupAttributes.Add(attributeName);
+                            }
+                        }
+                        // Using snake_case here as I can't generically convert all dict keys in PS (see Privileges)
+                        groupInfo.Add("sid", group.SidString);
+                        groupInfo.Add("domain_name", group.DomainName);
+                        groupInfo.Add("account_name", group.AccountName);
+                        groupInfo.Add("type", group.SidType);
+                        groupInfo.Add("attributes", groupAttributes);
+                        userGroups.Add(groupInfo);
+                    }
+                }
+            }
+            finally
+            {
+                Marshal.FreeHGlobal(groupsPtr);
+                if (lsaHandle != null)
+                    lsaHandle.Dispose();
+            }
+
+            // Get Integrity Level
+            Sid integritySid = null;
+            TOKEN_MANDATORY_LABEL mandatoryLabel;
+            IntPtr mandatoryLabelPtr = accessToken.GetTokenInformation(out mandatoryLabel, AccessToken.TOKEN_INFORMATION_CLASS.TokenIntegrityLevel);
+            Marshal.FreeHGlobal(mandatoryLabelPtr);
+            integritySid = new Sid(mandatoryLabel.Label.Sid);
+
+            // Get Token Statistics
+            TOKEN_STATISTICS tokenStats;
+            IntPtr tokenStatsPtr = accessToken.GetTokenInformation(out tokenStats, AccessToken.TOKEN_INFORMATION_CLASS.TokenStatistics);
+            Marshal.FreeHGlobal(tokenStatsPtr);
+
+            SessionInfo sessionInfo = GetSessionDataForLogonSession(tokenStats.AuthenticationId);
+            sessionInfo.Groups = userGroups;
+            sessionInfo.Label = integritySid;
+            sessionInfo.ImpersonationLevel = tokenStats.ImpersonationLevel;
+            sessionInfo.TokenType = tokenStats.TokenType;
+            sessionInfo.Privileges = privilegeInfo;
+            sessionInfo.Rights = userRights;
+            return sessionInfo;
+        }
+
+        private static ArrayList GetAccountRights(IntPtr lsaHandle, IntPtr sid)
+        {
+            UInt32 res;
+            ArrayList rights = new ArrayList();
+            IntPtr userRightsPointer = IntPtr.Zero;
+            UInt64 countOfRights = 0;
+
+            res = LsaEnumerateAccountRights(lsaHandle, sid, out userRightsPointer, out countOfRights);
+            if (res != 0 && res != STATUS_OBJECT_NAME_NOT_FOUND)
+                throw new Win32Exception(LsaNtStatusToWinError(res), "LsaEnumerateAccountRights() failed");
+            else if (res != STATUS_OBJECT_NAME_NOT_FOUND)
+            {
+                LSA_UNICODE_STRING[] userRights = new LSA_UNICODE_STRING[countOfRights];
+                PtrToStructureArray(userRights, userRightsPointer.ToInt64());
+                rights = new ArrayList();
+                foreach (LSA_UNICODE_STRING right in userRights)
+                    rights.Add(Marshal.PtrToStringUni(right.buffer));
+            }
+
+            return rights;
+        }
+
+        private static SessionInfo GetSessionDataForLogonSession(LUID logonSession)
+        {
+            uint res;
+            UInt64 count = 0;
+            IntPtr luidPtr = IntPtr.Zero;
+            SessionInfo sessionInfo = null;
+            UInt64 processDataId = ConvertLuidToUint(logonSession);
+
+            res = LsaEnumerateLogonSessions(out count, out luidPtr);
+            if (res != 0)
+                throw new Win32Exception(LsaNtStatusToWinError(res), "LsaEnumerateLogonSessions() failed");
+            Int64 luidAddr = luidPtr.ToInt64();
+
+            try
+            {
+                for (UInt64 i = 0; i < count; i++)
+                {
+                    IntPtr dataPointer = IntPtr.Zero;
+                    res = LsaGetLogonSessionData(luidPtr, out dataPointer);
+                    if (res == STATUS_ACCESS_DENIED) // Non-admins won't be able to get info for sessions that are not their own
+                    {
+                        luidPtr = new IntPtr(luidPtr.ToInt64() + Marshal.SizeOf(typeof(LUID)));
+                        continue;
+                    }
+                    else if (res != 0)
+                        throw new Win32Exception(LsaNtStatusToWinError(res), String.Format("LsaGetLogonSessionData() failed {0}", res));
+
+                    SECURITY_LOGON_SESSION_DATA sessionData = (SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure(dataPointer, typeof(SECURITY_LOGON_SESSION_DATA));
+                    UInt64 sessionDataid = ConvertLuidToUint(sessionData.LogonId);
+
+                    if (sessionDataid == processDataId)
+                    {
+                        ArrayList userFlags = new ArrayList();
+                        UserFlags flags = (UserFlags)sessionData.UserFlags;
+                        foreach (UserFlags flag in Enum.GetValues(typeof(UserFlags)))
+                        {
+                            if (flags.HasFlag(flag))
+                            {
+                                string flagName = flag.ToString().Substring(6);
+                                flagName = flagName.Replace('_', ' ');
+                                flagName = flagName.First().ToString().ToUpper() + flagName.Substring(1).ToLower();
+                                userFlags.Add(flagName);
+                            }
+                        }
+
+                        sessionInfo = new SessionInfo()
+                        {
+                            AuthenticationPackage = Marshal.PtrToStringUni(sessionData.AuthenticationPackage.buffer),
+                            DnsDomainName = Marshal.PtrToStringUni(sessionData.DnsDomainName.buffer),
+                            LoginDomain = Marshal.PtrToStringUni(sessionData.LoginDomain.buffer),
+                            LoginTime = ConvertIntegerToDateString(sessionData.LoginTime),
+                            LogonId = ConvertLuidToUint(sessionData.LogonId),
+                            LogonServer = Marshal.PtrToStringUni(sessionData.LogonServer.buffer),
+                            LogonType = sessionData.LogonType,
+                            Upn = Marshal.PtrToStringUni(sessionData.Upn.buffer),
+                            UserFlags = userFlags,
+                            Account = new Sid(sessionData.Sid)
+                        };
+                        break;
+                    }
+                    luidPtr = new IntPtr(luidPtr.ToInt64() + Marshal.SizeOf(typeof(LUID)));
+                }
+            }
+            finally
+            {
+                LsaFreeReturnBuffer(new IntPtr(luidAddr));
+            }
+
+            if (sessionInfo == null)
+                throw new Exception(String.Format("Could not find the data for logon session {0}", processDataId));
+            return sessionInfo;
+        }
+
+        private static string ConvertIntegerToDateString(UInt64 time)
+        {
+            if (time == 0)
+                return null;
+            if (time > (UInt64)DateTime.MaxValue.ToFileTime())
+                return null;
+
+            DateTime dateTime = DateTime.FromFileTime((long)time);
+            return dateTime.ToString("o");
+        }
+
+        private static UInt64 ConvertLuidToUint(LUID luid)
+        {
+            UInt32 low = luid.LowPart;
+            UInt64 high = (UInt64)luid.HighPart;
+            high = high << 32;
+            UInt64 uintValue = (high | (UInt64)low);
+            return uintValue;
+        }
+
+        private static void PtrToStructureArray<T>(T[] array, Int64 pointerAddress)
+        {
+            Int64 pointerOffset = pointerAddress;
+            for (int i = 0; i < array.Length; i++, pointerOffset += Marshal.SizeOf(typeof(T)))
+                array[i] = (T)Marshal.PtrToStructure(new IntPtr(pointerOffset), typeof(T));
+        }
+
+        public static IEnumerable<T> GetValues<T>()
+        {
+            return Enum.GetValues(typeof(T)).Cast<T>();
+        }
+    }
+}
+'@
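The ConvertLuidToUint helper above is plain 64-bit packing: the LUID's signed HighPart is shifted into the upper 32 bits and combined with the unsigned LowPart by a bitwise OR. A quick cross-check of that arithmetic, written in Python to match the unit tests later in this patch (the helper name is ours; 0x3e7, i.e. 999, is the well-known SYSTEM_LUID logon session value):

def luid_to_uint(high_part, low_part):
    # Mirrors Ansible.SessionUtil.ConvertLuidToUint: HighPart fills the upper
    # 32 bits, LowPart the lower 32 bits.
    return ((high_part & 0xFFFFFFFF) << 32) | (low_part & 0xFFFFFFFF)

assert luid_to_uint(0, 0x3E7) == 999          # SYSTEM's logon session LUID (0:999)
assert luid_to_uint(1, 2) == (1 << 32) + 2    # arbitrary illustrative values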
+
+$original_tmp = $env:TMP
+$env:TMP = $_remote_tmp
+Add-Type -TypeDefinition $session_util
+$env:TMP = $original_tmp
+
+$session_info = [Ansible.SessionUtil]::GetSessionInfo()
+
+Function Convert-Value($value) {
+    $new_value = $value
+    if ($value -is [System.Collections.ArrayList]) {
+        $new_value = [System.Collections.ArrayList]@()
+        foreach ($list_value in $value) {
+            $new_list_value = Convert-Value -value $list_value
+            [void]$new_value.Add($new_list_value)
+        }
+    } elseif ($value -is [Hashtable]) {
+        $new_value = @{}
+        foreach ($entry in $value.GetEnumerator()) {
+            $entry_value = Convert-Value -value $entry.Value
+            # manually convert Sid type entry to remove the SidType prefix
+            if ($entry.Name -eq "type") {
+                $entry_value = $entry_value.Replace("SidType", "")
+            }
+            $new_value[$entry.Name] = $entry_value
+        }
+    } elseif ($value -is [Ansible.Sid]) {
+        $new_value = @{
+            sid = $value.SidString
+            account_name = $value.AccountName
+            domain_name = $value.DomainName
+            type = $value.SidType.ToString().Replace("SidType", "")
+        }
+    } elseif ($value -is [Enum]) {
+        $new_value = $value.ToString()
+    }
+
+    return ,$new_value
+}
+
+$result = @{
+    changed = $false
+}
+
+$properties = [type][Ansible.SessionInfo]
+foreach ($property in $properties.DeclaredProperties) {
+    $property_name = $property.Name
+    $property_value = $session_info.$property_name
+    $snake_name = Convert-StringToSnakeCase -string $property_name
+
+    $result.$snake_name = Convert-Value -value $property_value
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_whoami.py b/test/support/windows-integration/plugins/modules/win_whoami.py
new file mode 100644
index 00000000..d647374b
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_whoami.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_whoami
+version_added: "2.5"
+short_description: Get information about the current user and process
+description:
+- Designed to return the same information as the C(whoami /all) command.
+- Also includes information missing from C(whoami), such as logon metadata
+  (logon rights, id, and type).
+notes:
+- If running this module with a non-admin user, the logon rights will be an
+  empty list, as Administrator rights are required to query LSA for the
+  information.
+seealso:
+- module: win_credential
+- module: win_group_membership
+- module: win_user_right
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Get whoami information
+  win_whoami:
+'''
+
+RETURN = r'''
+authentication_package:
+  description: The name of the authentication package used to authenticate the
+    user in the session.
+  returned: success
+  type: str
+  sample: Negotiate
+user_flags:
+  description: The user flags for the logon session, see UserFlags in
+    U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
+  returned: success
+  type: str
+  sample: Winlogon
+upn:
+  description: The user principal name of the current user.
+  returned: success
+  type: str
+  sample: Administrator@DOMAIN.COM
+logon_type:
+  description: The logon type that identifies the logon method, see
+    U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
+  returned: success
+  type: str
+  sample: Network
+privileges:
+  description: A dictionary of privileges and their state on the logon token.
+  returned: success
+  type: dict
+  sample: {
+      "SeChangeNotifyPrivileges": "enabled-by-default",
+      "SeRemoteShutdownPrivilege": "disabled",
+      "SeDebugPrivilege": "enabled"
+  }
+label:
+  description: The mandatory label set to the logon session.
+  returned: success
+  type: complex
+  contains:
+    domain_name:
+      description: The domain name of the label SID.
+      returned: success
+      type: str
+      sample: Mandatory Label
+    sid:
+      description: The SID in string form.
+      returned: success
+      type: str
+      sample: S-1-16-12288
+    account_name:
+      description: The account name of the label SID.
+      returned: success
+      type: str
+      sample: High Mandatory Level
+    type:
+      description: The type of SID.
+ returned: success + type: str + sample: Label +impersonation_level: + description: The impersonation level of the token, only valid if + C(token_type) is C(TokenImpersonation), see + U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx). + returned: success + type: str + sample: SecurityAnonymous +login_time: + description: The logon time in ISO 8601 format + returned: success + type: str + sample: '2017-11-27T06:24:14.3321665+10:00' +groups: + description: A list of groups and attributes that the user is a member of. + returned: success + type: list + sample: [ + { + "account_name": "Domain Users", + "domain_name": "DOMAIN", + "attributes": [ + "Mandatory", + "Enabled by default", + "Enabled" + ], + "sid": "S-1-5-21-1654078763-769949647-2968445802-513", + "type": "Group" + }, + { + "account_name": "Administrators", + "domain_name": "BUILTIN", + "attributes": [ + "Mandatory", + "Enabled by default", + "Enabled", + "Owner" + ], + "sid": "S-1-5-32-544", + "type": "Alias" + } + ] +account: + description: The running account SID details. + returned: success + type: complex + contains: + domain_name: + description: The domain name of the account SID. + returned: success + type: str + sample: DOMAIN + sid: + description: The SID in string form. + returned: success + type: str + sample: S-1-5-21-1654078763-769949647-2968445802-500 + account_name: + description: The account name of the account SID. + returned: success + type: str + sample: Administrator + type: + description: The type of SID. + returned: success + type: str + sample: User +login_domain: + description: The name of the domain used to authenticate the owner of the + session. + returned: success + type: str + sample: DOMAIN +rights: + description: A list of logon rights assigned to the logon. + returned: success and running user is a member of the local Administrators group + type: list + sample: [ + "SeNetworkLogonRight", + "SeInteractiveLogonRight", + "SeBatchLogonRight", + "SeRemoteInteractiveLogonRight" + ] +logon_server: + description: The name of the server used to authenticate the owner of the + logon session. + returned: success + type: str + sample: DC01 +logon_id: + description: The unique identifier of the logon session. + returned: success + type: int + sample: 20470143 +dns_domain_name: + description: The DNS name of the logon session, this is an empty string if + this is not set. + returned: success + type: str + sample: DOMAIN.COM +token_type: + description: The token type to indicate whether it is a primary or + impersonation token. 
+ returned: success + type: str + sample: TokenPrimary +''' diff --git a/test/units/__init__.py b/test/units/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/_vendor/test_vendor.py b/test/units/_vendor/test_vendor.py new file mode 100644 index 00000000..6a0fa385 --- /dev/null +++ b/test/units/_vendor/test_vendor.py @@ -0,0 +1,65 @@ +# (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pkgutil +import pytest +import sys + +from units.compat.mock import MagicMock, NonCallableMagicMock, patch + + +def reset_internal_vendor_package(): + import ansible + ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor') + + if ansible_vendor_path in sys.path: + sys.path.remove(ansible_vendor_path) + + for pkg in ['ansible._vendor', 'ansible']: + if pkg in sys.modules: + del sys.modules[pkg] + + +def test_package_path_masking(): + from ansible import _vendor + + assert hasattr(_vendor, '__path__') and _vendor.__path__ == [] + + +def test_no_vendored(): + reset_internal_vendor_package() + with patch.object(pkgutil, 'iter_modules', return_value=[]): + previous_path = list(sys.path) + import ansible + ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor') + + assert ansible_vendor_path not in sys.path + assert sys.path == previous_path + + +def test_vendored(vendored_pkg_names=None): + if not vendored_pkg_names: + vendored_pkg_names = ['boguspkg'] + reset_internal_vendor_package() + with patch.object(pkgutil, 'iter_modules', return_value=list((None, p, None) for p in vendored_pkg_names)): + previous_path = list(sys.path) + import ansible + ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor') + assert sys.path[0] == ansible_vendor_path + + if ansible_vendor_path in previous_path: + previous_path.remove(ansible_vendor_path) + + assert sys.path[1:] == previous_path + + +def test_vendored_conflict(): + with pytest.warns(UserWarning) as w: + import pkgutil + import sys + test_vendored(vendored_pkg_names=['sys', 'pkgutil']) # pass a real package we know is already loaded + assert 'pkgutil, sys' in str(w[0].message) # ensure both conflicting modules are listed and sorted diff --git a/test/units/ansible_test/__init__.py b/test/units/ansible_test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/ansible_test/ci/__init__.py b/test/units/ansible_test/ci/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/ansible_test/ci/test_azp.py b/test/units/ansible_test/ci/test_azp.py new file mode 100644 index 00000000..69c4fa49 --- /dev/null +++ b/test/units/ansible_test/ci/test_azp.py @@ -0,0 +1,31 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from .util import common_auth_test + + +def test_auth(): + # noinspection PyProtectedMember + from ansible_test._internal.ci.azp import ( + AzurePipelinesAuthHelper, + ) + + class TestAzurePipelinesAuthHelper(AzurePipelinesAuthHelper): + def __init__(self): + self.public_key_pem = None + self.private_key_pem = None + + def publish_public_key(self, public_key_pem): + # avoid publishing key + self.public_key_pem = public_key_pem + + def initialize_private_key(self): + # cache in memory instead of on disk + if not self.private_key_pem: + self.private_key_pem = self.generate_private_key() + + 
return self.private_key_pem + + auth = TestAzurePipelinesAuthHelper() + + common_auth_test(auth) diff --git a/test/units/ansible_test/ci/test_shippable.py b/test/units/ansible_test/ci/test_shippable.py new file mode 100644 index 00000000..08b276c7 --- /dev/null +++ b/test/units/ansible_test/ci/test_shippable.py @@ -0,0 +1,31 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from .util import common_auth_test + + +def test_auth(): + # noinspection PyProtectedMember + from ansible_test._internal.ci.shippable import ( + ShippableAuthHelper, + ) + + class TestShippableAuthHelper(ShippableAuthHelper): + def __init__(self): + self.public_key_pem = None + self.private_key_pem = None + + def publish_public_key(self, public_key_pem): + # avoid publishing key + self.public_key_pem = public_key_pem + + def initialize_private_key(self): + # cache in memory instead of on disk + if not self.private_key_pem: + self.private_key_pem = self.generate_private_key() + + return self.private_key_pem + + auth = TestShippableAuthHelper() + + common_auth_test(auth) diff --git a/test/units/ansible_test/ci/util.py b/test/units/ansible_test/ci/util.py new file mode 100644 index 00000000..ba8e358b --- /dev/null +++ b/test/units/ansible_test/ci/util.py @@ -0,0 +1,53 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import base64 +import json +import re + + +def common_auth_test(auth): + private_key_pem = auth.initialize_private_key() + public_key_pem = auth.public_key_pem + + extract_pem_key(private_key_pem, private=True) + extract_pem_key(public_key_pem, private=False) + + request = dict(hello='World') + auth.sign_request(request) + + verify_signature(request, public_key_pem) + + +def extract_pem_key(value, private): + assert isinstance(value, type(u'')) + + key_type = '(EC )?PRIVATE' if private else 'PUBLIC' + pattern = r'^-----BEGIN ' + key_type + r' KEY-----\n(?P.*?)\n-----END ' + key_type + r' KEY-----\n$' + match = re.search(pattern, value, flags=re.DOTALL) + + assert match, 'key "%s" does not match pattern "%s"' % (value, pattern) + + base64.b64decode(match.group('key')) # make sure the key can be decoded + + +def verify_signature(request, public_key_pem): + signature = request.pop('signature') + payload_bytes = json.dumps(request, sort_keys=True).encode() + + assert isinstance(signature, type(u'')) + + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.primitives.asymmetric import ec + from cryptography.hazmat.primitives.serialization import load_pem_public_key + + public_key = load_pem_public_key(public_key_pem.encode(), default_backend()) + + verifier = public_key.verifier( + base64.b64decode(signature.encode()), + ec.ECDSA(hashes.SHA256()), + ) + + verifier.update(payload_bytes) + verifier.verify() diff --git a/test/units/ansible_test/conftest.py b/test/units/ansible_test/conftest.py new file mode 100644 index 00000000..9ec9a02f --- /dev/null +++ b/test/units/ansible_test/conftest.py @@ -0,0 +1,14 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import os +import pytest +import sys + + +@pytest.fixture(autouse=True, scope='session') +def ansible_test(): + """Make ansible_test available on sys.path for unit testing ansible-test.""" + test_lib = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'lib') + sys.path.insert(0, test_lib) diff --git 
a/test/units/ansible_test/test_docker_util.py b/test/units/ansible_test/test_docker_util.py new file mode 100644 index 00000000..8427f0f2 --- /dev/null +++ b/test/units/ansible_test/test_docker_util.py @@ -0,0 +1,131 @@ +# This file is part of Ansible +# -*- coding: utf-8 -*- +# +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +import pytest +from units.compat.mock import call, patch, MagicMock + +# docker images quay.io/ansible/centos7-test-container --format '{{json .}}' +DOCKER_OUTPUT_MULTIPLE = """ +{"Containers":"N/A","CreatedAt":"2020-06-11 17:05:58 -0500 CDT","CreatedSince":"3 months ago","Digest":"\u003cnone\u003e","ID":"b0f914b26cc1","Repository":"quay.io/ansible/centos7-test-container","SharedSize":"N/A","Size":"556MB","Tag":"1.17.0","UniqueSize":"N/A","VirtualSize":"555.6MB"} +{"Containers":"N/A","CreatedAt":"2020-06-11 17:05:58 -0500 CDT","CreatedSince":"3 months ago","Digest":"\u003cnone\u003e","ID":"b0f914b26cc1","Repository":"quay.io/ansible/centos7-test-container","SharedSize":"N/A","Size":"556MB","Tag":"latest","UniqueSize":"N/A","VirtualSize":"555.6MB"} +{"Containers":"N/A","CreatedAt":"2019-04-01 19:59:39 -0500 CDT","CreatedSince":"18 months ago","Digest":"\u003cnone\u003e","ID":"dd3d10e03dd3","Repository":"quay.io/ansible/centos7-test-container","SharedSize":"N/A","Size":"678MB","Tag":"1.8.0","UniqueSize":"N/A","VirtualSize":"678MB"} +""".lstrip() # noqa: E501 + +PODMAN_OUTPUT = """ +[ + { + "id": "dd3d10e03dd3580de865560c3440c812a33fd7a1fca8ed8e4a1219ff3d809e3a", + "names": [ + "quay.io/ansible/centos7-test-container:1.8.0" + ], + "digest": "sha256:6e5d9c99aa558779715a80715e5cf0c227a4b59d95e6803c148290c5d0d9d352", + "created": "2019-04-02T00:59:39.234584184Z", + "size": 702761933 + }, + { + "id": "b0f914b26cc1088ab8705413c2f2cf247306ceeea51260d64c26894190d188bd", + "names": [ + "quay.io/ansible/centos7-test-container:latest" + ], + "digest": "sha256:d8431aa74f60f4ff0f1bd36bc9a227bbb2066330acd8bf25e29d8614ee99e39c", + "created": "2020-06-11T22:05:58.382459136Z", + "size": 578513505 + } +] +""".lstrip() + + +@pytest.fixture +def docker_images(): + from ansible_test._internal.docker_util import docker_images + return docker_images + + +@pytest.fixture +def ansible_test(ansible_test): + import ansible_test + return ansible_test + + +@pytest.fixture +def subprocess_error(): + from ansible_test._internal.util import SubprocessError + return SubprocessError + + +@pytest.mark.parametrize( + ('returned_items_count', 'patched_dc_stdout'), + ( + (3, (DOCKER_OUTPUT_MULTIPLE, '')), + (2, (PODMAN_OUTPUT, '')), + (0, ('', '')), + ), + ids=('docker JSONL', 'podman JSON sequence', 'empty output')) +def test_docker_images(docker_images, mocker, returned_items_count, patched_dc_stdout): + mocker.patch( + 'ansible_test._internal.docker_util.docker_command', + return_value=patched_dc_stdout) + ret = 
docker_images('', 'quay.io/ansible/centos7-test-container') + assert len(ret) == returned_items_count + + +def test_podman_fallback(ansible_test, docker_images, subprocess_error, mocker): + '''Test podman >2 && <2.2 fallback''' + + cmd = ['docker', 'images', 'quay.io/ansible/centos7-test-container', '--format', '{{json .}}'] + docker_command_results = [ + subprocess_error(cmd, status=1, stderr='function "json" not defined'), + (PODMAN_OUTPUT, ''), + ] + mocker.patch( + 'ansible_test._internal.docker_util.docker_command', + side_effect=docker_command_results) + + ret = docker_images('', 'quay.io/ansible/centos7-test-container') + calls = [ + call( + '', + ['images', 'quay.io/ansible/centos7-test-container', '--format', '{{json .}}'], + capture=True, + always=True), + call( + '', + ['images', 'quay.io/ansible/centos7-test-container', '--format', 'json'], + capture=True, + always=True), + ] + ansible_test._internal.docker_util.docker_command.assert_has_calls(calls) + assert len(ret) == 2 + + +def test_podman_no_such_image(ansible_test, docker_images, subprocess_error, mocker): + '''Test podman "no such image" error''' + + cmd = ['docker', 'images', 'quay.io/ansible/centos7-test-container', '--format', '{{json .}}'] + exc = subprocess_error(cmd, status=1, stderr='no such image'), + mocker.patch( + 'ansible_test._internal.docker_util.docker_command', + side_effect=exc) + ret = docker_images('', 'quay.io/ansible/centos7-test-container') + assert ret == [] diff --git a/test/units/cli/__init__.py b/test/units/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/arguments/test_optparse_helpers.py b/test/units/cli/arguments/test_optparse_helpers.py new file mode 100644 index 00000000..0e80fba9 --- /dev/null +++ b/test/units/cli/arguments/test_optparse_helpers.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +import pytest + +from ansible import constants as C +from ansible.cli.arguments import option_helpers as opt_help +from ansible import __path__ as ansible_path +from ansible.release import __version__ as ansible_version + +if C.DEFAULT_MODULE_PATH is None: + cpath = u'Default w/o overrides' +else: + cpath = C.DEFAULT_MODULE_PATH + +FAKE_PROG = u'ansible-cli-test' +VERSION_OUTPUT = opt_help.version(prog=FAKE_PROG) + + +@pytest.mark.parametrize( + 'must_have', [ + FAKE_PROG + u' %s' % ansible_version, + u'config file = %s' % C.CONFIG_FILE, + u'configured module search path = %s' % cpath, + u'ansible python module location = %s' % ':'.join(ansible_path), + u'executable location = ', + u'python version = %s' % ''.join(sys.version.splitlines()), + ] +) +def test_option_helper_version(must_have): + assert must_have in VERSION_OUTPUT diff --git a/test/units/cli/galaxy/test_collection_extract_tar.py b/test/units/cli/galaxy/test_collection_extract_tar.py new file mode 100644 index 00000000..526442cc --- /dev/null +++ b/test/units/cli/galaxy/test_collection_extract_tar.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.errors import AnsibleError +from ansible.galaxy.collection import 
_extract_tar_dir + + +@pytest.fixture +def fake_tar_obj(mocker): + m_tarfile = mocker.Mock() + m_tarfile.type = mocker.Mock(return_value=b'99') + m_tarfile.SYMTYPE = mocker.Mock(return_value=b'22') + + return m_tarfile + + +def test_extract_tar_member_trailing_sep(mocker): + m_tarfile = mocker.Mock() + m_tarfile.getmember = mocker.Mock(side_effect=KeyError) + + with pytest.raises(AnsibleError, match='Unable to extract'): + _extract_tar_dir(m_tarfile, '/some/dir/', b'/some/dest') + + assert m_tarfile.getmember.call_count == 1 + + +def test_extract_tar_member_no_trailing_sep(mocker): + m_tarfile = mocker.Mock() + m_tarfile.getmember = mocker.Mock(side_effect=KeyError) + + with pytest.raises(AnsibleError, match='Unable to extract'): + _extract_tar_dir(m_tarfile, '/some/dir', b'/some/dest') + + assert m_tarfile.getmember.call_count == 2 + + +def test_extract_tar_dir_exists(mocker, fake_tar_obj): + mocker.patch('os.makedirs', return_value=None) + m_makedir = mocker.patch('os.mkdir', return_value=None) + mocker.patch('os.path.isdir', return_value=True) + + _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest') + + assert not m_makedir.called + + +def test_extract_tar_dir_does_not_exist(mocker, fake_tar_obj): + mocker.patch('os.makedirs', return_value=None) + m_makedir = mocker.patch('os.mkdir', return_value=None) + mocker.patch('os.path.isdir', return_value=False) + + _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest') + + assert m_makedir.called + assert m_makedir.call_args[0] == (b'/some/dir', 0o0755) diff --git a/test/units/cli/galaxy/test_display_collection.py b/test/units/cli/galaxy/test_display_collection.py new file mode 100644 index 00000000..d4a3b31d --- /dev/null +++ b/test/units/cli/galaxy/test_display_collection.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.cli.galaxy import _display_collection + + +@pytest.fixture +def collection_object(mocker): + def _cobj(fqcn='sandwiches.ham'): + cobj = mocker.MagicMock(latest_version='1.5.0') + cobj.__str__.return_value = fqcn + return cobj + return _cobj + + +def test_display_collection(capsys, collection_object): + _display_collection(collection_object()) + out, err = capsys.readouterr() + + assert out == 'sandwiches.ham 1.5.0 \n' + + +def test_display_collections_small_max_widths(capsys, collection_object): + _display_collection(collection_object(), 1, 1) + out, err = capsys.readouterr() + + assert out == 'sandwiches.ham 1.5.0 \n' + + +def test_display_collections_large_max_widths(capsys, collection_object): + _display_collection(collection_object(), 20, 20) + out, err = capsys.readouterr() + + assert out == 'sandwiches.ham 1.5.0 \n' + + +def test_display_collection_small_minimum_widths(capsys, collection_object): + _display_collection(collection_object('a.b'), min_cwidth=0, min_vwidth=0) + out, err = capsys.readouterr() + + assert out == 'a.b 1.5.0 \n' diff --git a/test/units/cli/galaxy/test_display_header.py b/test/units/cli/galaxy/test_display_header.py new file mode 100644 index 00000000..ae926b0d --- /dev/null +++ b/test/units/cli/galaxy/test_display_header.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, 
division, print_function
+__metaclass__ = type
+
+from ansible.cli.galaxy import _display_header
+
+
+def test_display_header_default(capsys):
+    _display_header('/collections/path', 'h1', 'h2')
+    out, err = capsys.readouterr()
+    out_lines = out.splitlines()
+
+    assert out_lines[0] == ''
+    assert out_lines[1] == '# /collections/path'
+    assert out_lines[2] == 'h1         h2     '
+    assert out_lines[3] == '---------- -------'
+
+
+def test_display_header_widths(capsys):
+    _display_header('/collections/path', 'Collection', 'Version', 18, 18)
+    out, err = capsys.readouterr()
+    out_lines = out.splitlines()
+
+    assert out_lines[0] == ''
+    assert out_lines[1] == '# /collections/path'
+    assert out_lines[2] == 'Collection         Version           '
+    assert out_lines[3] == '------------------ ------------------'
+
+
+def test_display_header_small_widths(capsys):
+    _display_header('/collections/path', 'Col', 'Ver', 1, 1)
+    out, err = capsys.readouterr()
+    out_lines = out.splitlines()
+
+    assert out_lines[0] == ''
+    assert out_lines[1] == '# /collections/path'
+    assert out_lines[2] == 'Col Ver'
+    assert out_lines[3] == '--- ---'
diff --git a/test/units/cli/galaxy/test_display_role.py b/test/units/cli/galaxy/test_display_role.py
new file mode 100644
index 00000000..e23a7725
--- /dev/null
+++ b/test/units/cli/galaxy/test_display_role.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.cli.galaxy import _display_role
+
+
+def test_display_role(mocker, capsys):
+    mocked_galaxy_role = mocker.Mock(install_info=None)
+    mocked_galaxy_role.name = 'testrole'
+    _display_role(mocked_galaxy_role)
+    out, err = capsys.readouterr()
+    out_lines = out.splitlines()
+
+    assert out_lines[0] == '- testrole, (unknown version)'
+
+
+def test_display_role_known_version(mocker, capsys):
+    mocked_galaxy_role = mocker.Mock(install_info={'version': '1.0.0'})
+    mocked_galaxy_role.name = 'testrole'
+    _display_role(mocked_galaxy_role)
+    out, err = capsys.readouterr()
+    out_lines = out.splitlines()
+
+    assert out_lines[0] == '- testrole, 1.0.0'
diff --git a/test/units/cli/galaxy/test_execute_list.py b/test/units/cli/galaxy/test_execute_list.py
new file mode 100644
index 00000000..41fee0bf
--- /dev/null
+++ b/test/units/cli/galaxy/test_execute_list.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+
+
+def test_execute_list_role_called(mocker):
+    """Make
sure the correct method is called for a collection""" + + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list']) + context.CLIARGS._store = {'type': 'collection'} + execute_list_role_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_role', side_effect=AttributeError('raised intentionally')) + execute_list_collection_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_collection', side_effect=AttributeError('raised intentionally')) + with pytest.raises(AttributeError): + gc.execute_list() + + assert execute_list_role_mock.call_count == 0 + assert execute_list_collection_mock.call_count == 1 diff --git a/test/units/cli/galaxy/test_execute_list_collection.py b/test/units/cli/galaxy/test_execute_list_collection.py new file mode 100644 index 00000000..040acf1e --- /dev/null +++ b/test/units/cli/galaxy/test_execute_list_collection.py @@ -0,0 +1,278 @@ +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible import context +from ansible.cli.galaxy import GalaxyCLI +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.galaxy.collection import CollectionRequirement +from ansible.module_utils._text import to_native + + +def path_exists(path): + if to_native(path) == '/root/.ansible/collections/ansible_collections/sandwiches/ham': + return False + elif to_native(path) == '/usr/share/ansible/collections/ansible_collections/sandwiches/reuben': + return False + elif to_native(path) == 'nope': + return False + else: + return True + + +def isdir(path): + if to_native(path) == 'nope': + return False + else: + return True + + +def cliargs(collections_paths=None, collection_name=None): + if collections_paths is None: + collections_paths = ['~/root/.ansible/collections', '/usr/share/ansible/collections'] + + context.CLIARGS._store = { + 'collections_path': collections_paths, + 'collection': collection_name, + 'type': 'collection', + } + + +@pytest.fixture +def mock_collection_objects(mocker): + mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', '/usr/share/ansible/collections']) + mocker.patch('ansible.cli.galaxy.validate_collection_path', + side_effect=['/root/.ansible/collections/ansible_collections', '/usr/share/ansible/collections/ansible_collections']) + + collection_args = ( + ( + 'sandwiches', + 'pbj', + b'/usr/share/ansible/collections/ansible_collections/sandwiches/pbj', + mocker.Mock(), + ['1.0.0', '1.5.0'], + '1.0.0', + False, + ), + ( + 'sandwiches', + 'pbj', + b'/root/.ansible/collections/ansible_collections/sandwiches/pbj', + mocker.Mock(), + ['1.0.0', '1.5.0'], + '1.5.0', + False, + ), + ( + 'sandwiches', + 'ham', + b'/usr/share/ansible/collections/ansible_collections/sandwiches/ham', + mocker.Mock(), + ['1.0.0'], + '1.0.0', + False, + ), + ( + 'sandwiches', + 'reuben', + b'/root/.ansible/collections/ansible_collections/sandwiches/reuben', + mocker.Mock(), + ['1.0.0', '2.5.0'], + '2.5.0', + False, + ), + ) + + collections_path_1 = [CollectionRequirement(*cargs) for cargs in collection_args if to_native(cargs[2]).startswith('/root')] + collections_path_2 = [CollectionRequirement(*cargs) for cargs in collection_args if to_native(cargs[2]).startswith('/usr/share')] + mocker.patch('ansible.cli.galaxy.find_existing_collections', side_effect=[collections_path_1, collections_path_2]) + + +@pytest.fixture +def 
mock_from_path(mocker): + def _from_path(collection_name='pbj'): + collection_args = { + 'sandwiches.pbj': ( + ( + 'sandwiches', + 'pbj', + b'/root/.ansible/collections/ansible_collections/sandwiches/pbj', + mocker.Mock(), + ['1.0.0', '1.5.0'], + '1.5.0', + False, + ), + ( + 'sandwiches', + 'pbj', + b'/usr/share/ansible/collections/ansible_collections/sandwiches/pbj', + mocker.Mock(), + ['1.0.0', '1.5.0'], + '1.0.0', + False, + ), + ), + 'sandwiches.ham': ( + ( + 'sandwiches', + 'ham', + b'/usr/share/ansible/collections/ansible_collections/sandwiches/ham', + mocker.Mock(), + ['1.0.0'], + '1.0.0', + False, + ), + ), + } + + from_path_objects = [CollectionRequirement(*args) for args in collection_args[collection_name]] + mocker.patch('ansible.galaxy.collection.CollectionRequirement.from_path', side_effect=from_path_objects) + + return _from_path + + +def test_execute_list_collection_all(mocker, capsys, mock_collection_objects): + """Test listing all collections from multiple paths""" + + cliargs() + + mocker.patch('os.path.exists', return_value=True) + mocker.patch('os.path.isdir', return_value=True) + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list']) + gc.execute_list_collection() + + out, err = capsys.readouterr() + out_lines = out.splitlines() + + assert len(out_lines) == 12 + assert out_lines[0] == '' + assert out_lines[1] == '# /root/.ansible/collections/ansible_collections' + assert out_lines[2] == 'Collection Version' + assert out_lines[3] == '----------------- -------' + assert out_lines[4] == 'sandwiches.pbj 1.5.0 ' + assert out_lines[5] == 'sandwiches.reuben 2.5.0 ' + assert out_lines[6] == '' + assert out_lines[7] == '# /usr/share/ansible/collections/ansible_collections' + assert out_lines[8] == 'Collection Version' + assert out_lines[9] == '-------------- -------' + assert out_lines[10] == 'sandwiches.ham 1.0.0 ' + assert out_lines[11] == 'sandwiches.pbj 1.0.0 ' + + +def test_execute_list_collection_specific(mocker, capsys, mock_collection_objects, mock_from_path): + """Test listing a specific collection""" + + collection_name = 'sandwiches.ham' + mock_from_path(collection_name) + + cliargs(collection_name=collection_name) + mocker.patch('os.path.exists', path_exists) + mocker.patch('os.path.isdir', return_value=True) + mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name) + mocker.patch('ansible.cli.galaxy._get_collection_widths', return_value=(14, 5)) + + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name]) + gc.execute_list_collection() + + out, err = capsys.readouterr() + out_lines = out.splitlines() + + assert len(out_lines) == 5 + assert out_lines[0] == '' + assert out_lines[1] == '# /usr/share/ansible/collections/ansible_collections' + assert out_lines[2] == 'Collection Version' + assert out_lines[3] == '-------------- -------' + assert out_lines[4] == 'sandwiches.ham 1.0.0 ' + + +def test_execute_list_collection_specific_duplicate(mocker, capsys, mock_collection_objects, mock_from_path): + """Test listing a specific collection that exists at multiple paths""" + + collection_name = 'sandwiches.pbj' + mock_from_path(collection_name) + + cliargs(collection_name=collection_name) + mocker.patch('os.path.exists', path_exists) + mocker.patch('os.path.isdir', return_value=True) + mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name) + + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name]) + gc.execute_list_collection() + + out, err = capsys.readouterr() + out_lines = 
out.splitlines() + + assert len(out_lines) == 10 + assert out_lines[0] == '' + assert out_lines[1] == '# /root/.ansible/collections/ansible_collections' + assert out_lines[2] == 'Collection Version' + assert out_lines[3] == '-------------- -------' + assert out_lines[4] == 'sandwiches.pbj 1.5.0 ' + assert out_lines[5] == '' + assert out_lines[6] == '# /usr/share/ansible/collections/ansible_collections' + assert out_lines[7] == 'Collection Version' + assert out_lines[8] == '-------------- -------' + assert out_lines[9] == 'sandwiches.pbj 1.0.0 ' + + +def test_execute_list_collection_specific_invalid_fqcn(mocker): + """Test an invalid fully qualified collection name (FQCN)""" + + collection_name = 'no.good.name' + + cliargs(collection_name=collection_name) + mocker.patch('os.path.exists', return_value=True) + mocker.patch('os.path.isdir', return_value=True) + + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name]) + with pytest.raises(AnsibleError, match='Invalid collection name'): + gc.execute_list_collection() + + +def test_execute_list_collection_no_valid_paths(mocker, capsys): + """Test listing collections when no valid paths are given""" + + cliargs() + + mocker.patch('os.path.exists', return_value=True) + mocker.patch('os.path.isdir', return_value=False) + mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False) + mocker.patch('ansible.cli.galaxy.display.columns', 79) + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list']) + + with pytest.raises(AnsibleOptionsError, match=r'None of the provided paths were usable.'): + gc.execute_list_collection() + + out, err = capsys.readouterr() + + assert '[WARNING]: - the configured path' in err + assert 'exists, but it\nis not a directory.' in err + + +def test_execute_list_collection_one_invalid_path(mocker, capsys, mock_collection_objects): + """Test listing all collections when one invalid path is given""" + + cliargs() + mocker.patch('os.path.exists', return_value=True) + mocker.patch('os.path.isdir', isdir) + mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', 'nope']) + mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False) + + gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', '-p', 'nope']) + gc.execute_list_collection() + + out, err = capsys.readouterr() + out_lines = out.splitlines() + + assert out_lines[0] == '' + assert out_lines[1] == '# /root/.ansible/collections/ansible_collections' + assert out_lines[2] == 'Collection Version' + assert out_lines[3] == '----------------- -------' + assert out_lines[4] == 'sandwiches.pbj 1.5.0 ' + # Only a partial test of the output + + assert err == '[WARNING]: - the configured path nope, exists, but it is not a directory.\n' diff --git a/test/units/cli/galaxy/test_get_collection_widths.py b/test/units/cli/galaxy/test_get_collection_widths.py new file mode 100644 index 00000000..25649242 --- /dev/null +++ b/test/units/cli/galaxy/test_get_collection_widths.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.cli.galaxy import _get_collection_widths + + +@pytest.fixture +def collection_objects(mocker): + collection_ham = mocker.MagicMock(latest_version='1.5.0') + collection_ham.__str__.return_value = 'sandwiches.ham' + + collection_pbj = mocker.MagicMock(latest_version='2.5') + 
collection_pbj.__str__.return_value = 'sandwiches.pbj' + + collection_reuben = mocker.MagicMock(latest_version='4') + collection_reuben.__str__.return_value = 'sandwiches.reuben' + + return [collection_ham, collection_pbj, collection_reuben] + + +def test_get_collection_widths(collection_objects): + assert _get_collection_widths(collection_objects) == (17, 5) + + +def test_get_collection_widths_single_collection(mocker): + mocked_collection = mocker.MagicMock(latest_version='3.0.0') + mocked_collection.__str__.return_value = 'sandwiches.club' + # Make this look like it is not iterable + mocker.patch('ansible.cli.galaxy.is_iterable', return_value=False) + + assert _get_collection_widths(mocked_collection) == (15, 5) diff --git a/test/units/cli/test_adhoc.py b/test/units/cli/test_adhoc.py new file mode 100644 index 00000000..0e7475c6 --- /dev/null +++ b/test/units/cli/test_adhoc.py @@ -0,0 +1,113 @@ +# Copyright: (c) 2018, Abhijeet Kasurde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import re + +from ansible import context +from ansible.cli.adhoc import AdHocCLI, display +from ansible.errors import AnsibleOptionsError + + +def test_parse(): + """ Test adhoc parse""" + with pytest.raises(ValueError, match='A non-empty list for args is required'): + adhoc_cli = AdHocCLI([]) + + adhoc_cli = AdHocCLI(['ansibletest']) + with pytest.raises(SystemExit): + adhoc_cli.parse() + + +def test_with_command(): + """ Test simple adhoc command""" + module_name = 'command' + adhoc_cli = AdHocCLI(args=['ansible', '-m', module_name, '-vv', 'localhost']) + adhoc_cli.parse() + assert context.CLIARGS['module_name'] == module_name + assert display.verbosity == 2 + + +def test_simple_command(): + """ Test valid command and its run""" + adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost', '-a', 'echo "hi"']) + adhoc_cli.parse() + ret = adhoc_cli.run() + assert ret == 0 + + +def test_no_argument(): + """ Test no argument command""" + adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost']) + adhoc_cli.parse() + with pytest.raises(AnsibleOptionsError) as exec_info: + adhoc_cli.run() + assert 'No argument passed to command module' == str(exec_info.value) + + +def test_did_you_mean_playbook(): + """ Test adhoc with yml file as argument parameter""" + adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost.yml']) + adhoc_cli.parse() + with pytest.raises(AnsibleOptionsError) as exec_info: + adhoc_cli.run() + assert 'No argument passed to command module (did you mean to run ansible-playbook?)' == str(exec_info.value) + + +def test_play_ds_positive(): + """ Test _play_ds""" + adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'command']) + adhoc_cli.parse() + ret = adhoc_cli._play_ds('command', 10, 2) + assert ret['name'] == 'Ansible Ad-Hoc' + assert ret['tasks'] == [{'action': {'module': 'command', 'args': {}}, 'async_val': 10, 'poll': 2}] + + +def test_play_ds_with_include_role(): + """ Test include_role command with poll""" + adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'include_role']) + adhoc_cli.parse() + ret = adhoc_cli._play_ds('include_role', None, 2) + assert ret['name'] == 'Ansible Ad-Hoc' + assert ret['gather_facts'] == 'no' + + +def test_run_import_playbook(): + """ Test import_playbook which is not allowed with ad-hoc command""" + import_playbook = 'import_playbook' + adhoc_cli = 
AdHocCLI(args=['/bin/ansible', '-m', import_playbook, 'localhost'])
+    adhoc_cli.parse()
+    with pytest.raises(AnsibleOptionsError) as exec_info:
+        adhoc_cli.run()
+    assert context.CLIARGS['module_name'] == import_playbook
+    assert "'%s' is not a valid action for ad-hoc commands" % import_playbook == str(exec_info.value)
+
+
+def test_run_no_extra_vars():
+    adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-e'])
+    with pytest.raises(SystemExit) as exec_info:
+        adhoc_cli.parse()
+    assert exec_info.value.code == 2
+
+
+def test_ansible_version(capsys, mocker):
+    adhoc_cli = AdHocCLI(args=['/bin/ansible', '--version'])
+    with pytest.raises(SystemExit):
+        adhoc_cli.run()
+    version = capsys.readouterr()
+    try:
+        version_lines = version.out.splitlines()
+    except AttributeError:
+        # Python 2.6 does not return a named tuple, so get the first item
+        version_lines = version[0].splitlines()
+
+    assert len(version_lines) == 6, 'Incorrect number of lines in "ansible --version" output'
+    assert re.match('ansible [0-9.a-z]+$', version_lines[0]), 'Incorrect ansible version line in "ansible --version" output'
+    assert re.match(' config file = .*$', version_lines[1]), 'Incorrect config file line in "ansible --version" output'
+    assert re.match(' configured module search path = .*$', version_lines[2]), 'Incorrect module search path in "ansible --version" output'
+    assert re.match(' ansible python module location = .*$', version_lines[3]), 'Incorrect python module location in "ansible --version" output'
+    assert re.match(' executable location = .*$', version_lines[4]), 'Incorrect executable location in "ansible --version" output'
+    assert re.match(' python version = .*$', version_lines[5]), 'Incorrect python version in "ansible --version" output'
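Before the TestCliBuildVaultIds cases in the next file, it may help to see the vault-id resolution they pin down in one place. A minimal usage sketch, assuming an importable ansible tree and a TTY on stdin (the tests patch sys.stdin.isatty for exactly this reason); the expected values mirror the assertions that follow:

from ansible import cli

# Explicit vault ids pass through untouched.
assert cli.CLI.build_vault_ids(['foo@bar']) == ['foo@bar']

# No ids and auto-prompting disabled: nothing is synthesized.
assert cli.CLI.build_vault_ids([], auto_prompt=False) == []

# Creating a new password with no ids falls back to a default prompt entry.
assert cli.CLI.build_vault_ids([], create_new_password=True) == ['default@prompt_ask_vault_pass']

# Password files are tagged with the 'default' vault id.
assert cli.CLI.build_vault_ids([], vault_password_files=['some-password-file'],
                               create_new_password=True) == ['default@some-password-file']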
diff --git a/test/units/cli/test_cli.py b/test/units/cli/test_cli.py
new file mode 100644
index 00000000..6dcd9e35
--- /dev/null
+++ b/test/units/cli/test_cli.py
@@ -0,0 +1,381 @@
+# (c) 2017, Adrian Likins
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from units.mock.loader import DictDataLoader
+
+from ansible.release import __version__
+from ansible.parsing import vault
+from ansible import cli
+
+
+class TestCliVersion(unittest.TestCase):
+
+    def test_version_info(self):
+        version_info = cli.CLI.version_info()
+        self.assertEqual(version_info['string'], __version__)
+
+    def test_version_info_gitinfo(self):
+        version_info = cli.CLI.version_info(gitinfo=True)
+        self.assertIn('python version', version_info['string'])
+
+
+class TestCliBuildVaultIds(unittest.TestCase):
+    def setUp(self):
+        self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
+        self.mock_isatty = self.tty_patcher.start()
+
+    def tearDown(self):
+        self.tty_patcher.stop()
+
+    def test(self):
+        res = cli.CLI.build_vault_ids(['foo@bar'])
+        self.assertEqual(res, ['foo@bar'])
+
+    def test_create_new_password_no_vault_id(self):
+        res = cli.CLI.build_vault_ids([], create_new_password=True)
+        self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+    def test_create_new_password_no_vault_id_no_auto_prompt(self):
+        res = cli.CLI.build_vault_ids([], auto_prompt=False, create_new_password=True)
+        self.assertEqual(res, [])
+
+    def test_no_vault_id_no_auto_prompt(self):
+        # simulate 'ansible-playbook site.yml' without --ask-vault-pass, should not prompt
+        res = cli.CLI.build_vault_ids([], auto_prompt=False)
+        self.assertEqual(res, [])
+
+    def test_no_vault_ids_auto_prompt(self):
+        # create_new_password=False
+        # simulate 'ansible-vault edit encrypted.yml'
+        res = cli.CLI.build_vault_ids([], auto_prompt=True)
+        self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+    def test_no_vault_ids_auto_prompt_ask_vault_pass(self):
+        # create_new_password=False
+        # simulate 'ansible-vault edit --ask-vault-pass encrypted.yml'
+        res = cli.CLI.build_vault_ids([], auto_prompt=True, ask_vault_pass=True)
+        self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+    def test_create_new_password_auto_prompt(self):
+        # simulate 'ansible-vault encrypt somefile.yml'
+        res = cli.CLI.build_vault_ids([], auto_prompt=True, create_new_password=True)
+        self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+    def test_create_new_password_no_vault_id_ask_vault_pass(self):
+        res = cli.CLI.build_vault_ids([], ask_vault_pass=True,
+                                      create_new_password=True)
+        self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+    def test_create_new_password_with_vault_ids(self):
+        res = cli.CLI.build_vault_ids(['foo@bar'], create_new_password=True)
+        self.assertEqual(res, ['foo@bar'])
+
+    def test_create_new_password_no_vault_ids_password_files(self):
+        res = cli.CLI.build_vault_ids([], vault_password_files=['some-password-file'],
+                                      create_new_password=True)
+        self.assertEqual(res, ['default@some-password-file'])
+
+    def test_everything(self):
+        res = cli.CLI.build_vault_ids(['blip@prompt', 'baz@prompt_ask_vault_pass',
+                                       'some-password-file', 'qux@another-password-file'],
+                                      vault_password_files=['yet-another-password-file',
+                                                            'one-more-password-file'],
+                                      ask_vault_pass=True,
+                                      create_new_password=True,
+                                      auto_prompt=False)
+
+        self.assertEqual(set(res), set(['blip@prompt', 'baz@prompt_ask_vault_pass',
+                                        'default@prompt_ask_vault_pass',
+                                        'some-password-file', 'qux@another-password-file',
+                                        'default@yet-another-password-file',
+                                        'default@one-more-password-file']))
+
+
+class
TestCliSetupVaultSecrets(unittest.TestCase): + def setUp(self): + self.fake_loader = DictDataLoader({}) + self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True) + self.mock_isatty = self.tty_patcher.start() + + self.display_v_patcher = patch('ansible.cli.display.verbosity', return_value=6) + self.mock_display_v = self.display_v_patcher.start() + cli.display.verbosity = 5 + + def tearDown(self): + self.tty_patcher.stop() + self.display_v_patcher.stop() + cli.display.verbosity = 0 + + def test(self): + res = cli.CLI.setup_vault_secrets(None, None, auto_prompt=False) + self.assertIsInstance(res, list) + + @patch('ansible.cli.get_file_vault_secret') + def test_password_file(self, mock_file_secret): + filename = '/dev/null/secret' + mock_file_secret.return_value = MagicMock(bytes=b'file1_password', + vault_id='file1', + filename=filename) + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['secret1@%s' % filename, 'secret2'], + vault_password_files=[filename]) + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['secret1']) + self.assertIn('secret1', [x[0] for x in matches]) + match = matches[0][1] + self.assertEqual(match.bytes, b'file1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='prompt1') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['prompt1@prompt'], + ask_vault_pass=True, + auto_prompt=False) + + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['prompt1']) + self.assertIn('prompt1', [x[0] for x in matches]) + match = matches[0][1] + self.assertEqual(match.bytes, b'prompt1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_no_tty(self, mock_prompt_secret): + self.mock_isatty.return_value = False + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='prompt1', + name='bytes_should_be_prompt1_password', + spec=vault.PromptVaultSecret) + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['prompt1@prompt'], + ask_vault_pass=True, + auto_prompt=False) + + self.assertIsInstance(res, list) + self.assertEqual(len(res), 2) + matches = vault.match_secrets(res, ['prompt1']) + self.assertIn('prompt1', [x[0] for x in matches]) + self.assertEqual(len(matches), 1) + + @patch('ansible.cli.get_file_vault_secret') + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_no_tty_and_password_file(self, mock_prompt_secret, mock_file_secret): + self.mock_isatty.return_value = False + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='prompt1') + filename = '/dev/null/secret' + mock_file_secret.return_value = MagicMock(bytes=b'file1_password', + vault_id='file1', + filename=filename) + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['prompt1@prompt', 'file1@/dev/null/secret'], + ask_vault_pass=True) + + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['file1']) + self.assertIn('file1', [x[0] for x in matches]) + self.assertNotIn('prompt1', [x[0] for x in matches]) + match = matches[0][1] + self.assertEqual(match.bytes, b'file1_password') + + def _assert_ids(self, vault_id_names, res, password=b'prompt1_password'): + self.assertIsInstance(res, list) + len_ids = len(vault_id_names) + matches = vault.match_secrets(res, vault_id_names) + self.assertEqual(len(res), len_ids, 'len(res):%s does not match len_ids:%s' 
% (len(res), len_ids)) + self.assertEqual(len(matches), len_ids) + for index, prompt in enumerate(vault_id_names): + self.assertIn(prompt, [x[0] for x in matches]) + # simple mock, same password/prompt for each mock_prompt_secret + self.assertEqual(matches[index][1].bytes, password) + + @patch('ansible.cli.PromptVaultSecret') + def test_multiple_prompts(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='prompt1') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['prompt1@prompt', + 'prompt2@prompt'], + ask_vault_pass=False) + + vault_id_names = ['prompt1', 'prompt2'] + self._assert_ids(vault_id_names, res) + + @patch('ansible.cli.PromptVaultSecret') + def test_multiple_prompts_and_ask_vault_pass(self, mock_prompt_secret): + self.mock_isatty.return_value = False + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='prompt1') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['prompt1@prompt', + 'prompt2@prompt', + 'prompt3@prompt_ask_vault_pass'], + ask_vault_pass=True) + + # We provide some vault-ids and secrets, so auto_prompt shouldn't get triggered; + # the extra 'default' id below comes from ask_vault_pass=True + vault_id_names = ['prompt1', 'prompt2', 'prompt3', 'default'] + self._assert_ids(vault_id_names, res) + + @patch('ansible.cli.C') + @patch('ansible.cli.get_file_vault_secret') + @patch('ansible.cli.PromptVaultSecret') + def test_default_file_vault(self, mock_prompt_secret, + mock_file_secret, + mock_config): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='default') + mock_file_secret.return_value = MagicMock(bytes=b'file1_password', + vault_id='default') + mock_config.DEFAULT_VAULT_PASSWORD_FILE = '/dev/null/faux/vault_password_file' + mock_config.DEFAULT_VAULT_IDENTITY = 'default' + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=[], + create_new_password=False, + ask_vault_pass=False) + + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['default']) + # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over prompts + # for the same vault-id ('default') regardless of cli order, since order didn't matter in 2.3 + + self.assertEqual(matches[0][1].bytes, b'file1_password') + self.assertEqual(len(matches), 1) + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=[], + create_new_password=False, + ask_vault_pass=True, + auto_prompt=True) + + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['default']) + self.assertEqual(matches[0][1].bytes, b'file1_password') + self.assertEqual(matches[1][1].bytes, b'prompt1_password') + self.assertEqual(len(matches), 2) + + @patch('ansible.cli.get_file_vault_secret') + @patch('ansible.cli.PromptVaultSecret') + def test_default_file_vault_identity_list(self, mock_prompt_secret, + mock_file_secret): + default_vault_ids = ['some_prompt@prompt', + 'some_file@/dev/null/secret'] + + mock_prompt_secret.return_value = MagicMock(bytes=b'some_prompt_password', + vault_id='some_prompt') + + filename = '/dev/null/secret' + mock_file_secret.return_value = MagicMock(bytes=b'some_file_password', + vault_id='some_file', + filename=filename) + + vault_ids = default_vault_ids + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=vault_ids, + create_new_password=False, + ask_vault_pass=True) + + self.assertIsInstance(res, list) + matches = vault.match_secrets(res, ['some_file'])
+ # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over prompts + # for the same vault-id ('default') regardless of cli order, since order didn't matter in 2.3 + self.assertEqual(matches[0][1].bytes, b'some_file_password') + matches = vault.match_secrets(res, ['some_prompt']) + self.assertEqual(matches[0][1].bytes, b'some_prompt_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_just_ask_vault_pass(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='default') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=[], + create_new_password=False, + ask_vault_pass=True) + + self.assertIsInstance(res, list) + match = vault.match_secrets(res, ['default'])[0][1] + self.assertEqual(match.bytes, b'prompt1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_new_password_ask_vault_pass(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='default') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=[], + create_new_password=True, + ask_vault_pass=True) + + self.assertIsInstance(res, list) + match = vault.match_secrets(res, ['default'])[0][1] + self.assertEqual(match.bytes, b'prompt1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_new_password_vault_id_prompt(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='some_vault_id') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['some_vault_id@prompt'], + create_new_password=True, + ask_vault_pass=False) + + self.assertIsInstance(res, list) + match = vault.match_secrets(res, ['some_vault_id'])[0][1] + self.assertEqual(match.bytes, b'prompt1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_new_password_vault_id_prompt_ask_vault_pass(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='default') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['some_vault_id@prompt_ask_vault_pass'], + create_new_password=True, + ask_vault_pass=False) + + self.assertIsInstance(res, list) + match = vault.match_secrets(res, ['some_vault_id'])[0][1] + self.assertEqual(match.bytes, b'prompt1_password') + + @patch('ansible.cli.PromptVaultSecret') + def test_prompt_new_password_vault_id_prompt_ask_vault_pass_ask_vault_pass(self, mock_prompt_secret): + mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', + vault_id='default') + + res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, + vault_ids=['some_vault_id@prompt_ask_vault_pass'], + create_new_password=True, + ask_vault_pass=True) + + self.assertIsInstance(res, list) + match = vault.match_secrets(res, ['some_vault_id'])[0][1] + self.assertEqual(match.bytes, b'prompt1_password') diff --git a/test/units/cli/test_console.py b/test/units/cli/test_console.py new file mode 100644 index 00000000..3acc4faa --- /dev/null +++ b/test/units/cli/test_console.py @@ -0,0 +1,51 @@ +# (c) 2016, Thilo Uttendorfer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch + +from ansible.cli.console import ConsoleCLI + + +class TestConsoleCLI(unittest.TestCase): + def test_parse(self): + cli = ConsoleCLI(['ansible test']) + cli.parse() + self.assertTrue(cli.parser is not None) + + def test_module_args(self): + cli = ConsoleCLI(['ansible test']) + cli.parse() + res = cli.module_args('copy') + self.assertTrue(cli.parser is not None) + self.assertIn('src', res) + self.assertIn('backup', res) + self.assertIsInstance(res, list) + + @patch('ansible.utils.display.Display.display') + def test_helpdefault(self, mock_display): + cli = ConsoleCLI(['ansible test']) + cli.parse() + cli.modules = set(['copy']) + cli.helpdefault('copy') + self.assertTrue(cli.parser is not None) + self.assertTrue(len(mock_display.call_args_list) > 0, + "display.display should have been called but was not") diff --git a/test/units/cli/test_data/collection_skeleton/README.md b/test/units/cli/test_data/collection_skeleton/README.md new file mode 100644 index 00000000..4cfd8afe --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/README.md @@ -0,0 +1 @@ +A readme \ No newline at end of file diff --git a/test/units/cli/test_data/collection_skeleton/docs/My Collection.md b/test/units/cli/test_data/collection_skeleton/docs/My Collection.md new file mode 100644 index 00000000..6fa917f2 --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/docs/My Collection.md @@ -0,0 +1 @@ +Welcome to my test collection doc for {{ namespace }}. 
\ No newline at end of file diff --git a/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 b/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 new file mode 100644 index 00000000..b1da267a --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 @@ -0,0 +1,7 @@ +namespace: '{{ namespace }}' +name: '{{ collection_name }}' +version: 0.1.0 +readme: README.md +authors: +- Ansible Cow <acow@bovineuniversity.edu> +- Tu Cow <tucow@bovineuniversity.edu> diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/main.yml b/test/units/cli/test_data/collection_skeleton/playbooks/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git a/test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 new file mode 100644 index 00000000..77adf2ef --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 @@ -0,0 +1,3 @@ +- name: test collection skeleton + debug: + msg: "Namespace: {{ namespace }}" \ No newline at end of file diff --git a/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git 
a/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git a/test/units/cli/test_data/role_skeleton/.travis.yml b/test/units/cli/test_data/role_skeleton/.travis.yml new file mode 100644 index 00000000..49e7e1c5 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/test/units/cli/test_data/role_skeleton/README.md b/test/units/cli/test_data/role_skeleton/README.md new file mode 100644 index 00000000..225dd44b --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 b/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 new file mode 100644 index 00000000..3818e64c --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# defaults file for {{ role_name }} diff --git a/test/units/cli/test_data/role_skeleton/files/.git_keep b/test/units/cli/test_data/role_skeleton/files/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 b/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 new file mode 100644 index 00000000..3f4c4967 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# handlers file for {{ role_name }} diff --git a/test/units/cli/test_data/role_skeleton/inventory b/test/units/cli/test_data/role_skeleton/inventory new file mode 100644 index 00000000..2fbb50c4 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/inventory @@ -0,0 +1 @@ +localhost diff --git a/test/units/cli/test_data/role_skeleton/meta/main.yml.j2 b/test/units/cli/test_data/role_skeleton/meta/main.yml.j2 new file mode 100644 index 00000000..2fc53cbe --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/meta/main.yml.j2 @@ -0,0 +1,62 @@ +galaxy_info: + author: {{ author }} + description: {{ description }} + company: {{ company }} + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: {{ license }} + + min_ansible_version: {{ min_ansible_version }} + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If travis integration is configured, only notification for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is + # a keyword that describes and categorizes the role. + # Users find roles by searching for tags. Be sure to + # remove the '[]' above if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of + # alphanumeric characters. Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. + # Be sure to remove the '[]' above if you add dependencies + # to this list. 
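+ # The Jinja loop below renders any dependencies passed to the skeleton as commented-out example entries.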
+{%- for dependency in dependencies %} + #- {{ dependency }} +{%- endfor %} diff --git a/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 b/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 new file mode 100644 index 00000000..a9880650 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# tasks file for {{ role_name }} diff --git a/test/units/cli/test_data/role_skeleton/templates/.git_keep b/test/units/cli/test_data/role_skeleton/templates/.git_keep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git a/test/units/cli/test_data/role_skeleton/templates/test.conf.j2 b/test/units/cli/test_data/role_skeleton/templates/test.conf.j2 new file mode 100644 index 00000000..b4e33641 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/templates/test.conf.j2 @@ -0,0 +1,2 @@ +[defaults] +test_key = {{ test_variable }} diff --git a/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 b/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 new file mode 100644 index 00000000..143d6302 --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 @@ -0,0 +1 @@ +{{ role_name }} diff --git a/test/units/cli/test_data/role_skeleton/tests/test.yml.j2 b/test/units/cli/test_data/role_skeleton/tests/test.yml.j2 new file mode 100644 index 00000000..0c40f95a --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/tests/test.yml.j2 @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - {{ role_name }} diff --git a/test/units/cli/test_data/role_skeleton/vars/main.yml.j2 b/test/units/cli/test_data/role_skeleton/vars/main.yml.j2 new file mode 100644 index 00000000..092d511a --- /dev/null +++ b/test/units/cli/test_data/role_skeleton/vars/main.yml.j2 @@ -0,0 +1,2 @@ +--- +# vars file for {{ role_name }} diff --git a/test/units/cli/test_doc.py b/test/units/cli/test_doc.py new file mode 100644 index 00000000..d93b5aa1 --- /dev/null +++ b/test/units/cli/test_doc.py @@ -0,0 +1,35 @@ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.cli.doc import DocCLI + + +TTY_IFY_DATA = { + # No substitutions + 'no-op': 'no-op', + 'no-op Z(test)': 'no-op Z(test)', + # Simple cases of all substitutions + 'I(italic)': "`italic'", + 'B(bold)': '*bold*', + 'M(ansible.builtin.module)': '[ansible.builtin.module]', + 'U(https://docs.ansible.com)': 'https://docs.ansible.com', + 'L(the user guide,https://docs.ansible.com/user-guide.html)': 'the user guide <https://docs.ansible.com/user-guide.html>', + 'R(the user guide,user-guide)': 'the user guide', + 'C(/usr/bin/file)': "`/usr/bin/file'", + 'HORIZONTALLINE': '\n{0}\n'.format('-' * 13), + # Multiple substitutions + 'The M(ansible.builtin.yum) module B(MUST) be given the C(package) parameter. See the R(looping docs,using-loops) for more info': + "The [ansible.builtin.yum] module *MUST* be given the `package' parameter. 
See the looping docs for more info", + # Problem cases + 'IBM(International Business Machines)': 'IBM(International Business Machines)', + 'L(the user guide, https://docs.ansible.com/)': 'the user guide <https://docs.ansible.com/>', + 'R(the user guide, user-guide)': 'the user guide', +} + + +@pytest.mark.parametrize('text, expected', sorted(TTY_IFY_DATA.items())) +def test_ttyify(text, expected): + assert DocCLI.tty_ify(text) == expected diff --git a/test/units/cli/test_galaxy.py b/test/units/cli/test_galaxy.py new file mode 100644 index 00000000..c6c09159 --- /dev/null +++ b/test/units/cli/test_galaxy.py @@ -0,0 +1,1341 @@ +# -*- coding: utf-8 -*- +# (c) 2016, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ansible +import json +import os +import pytest +import shutil +import stat +import tarfile +import tempfile
import yaml + +import ansible.constants as C +from ansible import context +from ansible.cli.galaxy import GalaxyCLI +from ansible.galaxy.api import GalaxyAPI +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.utils import context_objects as co +from ansible.utils.display import Display +from units.compat import unittest +from units.compat.mock import patch, MagicMock + + +@pytest.fixture(autouse='function') +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None + + +class TestGalaxy(unittest.TestCase): + @classmethod + def setUpClass(cls): + '''creating prerequisites for installing a role; setUpClass occurs ONCE whereas setUp occurs with every method tested.''' + # class data for easy viewing: role_dir, role_tar, role_name, role_req, role_path + + cls.temp_dir = tempfile.mkdtemp(prefix='ansible-test_galaxy-') + os.chdir(cls.temp_dir) + + if os.path.exists("./delete_me"): + shutil.rmtree("./delete_me") + + # creating framework for a role + gc = GalaxyCLI(args=["ansible-galaxy", "init", "--offline", "delete_me"]) + gc.run() + cls.role_dir = "./delete_me" + cls.role_name = "delete_me" + + # making a temp dir for role installation + cls.role_path = os.path.join(tempfile.mkdtemp(), "roles") + if not os.path.isdir(cls.role_path): + os.makedirs(cls.role_path) + + # creating a tar file name for class data + cls.role_tar = './delete_me.tar.gz' + cls.makeTar(cls.role_tar, cls.role_dir) + + # creating a temp file with installation requirements + cls.role_req = './delete_me_requirements.yml' + fd = open(cls.role_req, "w") + fd.write("- 'src': '%s'\n 'name': '%s'\n 'path': '%s'" % (cls.role_tar, cls.role_name, cls.role_path)) + fd.close() + + @classmethod + def makeTar(cls, output_file, source_dir): + ''' used for making a tarfile from a role directory ''' + # adding directory into a tar file + try: + tar = tarfile.open(output_file, "w:gz")
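+ # arcname drops the leading temp-dir components so members extract under the role directory's own name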
+ tar.add(source_dir, arcname=os.path.basename(source_dir)) + except AttributeError: # tarfile obj. has no attribute __exit__ prior to python 2.7 + pass + finally: # ensuring closure of tarfile obj + tar.close() + + @classmethod + def tearDownClass(cls): + '''After tests are finished removes things created in setUpClass''' + # deleting the temp role directory + if os.path.exists(cls.role_dir): + shutil.rmtree(cls.role_dir) + if os.path.exists(cls.role_req): + os.remove(cls.role_req) + if os.path.exists(cls.role_tar): + os.remove(cls.role_tar) + if os.path.isdir(cls.role_path): + shutil.rmtree(cls.role_path) + + os.chdir('/') + shutil.rmtree(cls.temp_dir) + + def setUp(self): + # Reset the stored command line args + co.GlobalCLIArgs._Singleton__instance = None + self.default_args = ['ansible-galaxy'] + + def tearDown(self): + # Reset the stored command line args + co.GlobalCLIArgs._Singleton__instance = None + + def test_init(self): + galaxy_cli = GalaxyCLI(args=self.default_args) + self.assertTrue(isinstance(galaxy_cli, GalaxyCLI)) + + def test_display_min(self): + gc = GalaxyCLI(args=self.default_args) + role_info = {'name': 'some_role_name'} + display_result = gc._display_role_info(role_info) + self.assertTrue(display_result.find('some_role_name') > -1) + + def test_display_galaxy_info(self): + gc = GalaxyCLI(args=self.default_args) + galaxy_info = {} + role_info = {'name': 'some_role_name', + 'galaxy_info': galaxy_info} + display_result = gc._display_role_info(role_info) + if display_result.find('\n\tgalaxy_info:') == -1: + self.fail('Expected galaxy_info to be indented once') + + def test_run(self): + ''' verifies that the GalaxyCLI object's api is created and that execute() is called. ''' + gc = GalaxyCLI(args=["ansible-galaxy", "install", "--ignore-errors", "imaginary_role"]) + gc.parse() + with patch.object(ansible.cli.CLI, "run", return_value=None) as mock_run: + gc.run() + # testing + self.assertIsInstance(gc.galaxy, ansible.galaxy.Galaxy) + self.assertEqual(mock_run.call_count, 1) + self.assertTrue(isinstance(gc.api, ansible.galaxy.api.GalaxyAPI)) + + def test_execute_remove(self): + # installing role + gc = GalaxyCLI(args=["ansible-galaxy", "install", "-p", self.role_path, "-r", self.role_req, '--force']) + gc.run() + + # location where the role was installed + role_file = os.path.join(self.role_path, self.role_name) + + # removing role + # Have to reset the arguments in the context object manually since we're doing the + # equivalent of running the command line program twice + co.GlobalCLIArgs._Singleton__instance = None + gc = GalaxyCLI(args=["ansible-galaxy", "remove", role_file, self.role_name]) + gc.run() + + # testing role was removed + removed_role = not os.path.exists(role_file) + self.assertTrue(removed_role) + + def test_exit_without_ignore_without_flag(self): + ''' tests that GalaxyCLI exits with the error specified if the --ignore-errors flag is not used ''' + gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name"]) + with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display: + # testing that error expected is raised + self.assertRaises(AnsibleError, gc.run) + self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by ")) + + def test_exit_without_ignore_with_flag(self): + ''' tests that GalaxyCLI exits without the error specified if the --ignore-errors flag is used ''' + # testing with --ignore-errors flag + gc = GalaxyCLI(args=["ansible-galaxy", "install", 
"--server=None", "fake_role_name", "--ignore-errors"]) + with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display: + gc.run() + self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by ")) + + def test_parse_no_action(self): + ''' testing the options parser when no action is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", ""]) + self.assertRaises(SystemExit, gc.parse) + + def test_parse_invalid_action(self): + ''' testing the options parser when an invalid action is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "NOT_ACTION"]) + self.assertRaises(SystemExit, gc.parse) + + def test_parse_delete(self): + ''' testing the options parser when the action 'delete' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "delete", "foo", "bar"]) + gc.parse() + self.assertEqual(context.CLIARGS['verbosity'], 0) + + def test_parse_import(self): + ''' testing the options parser when the action 'import' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "import", "foo", "bar"]) + gc.parse() + self.assertEqual(context.CLIARGS['wait'], True) + self.assertEqual(context.CLIARGS['reference'], None) + self.assertEqual(context.CLIARGS['check_status'], False) + self.assertEqual(context.CLIARGS['verbosity'], 0) + + def test_parse_info(self): + ''' testing the options parser when the action 'info' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "info", "foo", "bar"]) + gc.parse() + self.assertEqual(context.CLIARGS['offline'], False) + + def test_parse_init(self): + ''' testing the options parser when the action 'init' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "init", "foo"]) + gc.parse() + self.assertEqual(context.CLIARGS['offline'], False) + self.assertEqual(context.CLIARGS['force'], False) + + def test_parse_install(self): + ''' testing the options parser when the action 'install' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "install"]) + gc.parse() + self.assertEqual(context.CLIARGS['ignore_errors'], False) + self.assertEqual(context.CLIARGS['no_deps'], False) + self.assertEqual(context.CLIARGS['requirements'], None) + self.assertEqual(context.CLIARGS['force'], False) + + def test_parse_list(self): + ''' testing the options parser when the action 'list' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "list"]) + gc.parse() + self.assertEqual(context.CLIARGS['verbosity'], 0) + + def test_parse_remove(self): + ''' testing the options parser when the action 'remove' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "remove", "foo"]) + gc.parse() + self.assertEqual(context.CLIARGS['verbosity'], 0) + + def test_parse_search(self): + ''' testing the options parswer when the action 'search' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "search"]) + gc.parse() + self.assertEqual(context.CLIARGS['platforms'], None) + self.assertEqual(context.CLIARGS['galaxy_tags'], None) + self.assertEqual(context.CLIARGS['author'], None) + + def test_parse_setup(self): + ''' testing the options parser when the action 'setup' is given ''' + gc = GalaxyCLI(args=["ansible-galaxy", "setup", "source", "github_user", "github_repo", "secret"]) + gc.parse() + self.assertEqual(context.CLIARGS['verbosity'], 0) + self.assertEqual(context.CLIARGS['remove_id'], None) + self.assertEqual(context.CLIARGS['setup_list'], False) + + +class ValidRoleTests(object): + + expected_role_dirs = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests') + + @classmethod + def setUpRole(cls, role_name, 
galaxy_args=None, skeleton_path=None, use_explicit_type=False): + if galaxy_args is None: + galaxy_args = [] + + if skeleton_path is not None: + cls.role_skeleton_path = skeleton_path + galaxy_args += ['--role-skeleton', skeleton_path] + + # Make temp directory for testing + cls.test_dir = tempfile.mkdtemp() + if not os.path.isdir(cls.test_dir): + os.makedirs(cls.test_dir) + + cls.role_dir = os.path.join(cls.test_dir, role_name) + cls.role_name = role_name + + # create role using default skeleton + args = ['ansible-galaxy'] + if use_explicit_type: + args += ['role'] + args += ['init', '-c', '--offline'] + galaxy_args + ['--init-path', cls.test_dir, cls.role_name] + + gc = GalaxyCLI(args=args) + gc.run() + cls.gc = gc + + if skeleton_path is None: + cls.role_skeleton_path = gc.galaxy.default_role_skeleton_path + + @classmethod + def tearDownClass(cls): + if os.path.isdir(cls.test_dir): + shutil.rmtree(cls.test_dir) + + def test_metadata(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertIn('galaxy_info', metadata, msg='unable to find galaxy_info in metadata') + self.assertIn('dependencies', metadata, msg='unable to find dependencies in metadata') + + def test_readme(self): + readme_path = os.path.join(self.role_dir, 'README.md') + self.assertTrue(os.path.exists(readme_path), msg='Readme doesn\'t exist') + + def test_main_ymls(self): + need_main_ymls = set(self.expected_role_dirs) - set(['meta', 'tests', 'files', 'templates']) + for d in need_main_ymls: + main_yml = os.path.join(self.role_dir, d, 'main.yml') + self.assertTrue(os.path.exists(main_yml)) + expected_string = "---\n# {0} file for {1}".format(d, self.role_name) + with open(main_yml, 'r') as f: + self.assertEqual(expected_string, f.read().strip()) + + def test_role_dirs(self): + for d in self.expected_role_dirs: + self.assertTrue(os.path.isdir(os.path.join(self.role_dir, d)), msg="Expected role subdirectory {0} doesn't exist".format(d)) + + def test_travis_yml(self): + with open(os.path.join(self.role_dir, '.travis.yml'), 'r') as f: + contents = f.read() + + with open(os.path.join(self.role_skeleton_path, '.travis.yml'), 'r') as f: + expected_contents = f.read() + + self.assertEqual(expected_contents, contents, msg='.travis.yml does not match expected') + + def test_readme_contents(self): + with open(os.path.join(self.role_dir, 'README.md'), 'r') as readme: + contents = readme.read() + + with open(os.path.join(self.role_skeleton_path, 'README.md'), 'r') as f: + expected_contents = f.read() + + self.assertEqual(expected_contents, contents, msg='README.md does not match expected') + + def test_test_yml(self): + with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f: + test_playbook = yaml.safe_load(f) + print(test_playbook) + self.assertEqual(len(test_playbook), 1) + self.assertEqual(test_playbook[0]['hosts'], 'localhost') + self.assertEqual(test_playbook[0]['remote_user'], 'root') + self.assertListEqual(test_playbook[0]['roles'], [self.role_name], msg='The list of roles included in the test play doesn\'t match') + + +class TestGalaxyInitDefault(unittest.TestCase, ValidRoleTests): + + @classmethod + def setUpClass(cls): + cls.setUpRole(role_name='delete_me') + + def test_metadata_contents(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata') + + +class 
TestGalaxyInitAPB(unittest.TestCase, ValidRoleTests): + + @classmethod + def setUpClass(cls): + cls.setUpRole('delete_me_apb', galaxy_args=['--type=apb']) + + def test_metadata_apb_tag(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertIn('apb', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='apb tag not set in role metadata') + + def test_metadata_contents(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata') + + def test_apb_yml(self): + self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'apb.yml')), msg='apb.yml was not created') + + def test_test_yml(self): + with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f: + test_playbook = yaml.safe_load(f) + print(test_playbook) + self.assertEqual(len(test_playbook), 1) + self.assertEqual(test_playbook[0]['hosts'], 'localhost') + self.assertFalse(test_playbook[0]['gather_facts']) + self.assertEqual(test_playbook[0]['connection'], 'local') + self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml') + + +class TestGalaxyInitContainer(unittest.TestCase, ValidRoleTests): + + @classmethod + def setUpClass(cls): + cls.setUpRole('delete_me_container', galaxy_args=['--type=container']) + + def test_metadata_container_tag(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertIn('container', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='container tag not set in role metadata') + + def test_metadata_contents(self): + with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf: + metadata = yaml.safe_load(mf) + self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata') + + def test_meta_container_yml(self): + self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'meta', 'container.yml')), msg='container.yml was not created') + + def test_test_yml(self): + with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f: + test_playbook = yaml.safe_load(f) + print(test_playbook) + self.assertEqual(len(test_playbook), 1) + self.assertEqual(test_playbook[0]['hosts'], 'localhost') + self.assertFalse(test_playbook[0]['gather_facts']) + self.assertEqual(test_playbook[0]['connection'], 'local') + self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml') + + +class TestGalaxyInitSkeleton(unittest.TestCase, ValidRoleTests): + + @classmethod + def setUpClass(cls): + role_skeleton_path = os.path.join(os.path.split(__file__)[0], 'test_data', 'role_skeleton') + cls.setUpRole('delete_me_skeleton', skeleton_path=role_skeleton_path, use_explicit_type=True) + + def test_empty_files_dir(self): + files_dir = os.path.join(self.role_dir, 'files') + self.assertTrue(os.path.isdir(files_dir)) + self.assertListEqual(os.listdir(files_dir), [], msg='we expect the files directory to be empty, is ignore working?') + + def test_template_ignore_jinja(self): + test_conf_j2 = os.path.join(self.role_dir, 'templates', 'test.conf.j2') + self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?") + with open(test_conf_j2, 'r') as f: + contents = 
f.read() + expected_contents = '[defaults]\ntest_key = {{ test_variable }}' + self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?") + + def test_template_ignore_jinja_subfolder(self): + test_conf_j2 = os.path.join(self.role_dir, 'templates', 'subfolder', 'test.conf.j2') + self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?") + with open(test_conf_j2, 'r') as f: + contents = f.read() + expected_contents = '[defaults]\ntest_key = {{ test_variable }}' + self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?") + + def test_template_ignore_similar_folder(self): + self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'templates_extra', 'templates.txt'))) + + def test_skeleton_option(self): + self.assertEqual(self.role_skeleton_path, context.CLIARGS['role_skeleton'], msg='Skeleton path was not parsed properly from the command line') + + +@pytest.mark.parametrize('cli_args, expected', [ + (['ansible-galaxy', 'collection', 'init', 'abc.def'], 0), + (['ansible-galaxy', 'collection', 'init', 'abc.def', '-vvv'], 3), + (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def'], 2), + # Due to our manual parsing we want to verify that -v set in the sub parser takes precedence. This behaviour is + # deprecated and tests should be removed when the code that handles it is removed + (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def', '-v'], 1), + (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def', '-vvvv'], 4), + (['ansible-galaxy', '-vvv', 'init', 'name'], 3), + (['ansible-galaxy', '-vvvvv', 'init', '-v', 'name'], 1), +]) +def test_verbosity_arguments(cli_args, expected, monkeypatch): + # Mock out the functions so we don't actually execute anything + for func_name in [f for f in dir(GalaxyCLI) if f.startswith("execute_")]: + monkeypatch.setattr(GalaxyCLI, func_name, MagicMock()) + + cli = GalaxyCLI(args=cli_args) + cli.run() + + assert context.CLIARGS['verbosity'] == expected + + +@pytest.fixture() +def collection_skeleton(request, tmp_path_factory): + name, skeleton_path = request.param + + galaxy_args = ['ansible-galaxy', 'collection', 'init', '-c'] + + if skeleton_path is not None: + galaxy_args += ['--collection-skeleton', skeleton_path] + + test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + galaxy_args += ['--init-path', test_dir, name] + + GalaxyCLI(args=galaxy_args).run() + namespace_name, collection_name = name.split('.', 1) + collection_dir = os.path.join(test_dir, namespace_name, collection_name) + + return collection_dir + + +@pytest.mark.parametrize('collection_skeleton', [ + ('ansible_test.my_collection', None), +], indirect=True) +def test_collection_default(collection_skeleton): + meta_path = os.path.join(collection_skeleton, 'galaxy.yml') + + with open(meta_path, 'r') as galaxy_meta: + metadata = yaml.safe_load(galaxy_meta) + + assert metadata['namespace'] == 'ansible_test' + assert metadata['name'] == 'my_collection' + assert metadata['authors'] == ['your name <example@domain.com>'] + assert metadata['readme'] == 'README.md' + assert metadata['version'] == '1.0.0' + assert metadata['description'] == 'your collection description' + assert metadata['license'] == ['GPL-2.0-or-later'] + assert metadata['tags'] == [] + assert metadata['dependencies'] == {} + assert metadata['documentation'] == 'http://docs.example.com' + assert 
metadata['repository'] == 'http://example.com/repository' + assert metadata['homepage'] == 'http://example.com' + assert metadata['issues'] == 'http://example.com/issue/tracker' + + for d in ['docs', 'plugins', 'roles']: + assert os.path.isdir(os.path.join(collection_skeleton, d)), \ + "Expected collection subdirectory {0} doesn't exist".format(d) + + +@pytest.mark.parametrize('collection_skeleton', [ + ('ansible_test.delete_me_skeleton', os.path.join(os.path.split(__file__)[0], 'test_data', 'collection_skeleton')), +], indirect=True) +def test_collection_skeleton(collection_skeleton): + meta_path = os.path.join(collection_skeleton, 'galaxy.yml') + + with open(meta_path, 'r') as galaxy_meta: + metadata = yaml.safe_load(galaxy_meta) + + assert metadata['namespace'] == 'ansible_test' + assert metadata['name'] == 'delete_me_skeleton' + assert metadata['authors'] == ['Ansible Cow <acow@bovineuniversity.edu>', 'Tu Cow <tucow@bovineuniversity.edu>'] + assert metadata['version'] == '0.1.0' + assert metadata['readme'] == 'README.md' + assert len(metadata) == 5 + + assert os.path.exists(os.path.join(collection_skeleton, 'README.md')) + + # Test empty directories exist and are empty + for empty_dir in ['plugins/action', 'plugins/filter', 'plugins/inventory', 'plugins/lookup', + 'plugins/module_utils', 'plugins/modules']: + + assert os.listdir(os.path.join(collection_skeleton, empty_dir)) == [] + + # Test files that don't end with .j2 were not templated + doc_file = os.path.join(collection_skeleton, 'docs', 'My Collection.md') + with open(doc_file, 'r') as f: + doc_contents = f.read() + assert doc_contents.strip() == 'Welcome to my test collection doc for {{ namespace }}.' + + # Test files that end with .j2 but are in the templates directory were not templated + for template_dir in ['playbooks/templates', 'playbooks/templates/subfolder', + 'roles/common/templates', 'roles/common/templates/subfolder']: + test_conf_j2 = os.path.join(collection_skeleton, template_dir, 'test.conf.j2') + assert os.path.exists(test_conf_j2) + + with open(test_conf_j2, 'r') as f: + contents = f.read() + expected_contents = '[defaults]\ntest_key = {{ test_variable }}' + + assert expected_contents == contents.strip() + + +@pytest.fixture() +def collection_artifact(collection_skeleton, tmp_path_factory): + ''' Creates a collection artifact tarball that is ready to be published and installed ''' + output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output')) + + # Create a file with +x in the collection so we can test the permissions + execute_path = os.path.join(collection_skeleton, 'runme.sh') + with open(execute_path, mode='wb') as fd: + fd.write(b"echo hi") + + # S_ISUID should not be present on extraction. + os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_ISUID | stat.S_IEXEC) + + # Because we call GalaxyCLI in collection_skeleton we need to reset the singleton back to None so it uses the new + # args, we reset the original args once it is done. 
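+ # (GlobalCLIArgs is a per-process singleton caching the parsed argv, so without this reset the + # 'collection build' invocation below would silently reuse the stale 'collection init' arguments.)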
+ orig_cli_args = co.GlobalCLIArgs._Singleton__instance + try: + co.GlobalCLIArgs._Singleton__instance = None + galaxy_args = ['ansible-galaxy', 'collection', 'build', collection_skeleton, '--output-path', output_dir] + gc = GalaxyCLI(args=galaxy_args) + gc.run() + + yield output_dir + finally: + co.GlobalCLIArgs._Singleton__instance = orig_cli_args + + +def test_invalid_skeleton_path(): + expected = "- the skeleton path '/fake/path' does not exist, cannot init collection" + + gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', 'my.collection', '--collection-skeleton', + '/fake/path']) + with pytest.raises(AnsibleError, match=expected): + gc.run() + + +@pytest.mark.parametrize("name", [ + "", + "invalid", + "hyphen-ns.collection", + "ns.hyphen-collection", + "ns.collection.weird", +]) +def test_invalid_collection_name_init(name): + expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>." % name + + gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', name]) + with pytest.raises(AnsibleError, match=expected): + gc.run() + + +@pytest.mark.parametrize("name, expected", [ + ("", ""), + ("invalid", "invalid"), + ("invalid:1.0.0", "invalid"), + ("hyphen-ns.collection", "hyphen-ns.collection"), + ("ns.hyphen-collection", "ns.hyphen-collection"), + ("ns.collection.weird", "ns.collection.weird"), +]) +def test_invalid_collection_name_install(name, expected, tmp_path_factory): + install_path = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>." % expected + + gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', name, '-p', os.path.join(install_path, 'install')]) + with pytest.raises(AnsibleError, match=expected): + gc.run() + + +@pytest.mark.parametrize('collection_skeleton', [ + ('ansible_test.build_collection', None), +], indirect=True) +def test_collection_build(collection_artifact): + tar_path = os.path.join(collection_artifact, 'ansible_test-build_collection-1.0.0.tar.gz') + assert tarfile.is_tarfile(tar_path) + + with tarfile.open(tar_path, mode='r') as tar: + tar_members = tar.getmembers() + + valid_files = ['MANIFEST.json', 'FILES.json', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md', + 'runme.sh'] + assert len(tar_members) == len(valid_files) + + # Verify the uid and gid are 0 and the correct perms are set + for member in tar_members: + assert member.name in valid_files + + assert member.gid == 0 + assert member.gname == '' + assert member.uid == 0 + assert member.uname == '' + if member.isdir() or member.name == 'runme.sh': + assert member.mode == 0o0755 + else: + assert member.mode == 0o0644 + + manifest_file = tar.extractfile(tar_members[0]) + try: + manifest = json.loads(to_text(manifest_file.read())) + finally: + manifest_file.close() + + coll_info = manifest['collection_info'] + file_manifest = manifest['file_manifest_file'] + assert manifest['format'] == 1 + assert len(manifest.keys()) == 3 + + assert coll_info['namespace'] == 'ansible_test' + assert coll_info['name'] == 'build_collection' + assert coll_info['version'] == '1.0.0' + assert coll_info['authors'] == ['your name <example@domain.com>'] + assert coll_info['readme'] == 'README.md' + assert coll_info['tags'] == [] + assert coll_info['description'] == 'your collection description' + assert coll_info['license'] == ['GPL-2.0-or-later'] + assert coll_info['license_file'] is None + assert coll_info['dependencies'] == {} + assert coll_info['repository'] == 'http://example.com/repository' + assert 
coll_info['documentation'] == 'http://docs.example.com' + assert coll_info['homepage'] == 'http://example.com' + assert coll_info['issues'] == 'http://example.com/issue/tracker' + assert len(coll_info.keys()) == 14 + + assert file_manifest['name'] == 'FILES.json' + assert file_manifest['ftype'] == 'file' + assert file_manifest['chksum_type'] == 'sha256' + assert file_manifest['chksum_sha256'] is not None # Order of keys makes it hard to verify the checksum + assert file_manifest['format'] == 1 + assert len(file_manifest.keys()) == 5 + + files_file = tar.extractfile(tar_members[1]) + try: + files = json.loads(to_text(files_file.read())) + finally: + files_file.close() + + assert len(files['files']) == 7 + assert files['format'] == 1 + assert len(files.keys()) == 2 + + valid_files_entries = ['.', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md', 'runme.sh'] + for file_entry in files['files']: + assert file_entry['name'] in valid_files_entries + assert file_entry['format'] == 1 + + if file_entry['name'] in ['plugins/README.md', 'runme.sh']: + assert file_entry['ftype'] == 'file' + assert file_entry['chksum_type'] == 'sha256' + # Can't test the actual checksum: the html link changes with the version, and the exact + # file contents don't matter here + assert file_entry['chksum_sha256'] is not None + elif file_entry['name'] == 'README.md': + assert file_entry['ftype'] == 'file' + assert file_entry['chksum_type'] == 'sha256' + assert file_entry['chksum_sha256'] == '6d8b5f9b5d53d346a8cd7638a0ec26e75e8d9773d952162779a49d25da6ef4f5' + else: + assert file_entry['ftype'] == 'dir' + assert file_entry['chksum_type'] is None + assert file_entry['chksum_sha256'] is None + + assert len(file_entry.keys()) == 5 + + +@pytest.fixture() +def collection_install(reset_cli_args, tmp_path_factory, monkeypatch): + mock_install = MagicMock() + monkeypatch.setattr(ansible.cli.galaxy, 'install_collections', mock_install) + + mock_warning = MagicMock() + monkeypatch.setattr(ansible.utils.display.Display, 'warning', mock_warning) + + output_dir = to_text((tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))) + yield mock_install, mock_warning, output_dir + + +def test_collection_install_with_names(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1', + '--collections-path', output_dir] + GalaxyCLI(args=galaxy_args).run() + + collection_path = os.path.join(output_dir, 'ansible_collections') + assert os.path.isdir(collection_path) + + assert mock_warning.call_count == 1 + assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \ + in mock_warning.call_args[0][0] + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None), + ('namespace2.collection', '1.0.1', None, None)] + assert mock_install.call_args[0][1] == collection_path + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + +def test_collection_install_with_requirements_file(collection_install): + mock_install, mock_warning, 
output_dir = collection_install + + requirements_file = os.path.join(output_dir, 'requirements.yml') + with open(requirements_file, 'wb') as req_obj: + req_obj.write(b'''--- +collections: +- namespace.coll +- name: namespace2.coll + version: '>2.0.1' +''') + + galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file, + '--collections-path', output_dir] + GalaxyCLI(args=galaxy_args).run() + + collection_path = os.path.join(output_dir, 'ansible_collections') + assert os.path.isdir(collection_path) + + assert mock_warning.call_count == 1 + assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \ + in mock_warning.call_args[0][0] + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None), + ('namespace2.coll', '>2.0.1', None, None)] + assert mock_install.call_args[0][1] == collection_path + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + +def test_collection_install_with_relative_path(collection_install, monkeypatch): + mock_install = collection_install[0] + + mock_req = MagicMock() + mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []} + monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req) + + monkeypatch.setattr(os, 'makedirs', MagicMock()) + + requirements_file = './requirements.yml' + collections_path = './ansible_collections' + galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file, + '--collections-path', collections_path] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None)] + assert mock_install.call_args[0][1] == os.path.abspath(collections_path) + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + assert mock_req.call_count == 1 + assert mock_req.call_args[0][0] == os.path.abspath(requirements_file) + + +def test_collection_install_with_unexpanded_path(collection_install, monkeypatch): + mock_install = collection_install[0] + + mock_req = MagicMock() + mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []} + monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req) + + monkeypatch.setattr(os, 'makedirs', MagicMock()) + + requirements_file = '~/requirements.yml' + collections_path = '~/ansible_collections' + galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file, + '--collections-path', collections_path] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, 
None)] + assert mock_install.call_args[0][1] == os.path.expanduser(os.path.expandvars(collections_path)) + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + assert mock_req.call_count == 1 + assert mock_req.call_args[0][0] == os.path.expanduser(os.path.expandvars(requirements_file)) + + +def test_collection_install_in_collection_dir(collection_install, monkeypatch): + mock_install, mock_warning, output_dir = collection_install + + collections_path = C.COLLECTIONS_PATHS[0] + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1', + '--collections-path', collections_path] + GalaxyCLI(args=galaxy_args).run() + + assert mock_warning.call_count == 0 + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None), + ('namespace2.collection', '1.0.1', None, None)] + assert mock_install.call_args[0][1] == os.path.join(collections_path, 'ansible_collections') + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + +def test_collection_install_with_url(collection_install): + mock_install, dummy, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'https://foo/bar/foo-bar-v1.0.0.tar.gz', + '--collections-path', output_dir] + GalaxyCLI(args=galaxy_args).run() + + collection_path = os.path.join(output_dir, 'ansible_collections') + assert os.path.isdir(collection_path) + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('https://foo/bar/foo-bar-v1.0.0.tar.gz', '*', None, None)] + assert mock_install.call_args[0][1] == collection_path + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + +def test_collection_install_name_and_requirements_fail(collection_install): + test_path = collection_install[2] + expected = 'The positional collection_name arg and --requirements-file are mutually exclusive.' + + with pytest.raises(AnsibleError, match=expected): + GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', + test_path, '--requirements-file', test_path]).run() + + +def test_collection_install_no_name_and_requirements_fail(collection_install): + test_path = collection_install[2] + expected = 'You must specify a collection name or a requirements file.' 
+ + with pytest.raises(AnsibleError, match=expected): + GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '--collections-path', test_path]).run() + + +def test_collection_install_path_with_ansible_collections(collection_install): + mock_install, mock_warning, output_dir = collection_install + + collection_path = os.path.join(output_dir, 'ansible_collections') + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1', + '--collections-path', collection_path] + GalaxyCLI(args=galaxy_args).run() + + assert os.path.isdir(collection_path) + + assert mock_warning.call_count == 1 + assert "The specified collections path '%s' is not part of the configured Ansible collections path" \ + % collection_path in mock_warning.call_args[0][0] + + assert mock_install.call_count == 1 + assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None), + ('namespace2.collection', '1.0.1', None, None)] + assert mock_install.call_args[0][1] == collection_path + assert len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + assert mock_install.call_args[0][3] is True + assert mock_install.call_args[0][4] is False + assert mock_install.call_args[0][5] is False + assert mock_install.call_args[0][6] is False + assert mock_install.call_args[0][7] is False + + +def test_collection_install_ignore_certs(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--ignore-certs'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_args[0][3] is False + + +def test_collection_install_force(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--force'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_args[0][6] is True + + +def test_collection_install_force_deps(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--force-with-deps'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_args[0][7] is True + + +def test_collection_install_no_deps(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--no-deps'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_args[0][5] is True + + +def test_collection_install_ignore(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--ignore-errors'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_install.call_args[0][4] is True + + +def test_collection_install_custom_server(collection_install): + mock_install, mock_warning, output_dir = collection_install + + galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir, + '--server', 'https://galaxy-dev.ansible.com'] + GalaxyCLI(args=galaxy_args).run() + + assert 
len(mock_install.call_args[0][2]) == 1 + assert mock_install.call_args[0][2][0].api_server == 'https://galaxy-dev.ansible.com' + assert mock_install.call_args[0][2][0].validate_certs is True + + +@pytest.fixture() +def requirements_file(request, tmp_path_factory): + content = request.param + + test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Requirements')) + requirements_file = os.path.join(test_dir, 'requirements.yml') + + if content: + with open(requirements_file, 'wb') as req_obj: + req_obj.write(to_bytes(content)) + + yield requirements_file + + +@pytest.fixture() +def requirements_cli(monkeypatch): + monkeypatch.setattr(GalaxyCLI, 'execute_install', MagicMock()) + cli = GalaxyCLI(args=['ansible-galaxy', 'install']) + cli.run() + return cli + + +@pytest.mark.parametrize('requirements_file', [None], indirect=True) +def test_parse_requirements_file_that_doesnt_exist(requirements_cli, requirements_file): + expected = "The requirements file '%s' does not exist." % to_native(requirements_file) + with pytest.raises(AnsibleError, match=expected): + requirements_cli._parse_requirements_file(requirements_file) + + +@pytest.mark.parametrize('requirements_file', ['not a valid yml file: hi: world'], indirect=True) +def test_parse_requirements_file_that_isnt_yaml(requirements_cli, requirements_file): + expected = "Failed to parse the requirements yml at '%s' with the following error" % to_native(requirements_file) + with pytest.raises(AnsibleError, match=expected): + requirements_cli._parse_requirements_file(requirements_file) + + +@pytest.mark.parametrize('requirements_file', [(''' +# Older role based requirements.yml +- galaxy.role +- anotherrole +''')], indirect=True) +def test_parse_requirements_in_older_format_illegal(requirements_cli, requirements_file): + expected = "Expecting requirements file to be a dict with the key 'collections' that contains a list of " \ + "collections to install" + + with pytest.raises(AnsibleError, match=expected): + requirements_cli._parse_requirements_file(requirements_file, allow_old_format=False) + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- version: 1.0.0 +'''], indirect=True) +def test_parse_requirements_without_mandatory_name_key(requirements_cli, requirements_file): + expected = "Collections requirement entry should contain the key name."
+ with pytest.raises(AnsibleError, match=expected): + requirements_cli._parse_requirements_file(requirements_file) + + +@pytest.mark.parametrize('requirements_file', [(''' +collections: +- namespace.collection1 +- namespace.collection2 +'''), (''' +collections: +- name: namespace.collection1 +- name: namespace.collection2 +''')], indirect=True) +def test_parse_requirements(requirements_cli, requirements_file): + expected = { + 'roles': [], + 'collections': [('namespace.collection1', '*', None, None), ('namespace.collection2', '*', None, None)] + } + actual = requirements_cli._parse_requirements_file(requirements_file) + + assert actual == expected + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- name: namespace.collection1 + version: ">=1.0.0,<=2.0.0" + source: https://galaxy-dev.ansible.com +- namespace.collection2'''], indirect=True) +def test_parse_requirements_with_extra_info(requirements_cli, requirements_file): + actual = requirements_cli._parse_requirements_file(requirements_file) + + assert len(actual['roles']) == 0 + assert len(actual['collections']) == 2 + assert actual['collections'][0][0] == 'namespace.collection1' + assert actual['collections'][0][1] == '>=1.0.0,<=2.0.0' + assert actual['collections'][0][2].api_server == 'https://galaxy-dev.ansible.com' + assert actual['collections'][0][2].name == 'explicit_requirement_namespace.collection1' + assert actual['collections'][0][2].token is None + assert actual['collections'][0][2].username is None + assert actual['collections'][0][2].password is None + assert actual['collections'][0][2].validate_certs is True + + assert actual['collections'][1] == ('namespace.collection2', '*', None, None) + + +@pytest.mark.parametrize('requirements_file', [''' +roles: +- username.role_name +- src: username2.role_name2 +- src: ssh://github.com/user/repo + scm: git + +collections: +- namespace.collection2 +'''], indirect=True) +def test_parse_requirements_with_roles_and_collections(requirements_cli, requirements_file): + actual = requirements_cli._parse_requirements_file(requirements_file) + + assert len(actual['roles']) == 3 + assert actual['roles'][0].name == 'username.role_name' + assert actual['roles'][1].name == 'username2.role_name2' + assert actual['roles'][2].name == 'repo' + assert actual['roles'][2].src == 'ssh://github.com/user/repo' + + assert len(actual['collections']) == 1 + assert actual['collections'][0] == ('namespace.collection2', '*', None, None) + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- name: namespace.collection +- name: namespace2.collection2 + source: https://galaxy-dev.ansible.com/ +- name: namespace3.collection3 + source: server +'''], indirect=True) +def test_parse_requirements_with_collection_source(requirements_cli, requirements_file): + galaxy_api = GalaxyAPI(requirements_cli.api, 'server', 'https://config-server') + requirements_cli.api_servers.append(galaxy_api) + + actual = requirements_cli._parse_requirements_file(requirements_file) + + assert actual['roles'] == [] + assert len(actual['collections']) == 3 + assert actual['collections'][0] == ('namespace.collection', '*', None, None) + + assert actual['collections'][1][0] == 'namespace2.collection2' + assert actual['collections'][1][1] == '*' + assert actual['collections'][1][2].api_server == 'https://galaxy-dev.ansible.com/' + assert actual['collections'][1][2].name == 'explicit_requirement_namespace2.collection2' + assert actual['collections'][1][2].token is None + + assert actual['collections'][2] == 
('namespace3.collection3', '*', galaxy_api, None) + + +@pytest.mark.parametrize('requirements_file', [''' +- username.included_role +- src: https://github.com/user/repo +'''], indirect=True) +def test_parse_requirements_roles_with_include(requirements_cli, requirements_file): + reqs = [ + 'ansible.role', + {'include': requirements_file}, + ] + parent_requirements = os.path.join(os.path.dirname(requirements_file), 'parent.yaml') + with open(to_bytes(parent_requirements), 'wb') as req_fd: + req_fd.write(to_bytes(yaml.safe_dump(reqs))) + + actual = requirements_cli._parse_requirements_file(parent_requirements) + + assert len(actual['roles']) == 3 + assert actual['collections'] == [] + assert actual['roles'][0].name == 'ansible.role' + assert actual['roles'][1].name == 'username.included_role' + assert actual['roles'][2].name == 'repo' + assert actual['roles'][2].src == 'https://github.com/user/repo' + + +@pytest.mark.parametrize('requirements_file', [''' +- username.role +- include: missing.yml +'''], indirect=True) +def test_parse_requirements_roles_with_include_missing(requirements_cli, requirements_file): + expected = "Failed to find include requirements file 'missing.yml' in '%s'" % to_native(requirements_file) + + with pytest.raises(AnsibleError, match=expected): + requirements_cli._parse_requirements_file(requirements_file) + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- namespace.name +roles: +- namespace.name +'''], indirect=True) +def test_install_implicit_role_with_collections(requirements_file, monkeypatch): + mock_collection_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install) + mock_role_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-r', requirements_file]) + cli.run() + + assert mock_collection_install.call_count == 1 + assert mock_collection_install.call_args[0][0] == [('namespace.name', '*', None, None)] + assert mock_collection_install.call_args[0][1] == cli._get_default_collection_path() + + assert mock_role_install.call_count == 1 + assert len(mock_role_install.call_args[0][0]) == 1 + assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name' + + found = False + for mock_call in mock_display.mock_calls: + if 'contains collections which will be ignored' in mock_call[1][0]: + found = True + break + assert not found + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- namespace.name +roles: +- namespace.name +'''], indirect=True) +def test_install_explicit_role_with_collections(requirements_file, monkeypatch): + mock_collection_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install) + mock_role_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_display) + + cli = GalaxyCLI(args=['ansible-galaxy', 'role', 'install', '-r', requirements_file]) + cli.run() + + assert mock_collection_install.call_count == 0 + + assert mock_role_install.call_count == 1 + assert len(mock_role_install.call_args[0][0]) == 1 + assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name' + + found = False + for mock_call in mock_display.mock_calls: + if 'contains collections which will be ignored' in mock_call[1][0]: + 
found = True + break + assert found + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- namespace.name +roles: +- namespace.name +'''], indirect=True) +def test_install_role_with_collections_and_path(requirements_file, monkeypatch): + mock_collection_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install) + mock_role_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'warning', mock_display) + + cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-p', 'path', '-r', requirements_file]) + cli.run() + + assert mock_collection_install.call_count == 0 + + assert mock_role_install.call_count == 1 + assert len(mock_role_install.call_args[0][0]) == 1 + assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name' + + found = False + for mock_call in mock_display.mock_calls: + if 'contains collections which will be ignored' in mock_call[1][0]: + found = True + break + assert found + + +@pytest.mark.parametrize('requirements_file', [''' +collections: +- namespace.name +roles: +- namespace.name +'''], indirect=True) +def test_install_collection_with_roles(requirements_file, monkeypatch): + mock_collection_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install) + mock_role_install = MagicMock() + monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_display) + + cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '-r', requirements_file]) + cli.run() + + assert mock_collection_install.call_count == 1 + assert mock_collection_install.call_args[0][0] == [('namespace.name', '*', None, None)] + assert mock_collection_install.call_args[0][1] == cli._get_default_collection_path() + + assert mock_role_install.call_count == 0 + + found = False + for mock_call in mock_display.mock_calls: + if 'contains roles which will be ignored' in mock_call[1][0]: + found = True + break + assert found diff --git a/test/units/cli/test_playbook.py b/test/units/cli/test_playbook.py new file mode 100644 index 00000000..f25e54df --- /dev/null +++ b/test/units/cli/test_playbook.py @@ -0,0 +1,46 @@ +# (c) 2016, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.mock.loader import DictDataLoader + +from ansible import context +from ansible.inventory.manager import InventoryManager +from ansible.vars.manager import VariableManager + +from ansible.cli.playbook import PlaybookCLI + + +class TestPlaybookCLI(unittest.TestCase): + def test_flush_cache(self): + cli = PlaybookCLI(args=["ansible-playbook", "--flush-cache", "foobar.yml"]) + cli.parse() + self.assertTrue(context.CLIARGS['flush_cache']) + + variable_manager = VariableManager() + fake_loader = DictDataLoader({'foobar.yml': ""}) + inventory = InventoryManager(loader=fake_loader, sources='testhost,') + + variable_manager.set_host_facts('testhost', {'canary': True}) + self.assertTrue('testhost' in variable_manager._fact_cache) + + cli._flush_cache(inventory, variable_manager) + self.assertFalse('testhost' in variable_manager._fact_cache) diff --git a/test/units/cli/test_vault.py b/test/units/cli/test_vault.py new file mode 100644 index 00000000..9f2ba685 --- /dev/null +++ b/test/units/cli/test_vault.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# (c) 2017, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pytest + +from units.compat import unittest +from units.compat.mock import patch, MagicMock +from units.mock.vault_helper import TextVaultSecret + +from ansible import context, errors +from ansible.cli.vault import VaultCLI +from ansible.module_utils._text import to_text +from ansible.utils import context_objects as co + + +# TODO: make these tests assert something, likely by verifying +# mock calls + + +@pytest.fixture(autouse=True) +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None + + +class TestVaultCli(unittest.TestCase): + def setUp(self): + self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=False) + self.mock_isatty = self.tty_patcher.start() + + def tearDown(self): + self.tty_patcher.stop() + + def test_parse_empty(self): + cli = VaultCLI(['vaultcli']) + self.assertRaises(SystemExit, + cli.parse) + + # FIXME: something weird seems to be afoot when parsing actions + # cli = VaultCLI(args=['view', '/dev/null/foo', 'mysecret3']) + # will skip '/dev/null/foo'. something in cli.CLI.set_action() ? + # maybe self.args gets modified in a loop?
+ def test_parse_view_file(self): + cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) + cli.parse() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + def test_view_missing_file_no_secret(self, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [] + cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) + cli.parse() + self.assertRaisesRegexp(errors.AnsibleOptionsError, + "A vault password is required to use Ansible's Vault", + cli.run) + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + def test_encrypt_missing_file_no_secret(self, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [] + cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo']) + cli.parse() + self.assertRaisesRegexp(errors.AnsibleOptionsError, + "A vault password is required to use Ansible's Vault", + cli.run) + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_encrypt(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_encrypt_string(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'encrypt_string', + 'some string to encrypt']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + @patch('ansible.cli.vault.display.prompt', return_value='a_prompt') + def test_encrypt_string_prompt(self, mock_display, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', + 'encrypt_string', + '--prompt', + 'some string to encrypt']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + @patch('ansible.cli.vault.sys.stdin.read', return_value='This is data from stdin') + def test_encrypt_string_stdin(self, mock_stdin_read, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', + 'encrypt_string', + '--stdin-name', + 'the_var_from_stdin', + '-']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_encrypt_string_names(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'encrypt_string', + '--name', 'foo1', + '--name', 'foo2', + 'some string to encrypt']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_encrypt_string_more_args_than_names(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'encrypt_string', + '--name', 'foo1', + 'some string to encrypt', + 'other strings', + 'a few more string args']) + cli.parse() + cli.run() + + 
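+ # NOTE: the encrypt/encrypt_string tests above only check that argument + # parsing and dispatch succeed; setup_vault_secrets and VaultEditor are + # both mocked, so no real encryption or file I/O takes place.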
@patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_create(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'create', '/dev/null/foo']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_edit(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'edit', '/dev/null/foo']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_decrypt(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'decrypt', '/dev/null/foo']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_view(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) + cli.parse() + cli.run() + + @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') + @patch('ansible.cli.vault.VaultEditor') + def test_rekey(self, mock_vault_editor, mock_setup_vault_secrets): + mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] + cli = VaultCLI(args=['ansible-vault', 'rekey', '/dev/null/foo']) + cli.parse() + cli.run() + + +@pytest.mark.parametrize('cli_args, expected', [ + (['ansible-vault', 'view', 'vault.txt'], 0), + (['ansible-vault', 'view', 'vault.txt', '-vvv'], 3), + (['ansible-vault', '-vv', 'view', 'vault.txt'], 2), + # Due to our manual parsing we want to verify that -v set in the sub parser takes precedence. 
This behaviour is + # deprecated and tests should be removed when the code that handles it is removed + (['ansible-vault', '-vv', 'view', 'vault.txt', '-v'], 1), + (['ansible-vault', '-vv', 'view', 'vault.txt', '-vvvv'], 4), +]) +def test_verbosity_arguments(cli_args, expected, tmp_path_factory, monkeypatch): + # Add a password file so we don't get a prompt in the test + test_dir = to_text(tmp_path_factory.mktemp('test-ansible-vault')) + pass_file = os.path.join(test_dir, 'pass.txt') + with open(pass_file, 'w') as pass_fd: + pass_fd.write('password') + + cli_args.extend(['--vault-id', pass_file]) + + # Mock out the functions so we don't actually execute anything + for func_name in [f for f in dir(VaultCLI) if f.startswith("execute_")]: + monkeypatch.setattr(VaultCLI, func_name, MagicMock()) + + cli = VaultCLI(args=cli_args) + cli.run() + + assert context.CLIARGS['verbosity'] == expected diff --git a/test/units/compat/__init__.py b/test/units/compat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/compat/builtins.py b/test/units/compat/builtins.py new file mode 100644 index 00000000..f60ee678 --- /dev/null +++ b/test/units/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/test/units/compat/mock.py b/test/units/compat/mock.py new file mode 100644 index 00000000..0972cd2e --- /dev/null +++ b/test/units/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x.
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line; remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read`, `readline`, and `readlines` methods + of the file handle to return. This is an empty string by default.
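+ + For example, separate read calls interleave correctly: + + m = mock_open(read_data='one\ntwo\n') + fh = m() # the handle code under test receives from the patched open() + assert fh.readline() == 'one\n' # readline consumes the first line + assert fh.read() == 'two\n' # read then returns only the remainder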
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/test/units/compat/unittest.py b/test/units/compat/unittest.py new file mode 100644 index 00000000..98f08ad6 --- /dev/null +++ b/test/units/compat/unittest.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Allow wildcard import because we really do want to import all of +# unittest's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/test/units/config/manager/__init__.py b/test/units/config/manager/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/config/manager/test_find_ini_config_file.py b/test/units/config/manager/test_find_ini_config_file.py new file mode 100644 index 00000000..df411388 --- /dev/null +++ b/test/units/config/manager/test_find_ini_config_file.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import os.path +import stat + +import pytest + +from ansible.config.manager import find_ini_config_file +from ansible.module_utils._text import to_text + +real_exists = os.path.exists +real_isdir = os.path.isdir + +working_dir = os.path.dirname(__file__) +cfg_in_cwd = os.path.join(working_dir, 'ansible.cfg') + +cfg_dir = os.path.join(working_dir, 'data') +cfg_file = os.path.join(cfg_dir, 'ansible.cfg') +alt_cfg_file = os.path.join(cfg_dir, 'test.cfg') +cfg_in_homedir = os.path.expanduser('~/.ansible.cfg') + + +@pytest.fixture +def setup_env(request): + cur_config = os.environ.get('ANSIBLE_CONFIG', None) + cfg_path = request.param[0] + + if cfg_path is None and cur_config: + del os.environ['ANSIBLE_CONFIG'] + else: + os.environ['ANSIBLE_CONFIG'] = request.param[0] + + yield + + if cur_config is None and cfg_path: + del os.environ['ANSIBLE_CONFIG'] + else: + os.environ['ANSIBLE_CONFIG'] = cur_config + + +@pytest.fixture
def setup_existing_files(request, monkeypatch): + def _os_path_exists(path): + if to_text(path) in (request.param[0]): + return True + else: + return False + + def _os_access(path, access): + if to_text(path) in (request.param[0]): + return True + else: + return False + + # Enable user and system dirs so that we know cwd takes precedence + monkeypatch.setattr("os.path.exists", _os_path_exists) + monkeypatch.setattr("os.access", _os_access) + monkeypatch.setattr("os.getcwd", lambda: os.path.dirname(cfg_dir)) + monkeypatch.setattr("os.path.isdir", lambda path: True if to_text(path) == cfg_dir else real_isdir(path)) + + +class TestFindIniFile: + # This tells us to run twice, once with a file specified and once with a directory + @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_dir], cfg_file)), indirect=['setup_env']) + # This just passes the list of files that exist to the fixture + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, alt_cfg_file, cfg_file)]], + indirect=['setup_existing_files']) + def test_env_has_cfg_file(self, setup_env, setup_existing_files, expected): + """ANSIBLE_CONFIG is specified, use it""" + warnings = set() + assert find_ini_config_file(warnings) == expected + assert warnings == set() + +
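+ # The remaining tests walk the fallback order find_ini_config_file uses when + # ANSIBLE_CONFIG is unset or unusable: ansible.cfg in the cwd first, then + # ~/.ansible.cfg, then /etc/ansible/ansible.cfg.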
@pytest.mark.parametrize('setup_env', ([alt_cfg_file], [cfg_dir]), indirect=['setup_env']) + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd)]], + indirect=['setup_existing_files']) + def test_env_has_no_cfg_file(self, setup_env, setup_existing_files): + """ANSIBLE_CONFIG is specified but the file does not exist""" + + warnings = set() + # since the cfg file specified by ANSIBLE_CONFIG doesn't exist, the one at cwd that does + # exist should be returned + assert find_ini_config_file(warnings) == cfg_in_cwd + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # All config files are present + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_ini_in_cwd(self, setup_env, setup_existing_files): + """ANSIBLE_CONFIG not specified. Use the cwd cfg""" + warnings = set() + assert find_ini_config_file(warnings) == cfg_in_cwd + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # No config in cwd + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_ini_in_homedir(self, setup_env, setup_existing_files): + """First config found is in the homedir""" + warnings = set() + assert find_ini_config_file(warnings) == cfg_in_homedir + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # No config in cwd + @pytest.mark.parametrize('setup_existing_files', [[('/etc/ansible/ansible.cfg', cfg_file, alt_cfg_file)]], indirect=['setup_existing_files']) + def test_ini_in_systemdir(self, setup_env, setup_existing_files): + """First config found is the system config""" + warnings = set() + assert find_ini_config_file(warnings) == '/etc/ansible/ansible.cfg' + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # No config in cwd + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_cwd_does_not_exist(self, setup_env, setup_existing_files, monkeypatch): + """Smoketest current working directory doesn't exist""" + def _os_stat(path): + raise OSError('%s does not exist' % path) + monkeypatch.setattr('os.stat', _os_stat) + + warnings = set() + assert find_ini_config_file(warnings) == cfg_in_homedir + assert warnings == set() + + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # No config in cwd + @pytest.mark.parametrize('setup_existing_files', [[list()]], indirect=['setup_existing_files']) + def test_no_config(self, setup_env, setup_existing_files): + """No config present, no config found""" + warnings = set() + assert find_ini_config_file(warnings) is None + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # All config files are present except in cwd + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def 
test_no_cwd_cfg_no_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch): + """If the cwd is writable but there is no config file there, move on with no warning""" + real_stat = os.stat + + def _os_stat(path): + if path == working_dir: + from posix import stat_result + stat_info = list(real_stat(path)) + stat_info[stat.ST_MODE] |= stat.S_IWOTH + return stat_result(stat_info) + else: + return real_stat(path) + + monkeypatch.setattr('os.stat', _os_stat) + + warnings = set() + assert find_ini_config_file(warnings) == cfg_in_homedir + assert len(warnings) == 0 + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # All config files are present + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_cwd_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch): + """If the cwd is writable, warn and skip it """ + real_stat = os.stat + + def _os_stat(path): + if path == working_dir: + from posix import stat_result + stat_info = list(real_stat(path)) + stat_info[stat.ST_MODE] |= stat.S_IWOTH + return stat_result(stat_info) + else: + return real_stat(path) + + monkeypatch.setattr('os.stat', _os_stat) + + warnings = set() + assert find_ini_config_file(warnings) == cfg_in_homedir + assert len(warnings) == 1 + warning = warnings.pop() + assert u'Ansible is being run in a world writable directory' in warning + assert u'ignoring it as an ansible.cfg source' in warning + + # ANSIBLE_CONFIG is specified + @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_in_cwd], cfg_in_cwd)), indirect=['setup_env']) + # All config files are present + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_no_warning_on_writable_if_env_used(self, setup_env, setup_existing_files, monkeypatch, expected): + """If the cwd is writable but ANSIBLE_CONFIG was used, no warning should be issued""" + real_stat = os.stat + + def _os_stat(path): + if path == working_dir: + from posix import stat_result + stat_info = list(real_stat(path)) + stat_info[stat.ST_MODE] |= stat.S_IWOTH + return stat_result(stat_info) + else: + return real_stat(path) + + monkeypatch.setattr('os.stat', _os_stat) + + warnings = set() + assert find_ini_config_file(warnings) == expected + assert warnings == set() + + # ANSIBLE_CONFIG not specified + @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env']) + # All config files are present + @pytest.mark.parametrize('setup_existing_files', + [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]], + indirect=['setup_existing_files']) + def test_cwd_warning_on_writable_no_warning_set(self, setup_env, setup_existing_files, monkeypatch): + """Smoketest that the function succeeds even though no warning set was passed in""" + real_stat = os.stat + + def _os_stat(path): + if path == working_dir: + from posix import stat_result + stat_info = list(real_stat(path)) + stat_info[stat.ST_MODE] |= stat.S_IWOTH + return stat_result(stat_info) + else: + return real_stat(path) + + monkeypatch.setattr('os.stat', _os_stat) + + assert find_ini_config_file() == cfg_in_homedir diff --git a/test/units/config/test.cfg b/test/units/config/test.cfg new file mode 100644 index 00000000..57958d87 --- /dev/null +++
b/test/units/config/test.cfg @@ -0,0 +1,4 @@ +[defaults] +inikey=fromini +matterless=lessfromini +mattermore=morefromini diff --git a/test/units/config/test.yml b/test/units/config/test.yml new file mode 100644 index 00000000..384a055b --- /dev/null +++ b/test/units/config/test.yml @@ -0,0 +1,55 @@ +# mock config defs with different use cases +config_entry: &entry + name: test config + default: DEFAULT + description: + - This does nothing, it's for testing + env: + - name: ENVVAR + ini: + - section: defaults + key: inikey + type: string +config_entry_multi: &entry_multi + name: has more than one entry per config source + default: DEFAULT + description: + - This does nothing, it's for testing + env: + - name: MATTERLESS + - name: MATTERMORE + ini: + - section: defaults + key: matterless + - section: defaults + key: mattermore + type: string +config_entry_bool: + <<: *entry + type: bool + default: False +config_entry_list: + <<: *entry + type: list + default: [DEFAULT] +config_entry_deprecated: + <<: *entry + deprecated: &dep + why: 'cause i wanna' + version: 9.2 + alternative: 'none whatso ever' +config_entry_multi_deprecated: + <<: *entry_multi + deprecated: *dep +config_entry_multi_deprecated_source: + <<: *entry_multi + env: + - name: MATTERLESS + deprecated: *dep + - name: MATTERMORE + ini: + - section: defaults + key: matterless + deprecated: *dep + - section: defaults + key: mattermore diff --git a/test/units/config/test2.cfg b/test/units/config/test2.cfg new file mode 100644 index 00000000..da2d77b0 --- /dev/null +++ b/test/units/config/test2.cfg @@ -0,0 +1,4 @@ +[defaults] +inikey=fromini2 +matterless=lessfromini2 +mattermore=morefromini2 diff --git a/test/units/config/test_data.py b/test/units/config/test_data.py new file mode 100644 index 00000000..da043e7b --- /dev/null +++ b/test/units/config/test_data.py @@ -0,0 +1,41 @@ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest + +from ansible.config.data import ConfigData +from ansible.config.manager import Setting + + +mykey = Setting('mykey', 'myvalue', 'test', 'string') +mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list') +mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer') + + +class TestConfigData(unittest.TestCase): + + def setUp(self): + self.cdata = ConfigData() + + def tearDown(self): + self.cdata = None + + def test_update_setting(self): + for setting in [mykey, mykey2, mykey3]: + self.cdata.update_setting(setting) + self.assertEqual(setting, self.cdata._global_settings.get(setting.name)) + + def test_update_setting_with_plugin(self): + pass + + def test_get_setting(self): + self.cdata._global_settings = {'mykey': mykey} + self.assertEqual(mykey, self.cdata.get_setting('mykey')) + + def test_get_settings(self): + all_settings = {'mykey': mykey, 'mykey2': mykey2} + self.cdata._global_settings = all_settings + + for setting in self.cdata.get_settings(): + self.assertEqual(all_settings[setting.name], setting) diff --git a/test/units/config/test_manager.py b/test/units/config/test_manager.py new file mode 100644 index 00000000..d103e5e6 --- /dev/null +++ b/test/units/config/test_manager.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os
+import os.path +import pytest + +from ansible.config.manager import ConfigManager, Setting, ensure_type, resolve_path, get_config_type +from ansible.errors import AnsibleOptionsError, AnsibleError +from ansible.module_utils.six import integer_types, string_types +from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode + +curdir = os.path.dirname(__file__) +cfg_file = os.path.join(curdir, 'test.cfg') +cfg_file2 = os.path.join(curdir, 'test2.cfg') + +expected_ini = {'CONFIG_FILE': Setting(name='CONFIG_FILE', value=cfg_file, origin='', type='string'), + 'config_entry': Setting(name='config_entry', value=u'fromini', origin=cfg_file, type='string'), + 'config_entry_bool': Setting(name='config_entry_bool', value=False, origin=cfg_file, type='bool'), + 'config_entry_list': Setting(name='config_entry_list', value=['fromini'], origin=cfg_file, type='list'), + 'config_entry_deprecated': Setting(name='config_entry_deprecated', value=u'fromini', origin=cfg_file, type='string'), + 'config_entry_multi': Setting(name='config_entry_multi', value=u'morefromini', origin=cfg_file, type='string'), + 'config_entry_multi_deprecated': Setting(name='config_entry_multi_deprecated', value=u'morefromini', origin=cfg_file, type='string'), + 'config_entry_multi_deprecated_source': Setting(name='config_entry_multi_deprecated_source', value=u'morefromini', + origin=cfg_file, type='string')} + +ensure_test_data = [ + ('a,b', 'list', list), + (['a', 'b'], 'list', list), + ('y', 'bool', bool), + ('yes', 'bool', bool), + ('on', 'bool', bool), + ('1', 'bool', bool), + ('true', 'bool', bool), + ('t', 'bool', bool), + (1, 'bool', bool), + (1.0, 'bool', bool), + (True, 'bool', bool), + ('n', 'bool', bool), + ('no', 'bool', bool), + ('off', 'bool', bool), + ('0', 'bool', bool), + ('false', 'bool', bool), + ('f', 'bool', bool), + (0, 'bool', bool), + (0.0, 'bool', bool), + (False, 'bool', bool), + ('10', 'int', integer_types), + (20, 'int', integer_types), + ('0.10', 'float', float), + (0.2, 'float', float), + ('/tmp/test.yml', 'pathspec', list), + ('/tmp/test.yml,/home/test2.yml', 'pathlist', list), + ('a', 'str', string_types), + ('a', 'string', string_types), + ('Café', 'string', string_types), + ('', 'string', string_types), + ('None', 'none', type(None)) +] + + +class TestConfigManager: + @classmethod + def setup_class(cls): + cls.manager = ConfigManager(cfg_file, os.path.join(curdir, 'test.yml')) + + @classmethod + def teardown_class(cls): + cls.manager = None + + def test_initial_load(self): + assert self.manager.data._global_settings == expected_ini + + @pytest.mark.parametrize("value, expected_type, python_type", ensure_test_data) + def test_ensure_type(self, value, expected_type, python_type): + assert isinstance(ensure_type(value, expected_type), python_type) + + def test_resolve_path(self): + assert os.path.join(curdir, 'test.yml') == resolve_path('./test.yml', cfg_file) + + def test_resolve_path_cwd(self): + assert os.path.join(os.getcwd(), 'test.yml') == resolve_path('{{CWD}}/test.yml') + assert os.path.join(os.getcwd(), 'test.yml') == resolve_path('./test.yml') + + def test_value_and_origin_from_ini(self): + assert self.manager.get_config_value_and_origin('config_entry') == ('fromini', cfg_file) + + def test_value_from_ini(self): + assert self.manager.get_config_value('config_entry') == 'fromini' + + def test_value_and_origin_from_alt_ini(self): + assert self.manager.get_config_value_and_origin('config_entry', cfile=cfg_file2) == ('fromini2', cfg_file2) + + def test_value_from_alt_ini(self): + assert 
self.manager.get_config_value('config_entry', cfile=cfg_file2) == 'fromini2' + + def test_config_types(self): + assert get_config_type('/tmp/ansible.ini') == 'ini' + assert get_config_type('/tmp/ansible.cfg') == 'ini' + assert get_config_type('/tmp/ansible.yaml') == 'yaml' + assert get_config_type('/tmp/ansible.yml') == 'yaml' + + def test_config_types_negative(self): + with pytest.raises(AnsibleOptionsError) as exec_info: + get_config_type('/tmp/ansible.txt') + assert "Unsupported configuration file extension for" in str(exec_info.value) + + def test_read_config_yaml_file(self): + assert isinstance(self.manager._read_config_yaml_file(os.path.join(curdir, 'test.yml')), dict) + + def test_read_config_yaml_file_negative(self): + with pytest.raises(AnsibleError) as exec_info: + self.manager._read_config_yaml_file(os.path.join(curdir, 'test_non_existent.yml')) + + assert "Missing base YAML definition file (bad install?)" in str(exec_info.value) + + def test_entry_as_vault_var(self): + class MockVault: + + def decrypt(self, value): + return value + + vault_var = AnsibleVaultEncryptedUnicode(b"vault text") + vault_var.vault = MockVault() + + actual_value, actual_origin = self.manager._loop_entries({'name': vault_var}, [{'name': 'name'}]) + assert actual_value == "vault text" + assert actual_origin == "name" + + @pytest.mark.parametrize("value_type", ("str", "string", None)) + def test_ensure_type_with_vaulted_str(self, value_type): + class MockVault: + def decrypt(self, value): + return value + + vault_var = AnsibleVaultEncryptedUnicode(b"vault text") + vault_var.vault = MockVault() + + actual_value = ensure_type(vault_var, value_type) + assert actual_value == "vault text" diff --git a/test/units/errors/__init__.py b/test/units/errors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/errors/test_errors.py b/test/units/errors/test_errors.py new file mode 100644 index 00000000..136a2695 --- /dev/null +++ b/test/units/errors/test_errors.py @@ -0,0 +1,151 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from units.compat import unittest +from units.compat.builtins import BUILTINS +from units.compat.mock import mock_open, patch +from ansible.errors import AnsibleError +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject + + +class TestErrors(unittest.TestCase): + + def setUp(self): + self.message = 'This is the error message' + self.unicode_message = 'This is an error with \xf0\x9f\x98\xa8 in it' + + self.obj = AnsibleBaseYAMLObject() + + def test_basic_error(self): + e = AnsibleError(self.message) + self.assertEqual(e.message, self.message) + self.assertEqual(e.__repr__(), self.message) + + def test_basic_unicode_error(self): + e = AnsibleError(self.unicode_message) + self.assertEqual(e.message, self.unicode_message) + self.assertEqual(e.__repr__(), self.unicode_message) + + @patch.object(AnsibleError, '_get_error_lines_from_file') + def test_error_with_kv(self, mock_method): + ''' This tests a task with both YAML and k=v syntax + + - lineinfile: line=foo path=bar + line: foo + + An accurate error message and position indicator are expected. + + _get_error_lines_from_file() returns (target_line, prev_line) + ''' + + self.obj.ansible_pos = ('foo.yml', 2, 1) + + mock_method.return_value = [' line: foo\n', '- lineinfile: line=foo path=bar\n'] + + e = AnsibleError(self.message, self.obj) + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 19, but may\nbe elsewhere in the " + "file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n- lineinfile: line=foo path=bar\n" + " ^ here\n\n" + "There appears to be both 'k=v' shorthand syntax and YAML in this task. 
Only one syntax may be used.\n") + ) + + @patch.object(AnsibleError, '_get_error_lines_from_file') + def test_error_with_object(self, mock_method): + self.obj.ansible_pos = ('foo.yml', 1, 1) + + mock_method.return_value = ('this is line 1\n', '') + e = AnsibleError(self.message, self.obj) + + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the " + "exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") + ) + + def test_get_error_lines_from_file(self): + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n'] + + with patch('{0}.open'.format(BUILTINS), m): + # this line will be found in the file + self.obj.ansible_pos = ('foo.yml', 1, 1) + e = AnsibleError(self.message, self.obj) + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on " + "the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") + ) + + with patch('ansible.errors.to_text', side_effect=IndexError('Raised intentionally')): + # raise an IndexError + self.obj.ansible_pos = ('foo.yml', 2, 1) + e = AnsibleError(self.message, self.obj) + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on " + "the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") + ) + + m = mock_open() + m.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n'] + + with patch('{0}.open'.format(BUILTINS), m): + # this line will be found in the file + self.obj.ansible_pos = ('foo.yml', 1, 1) + e = AnsibleError(self.unicode_message, self.obj) + self.assertEqual( + e.message, + ("This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the " + "file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ " + "here\n") + ) + + def test_get_error_lines_error_in_last_line(self): + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n'] + + with patch('{0}.open'.format(BUILTINS), m): + # If the error occurs in the last line of the file, use the correct index to get the line + # and avoid the IndexError + self.obj.ansible_pos = ('foo.yml', 4, 1) + e = AnsibleError(self.message, self.obj) + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 4, column 1, but may\nbe elsewhere in the file depending on " + "the exact syntax problem.\n\nThe offending line appears to be:\n\nthis is line 2\nthis is line 3\n^ here\n") + ) + + def test_get_error_lines_error_empty_lines_around_error(self): + """Test that trailing whitespace after the error is removed""" + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n', ' \n', ' \n', ' '] + + with patch('{0}.open'.format(BUILTINS), m): + self.obj.ansible_pos = ('foo.yml', 5, 1) + e = AnsibleError(self.message, self.obj) + self.assertEqual( + e.message, + ("This is the error message\n\nThe error appears to be in 'foo.yml': line 5, column 1, but may\nbe elsewhere in the file depending on " + "the exact syntax 
problem.\n\nThe offending line appears to be:\n\nthis is line 2\nthis is line 3\n^ here\n") + ) diff --git a/test/units/executor/__init__.py b/test/units/executor/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/executor/module_common/test_modify_module.py b/test/units/executor/module_common/test_modify_module.py new file mode 100644 index 00000000..dceef763 --- /dev/null +++ b/test/units/executor/module_common/test_modify_module.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.executor.module_common import modify_module +from ansible.module_utils.six import PY2 + +from test_module_common import templar + + +FAKE_OLD_MODULE = b'''#!/usr/bin/python +import sys +print('{"result": "%s"}' % sys.executable) +''' + + +@pytest.fixture +def fake_old_module_open(mocker): + m = mocker.mock_open(read_data=FAKE_OLD_MODULE) + if PY2: + mocker.patch('__builtin__.open', m) + else: + mocker.patch('builtins.open', m) + +# this test no longer makes sense, since a Python module will always either have interpreter discovery run or +# an explicit interpreter passed (so we'll never default to the module shebang) +# def test_shebang(fake_old_module_open, templar): + # (data, style, shebang) = modify_module('fake_module', 'fake_path', {}, templar) + # assert shebang == '#!/usr/bin/python' + + +def test_shebang_task_vars(fake_old_module_open, templar): + task_vars = { + 'ansible_python_interpreter': '/usr/bin/python3' + } + + (data, style, shebang) = modify_module('fake_module', 'fake_path', {}, templar, task_vars=task_vars) + assert shebang == '#!/usr/bin/python3' diff --git a/test/units/executor/module_common/test_module_common.py b/test/units/executor/module_common/test_module_common.py new file mode 100644 index 00000000..04bae85d --- /dev/null +++ b/test/units/executor/module_common/test_module_common.py @@ -0,0 +1,197 @@ +# (c) 2017, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
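The fake_old_module_open fixture above and the TestSlurp cases below both patch the builtin open() behind a Python 2/3 branch. A small helper capturing that recurring pattern (the name patch_builtin_open is hypothetical, not part of the patch):

    from ansible.module_utils.six import PY2

    def patch_builtin_open(mocker, read_data):
        # The builtin module is named __builtin__ on Python 2 and
        # builtins on Python 3, hence the branch in every test.
        m = mocker.mock_open(read_data=read_data)
        mocker.patch('__builtin__.open' if PY2 else 'builtins.open', m)
        return m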
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os.path + +import pytest + +import ansible.errors + +from ansible.executor import module_common as amc +from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError +from ansible.module_utils.six import PY2 + + +class TestStripComments: + def test_no_changes(self): + no_comments = u"""def some_code(): + return False""" + assert amc._strip_comments(no_comments) == no_comments + + def test_all_comments(self): + all_comments = u"""# This is a test + # Being as it is + # To be + """ + assert amc._strip_comments(all_comments) == u"" + + def test_all_whitespace(self): + # Note: Do not remove the spaces on the blank lines below. They're + # test data to show that the lines get removed despite having spaces + # on them + all_whitespace = u""" + + + +\t\t\r\n + """ # nopep8 + assert amc._strip_comments(all_whitespace) == u"" + + def test_somewhat_normal(self): + mixed = u"""#!/usr/bin/python + +# here we go +def test(arg): + # this is a thing + thing = '# test' + return thing +# End +""" + mixed_results = u"""def test(arg): + thing = '# test' + return thing""" + assert amc._strip_comments(mixed) == mixed_results + + +class TestSlurp: + def test_slurp_nonexistent(self, mocker): + mocker.patch('os.path.exists', side_effect=lambda x: False) + with pytest.raises(ansible.errors.AnsibleError): + amc._slurp('no_file') + + def test_slurp_file(self, mocker): + mocker.patch('os.path.exists', side_effect=lambda x: True) + m = mocker.mock_open(read_data='This is a test') + if PY2: + mocker.patch('__builtin__.open', m) + else: + mocker.patch('builtins.open', m) + assert amc._slurp('some_file') == 'This is a test' + + def test_slurp_file_with_newlines(self, mocker): + mocker.patch('os.path.exists', side_effect=lambda x: True) + m = mocker.mock_open(read_data='#!/usr/bin/python\ndef test(args):\nprint("hi")\n') + if PY2: + mocker.patch('__builtin__.open', m) + else: + mocker.patch('builtins.open', m) + assert amc._slurp('some_file') == '#!/usr/bin/python\ndef test(args):\nprint("hi")\n' + + +@pytest.fixture +def templar(): + class FakeTemplar: + def template(self, template_string, *args, **kwargs): + return template_string + + return FakeTemplar() + + +class TestGetShebang: + """Note: We may want to change the API of this function in the future. 
It isn't a great API""" + def test_no_interpreter_set(self, templar): + # normally this would return /usr/bin/python, but so long as we're defaulting to auto python discovery, we'll get + # an InterpreterDiscoveryRequiredError here instead + with pytest.raises(InterpreterDiscoveryRequiredError): + amc._get_shebang(u'/usr/bin/python', {}, templar) + + def test_non_python_interpreter(self, templar): + assert amc._get_shebang(u'/usr/bin/ruby', {}, templar) == (None, u'/usr/bin/ruby') + + def test_interpreter_set_in_task_vars(self, templar): + assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/pypy'}, templar) == \ + (u'#!/usr/bin/pypy', u'/usr/bin/pypy') + + def test_non_python_interpreter_in_task_vars(self, templar): + assert amc._get_shebang(u'/usr/bin/ruby', {u'ansible_ruby_interpreter': u'/usr/local/bin/ruby'}, templar) == \ + (u'#!/usr/local/bin/ruby', u'/usr/local/bin/ruby') + + def test_with_args(self, templar): + assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/python3'}, templar, args=('-tt', '-OO')) == \ + (u'#!/usr/bin/python3 -tt -OO', u'/usr/bin/python3') + + def test_python_via_env(self, templar): + assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/env python'}, templar) == \ + (u'#!/usr/bin/env python', u'/usr/bin/env python') + + +class TestDetectionRegexes: + ANSIBLE_MODULE_UTIL_STRINGS = ( + # Absolute collection imports + b'import ansible_collections.my_ns.my_col.plugins.module_utils.my_util', + b'from ansible_collections.my_ns.my_col.plugins.module_utils import my_util', + b'from ansible_collections.my_ns.my_col.plugins.module_utils.my_util import my_func', + # Absolute core imports + b'import ansible.module_utils.basic', + b'from ansible.module_utils import basic', + b'from ansible.module_utils.basic import AnsibleModule', + # Relative imports + b'from ..module_utils import basic', + b'from .. module_utils import basic', + b'from ....module_utils import basic', + b'from ..module_utils.basic import AnsibleModule', + ) + NOT_ANSIBLE_MODULE_UTIL_STRINGS = ( + b'from ansible import release', + b'from ..release import __version__', + b'from .. 
import release', + b'from ansible.modules.system import ping', + b'from ansible_collecitons.my_ns.my_col.plugins.modules import function', + ) + + OFFSET = os.path.dirname(os.path.dirname(amc.__file__)) + CORE_PATHS = ( + ('%s/modules/from_role.py' % OFFSET, 'ansible/modules/from_role'), + ('%s/modules/system/ping.py' % OFFSET, 'ansible/modules/system/ping'), + ('%s/modules/cloud/amazon/s3.py' % OFFSET, 'ansible/modules/cloud/amazon/s3'), + ) + + COLLECTION_PATHS = ( + ('/root/ansible_collections/ns/col/plugins/modules/ping.py', + 'ansible_collections/ns/col/plugins/modules/ping'), + ('/root/ansible_collections/ns/col/plugins/modules/subdir/ping.py', + 'ansible_collections/ns/col/plugins/modules/subdir/ping'), + ) + + @pytest.mark.parametrize('testcase', ANSIBLE_MODULE_UTIL_STRINGS) + def test_detect_new_style_python_module_re(self, testcase): + assert amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase) + + @pytest.mark.parametrize('testcase', NOT_ANSIBLE_MODULE_UTIL_STRINGS) + def test_no_detect_new_style_python_module_re(self, testcase): + assert not amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('testcase, result', CORE_PATHS) # pylint: disable=undefined-variable + def test_detect_core_library_path_re(self, testcase, result): + assert amc.CORE_LIBRARY_PATH_RE.search(testcase).group('path') == result + + @pytest.mark.parametrize('testcase', (p[0] for p in COLLECTION_PATHS)) # pylint: disable=undefined-variable + def test_no_detect_core_library_path_re(self, testcase): + assert not amc.CORE_LIBRARY_PATH_RE.search(testcase) + + @pytest.mark.parametrize('testcase, result', COLLECTION_PATHS) # pylint: disable=undefined-variable + def test_detect_collection_path_re(self, testcase, result): + assert amc.COLLECTION_PATH_RE.search(testcase).group('path') == result + + @pytest.mark.parametrize('testcase', (p[0] for p in CORE_PATHS)) # pylint: disable=undefined-variable + def test_no_detect_collection_path_re(self, testcase): + assert not amc.COLLECTION_PATH_RE.search(testcase) diff --git a/test/units/executor/module_common/test_recursive_finder.py b/test/units/executor/module_common/test_recursive_finder.py new file mode 100644 index 00000000..c72973f8 --- /dev/null +++ b/test/units/executor/module_common/test_recursive_finder.py @@ -0,0 +1,127 @@ +# (c) 2017, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
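The TestDetectionRegexes cases above pin down the byte-level heuristics AnsiBallZ uses to classify module source: a module is new-style if it imports module_utils (core or collection), and core versus collection plugins are told apart by their on-disk paths. Condensed into a usage sketch:

    from ansible.executor import module_common as amc

    # A module_utils import marks a module as new-style...
    assert amc.NEW_STYLE_PYTHON_MODULE_RE.search(
        b'from ansible.module_utils.basic import AnsibleModule')
    # ...while other ansible imports (or a misspelled collections
    # namespace, as in the negative cases above) do not match.
    assert not amc.NEW_STYLE_PYTHON_MODULE_RE.search(
        b'from ansible import release')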
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pytest +import zipfile + +from collections import namedtuple +from io import BytesIO + +import ansible.errors + +from ansible.executor.module_common import recursive_finder +from ansible.module_utils.six import PY2 + + +# These are the modules that are brought in by module_utils/basic.py. This may need to be updated +# when basic.py gains new imports +# We will remove these when we modify AnsiBallZ to store its args in a separate file instead of in +# basic.py + +MODULE_UTILS_BASIC_FILES = frozenset(('ansible/__init__.py', + 'ansible/module_utils/__init__.py', + 'ansible/module_utils/_text.py', + 'ansible/module_utils/basic.py', + 'ansible/module_utils/six/__init__.py', + 'ansible/module_utils/_text.py', + 'ansible/module_utils/common/_collections_compat.py', + 'ansible/module_utils/common/_json_compat.py', + 'ansible/module_utils/common/collections.py', + 'ansible/module_utils/common/parameters.py', + 'ansible/module_utils/common/warnings.py', + 'ansible/module_utils/parsing/convert_bool.py', + 'ansible/module_utils/common/__init__.py', + 'ansible/module_utils/common/file.py', + 'ansible/module_utils/common/process.py', + 'ansible/module_utils/common/sys_info.py', + 'ansible/module_utils/common/text/__init__.py', + 'ansible/module_utils/common/text/converters.py', + 'ansible/module_utils/common/text/formatters.py', + 'ansible/module_utils/common/validation.py', + 'ansible/module_utils/common/_utils.py', + 'ansible/module_utils/compat/__init__.py', + 'ansible/module_utils/compat/_selectors2.py', + 'ansible/module_utils/compat/selectors.py', + 'ansible/module_utils/distro/__init__.py', + 'ansible/module_utils/distro/_distro.py', + 'ansible/module_utils/parsing/__init__.py', + 'ansible/module_utils/parsing/convert_bool.py', + 'ansible/module_utils/pycompat24.py', + 'ansible/module_utils/six/__init__.py', + )) + +ONLY_BASIC_FILE = frozenset(('ansible/module_utils/basic.py',)) + +ANSIBLE_LIB = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'lib', 'ansible') + + +@pytest.fixture +def finder_containers(): + FinderContainers = namedtuple('FinderContainers', ['zf']) + + zipoutput = BytesIO() + zf = zipfile.ZipFile(zipoutput, mode='w', compression=zipfile.ZIP_STORED) + + return FinderContainers(zf) + + +class TestRecursiveFinder(object): + def test_no_module_utils(self, finder_containers): + name = 'ping' + data = b'#!/usr/bin/python\nreturn \'{\"changed\": false}\'' + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers) + assert frozenset(finder_containers.zf.namelist()) == MODULE_UTILS_BASIC_FILES + + def test_module_utils_with_syntax_error(self, finder_containers): + name = 'fake_module' + data = b'#!/usr/bin/python\ndef something(:\n pass\n' + with pytest.raises(ansible.errors.AnsibleError) as exec_info: + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'fake_module.py'), data, *finder_containers) + assert 'Unable to import fake_module due to invalid syntax' in str(exec_info.value) + + def test_module_utils_with_indentation_error(self, finder_containers): + name = 'fake_module' + data = b'#!/usr/bin/python\n def something():\n pass\n' + with pytest.raises(ansible.errors.AnsibleError) as exec_info: + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'fake_module.py'), data, *finder_containers) + assert 'Unable to 
import fake_module due to unexpected indent' in str(exec_info.value) + + # + # Test importing six with many permutations because it is not a normal module + # + def test_from_import_six(self, finder_containers): + name = 'ping' + data = b'#!/usr/bin/python\nfrom ansible.module_utils import six' + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers) + assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES) + + def test_import_six(self, finder_containers): + name = 'ping' + data = b'#!/usr/bin/python\nimport ansible.module_utils.six' + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers) + assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES) + + def test_import_six_from_many_submodules(self, finder_containers): + name = 'ping' + data = b'#!/usr/bin/python\nfrom ansible.module_utils.six.moves.urllib.parse import urlparse' + recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers) + assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py',)).union(MODULE_UTILS_BASIC_FILES) diff --git a/test/units/executor/test_interpreter_discovery.py b/test/units/executor/test_interpreter_discovery.py new file mode 100644 index 00000000..10f97d63 --- /dev/null +++ b/test/units/executor/test_interpreter_discovery.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# (c) 2019, Jordan Borean +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import MagicMock + +from ansible.executor.interpreter_discovery import discover_interpreter +from ansible.module_utils._text import to_text + +mock_ubuntu_platform_res = to_text( + r'{"osrelease_content": "NAME=\"Ubuntu\"\nVERSION=\"16.04.5 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\n' + r'PRETTY_NAME=\"Ubuntu 16.04.5 LTS\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\n' + r'SUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\n' + r'VERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n", "platform_dist_result": ["Ubuntu", "16.04", "xenial"]}' +) + + +def test_discovery_interpreter_linux_auto_legacy(): + res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND' + + mock_action = MagicMock() + mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}] + + actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'}) + + assert actual == u'/usr/bin/python' + assert len(mock_action.method_calls) == 3 + assert mock_action.method_calls[2][0] == '_discovery_deprecation_warnings.append' + assert u'Distribution Ubuntu 16.04 on host host-fóöbär should use /usr/bin/python3, but is using /usr/bin/python' \ + u' for backward compatibility' in mock_action.method_calls[2][1][0]['msg'] + assert mock_action.method_calls[2][1][0]['version'] == '2.12' + + +def test_discovery_interpreter_linux_auto_legacy_silent(): + res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND' + + mock_action = MagicMock() + 
mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}] + + actual = discover_interpreter(mock_action, 'python', 'auto_legacy_silent', {'inventory_hostname': u'host-fóöbär'}) + + assert actual == u'/usr/bin/python' + assert len(mock_action.method_calls) == 2 + + +def test_discovery_interpreter_linux_auto(): + res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND' + + mock_action = MagicMock() + mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}] + + actual = discover_interpreter(mock_action, 'python', 'auto', {'inventory_hostname': u'host-fóöbär'}) + + assert actual == u'/usr/bin/python3' + assert len(mock_action.method_calls) == 2 + + +def test_discovery_interpreter_non_linux(): + mock_action = MagicMock() + mock_action._low_level_execute_command.return_value = \ + {'stdout': u'PLATFORM\nDarwin\nFOUND\n/usr/bin/python\nENDFOUND'} + + actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'}) + + assert actual == u'/usr/bin/python' + assert len(mock_action.method_calls) == 2 + assert mock_action.method_calls[1][0] == '_discovery_warnings.append' + assert u'Platform darwin on host host-fóöbär is using the discovered Python interpreter at /usr/bin/python, ' \ + u'but future installation of another Python interpreter could change the meaning of that path' \ + in mock_action.method_calls[1][1][0] + + +def test_no_interpreters_found(): + mock_action = MagicMock() + mock_action._low_level_execute_command.return_value = {'stdout': u'PLATFORM\nWindows\nFOUND\nENDFOUND'} + + actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'}) + + assert actual == u'/usr/bin/python' + assert len(mock_action.method_calls) == 2 + assert mock_action.method_calls[1][0] == '_discovery_warnings.append' + assert u'No python interpreters found for host host-fóöbär (tried' \ + in mock_action.method_calls[1][1][0] diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py new file mode 100644 index 00000000..4ccfd69a --- /dev/null +++ b/test/units/executor/test_play_iterator.py @@ -0,0 +1,458 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
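discover_interpreter() issues the probes mocked above through _low_level_execute_command, and the fixtures show its framing: a PLATFORM line, the platform name, then candidate interpreter paths between FOUND and ENDFOUND. A hypothetical parser for that framing, only to make the fixtures easier to read (not part of the discovery code):

    def parse_probe_output(stdout):
        # e.g. u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\nENDFOUND'
        lines = stdout.strip().splitlines()
        platform = lines[lines.index('PLATFORM') + 1]
        found = lines[lines.index('FOUND') + 1:lines.index('ENDFOUND')]
        return platform, found

    assert parse_probe_output(
        u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3\nENDFOUND'
    ) == ('Linux', ['/usr/bin/python', '/usr/bin/python3'])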
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch, MagicMock + +from ansible.executor.play_iterator import HostState, PlayIterator +from ansible.playbook import Playbook +from ansible.playbook.play_context import PlayContext + +from units.mock.loader import DictDataLoader +from units.mock.path import mock_unfrackpath_noop + + +class TestPlayIterator(unittest.TestCase): + + def test_host_state(self): + hs = HostState(blocks=[x for x in range(0, 10)]) + hs.tasks_child_state = HostState(blocks=[0]) + hs.rescue_child_state = HostState(blocks=[1]) + hs.always_child_state = HostState(blocks=[2]) + hs.__repr__() + hs.run_state = 100 + hs.__repr__() + hs.fail_state = 15 + hs.__repr__() + + for i in range(0, 10): + hs.cur_block = i + self.assertEqual(hs.get_current_block(), i) + + new_hs = hs.copy() + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_play_iterator(self): + # import epdb; epdb.st() + fake_loader = DictDataLoader({ + "test_play.yml": """ + - hosts: all + gather_facts: false + roles: + - test_role + pre_tasks: + - debug: msg="this is a pre_task" + tasks: + - debug: msg="this is a regular task" + - block: + - debug: msg="this is a block task" + - block: + - debug: msg="this is a sub-block in a block" + rescue: + - debug: msg="this is a rescue task" + - block: + - debug: msg="this is a sub-block in a rescue" + always: + - debug: msg="this is an always task" + - block: + - debug: msg="this is a sub-block in an always" + post_tasks: + - debug: msg="this is a post_task" + """, + '/etc/ansible/roles/test_role/tasks/main.yml': """ + - name: role task + debug: msg="this is a role task" + - block: + - name: role block task + debug: msg="inside block in role" + always: + - name: role always task + debug: msg="always task in block in role" + - include: foo.yml + - name: role task after include + debug: msg="after include in role" + - block: + - name: starting role nested block 1 + debug: + - block: + - name: role nested block 1 task 1 + debug: + - name: role nested block 1 task 2 + debug: + - name: role nested block 1 task 3 + debug: + - name: end of role nested block 1 + debug: + - name: starting role nested block 2 + debug: + - block: + - name: role nested block 2 task 1 + debug: + - name: role nested block 2 task 2 + debug: + - name: role nested block 2 task 3 + debug: + - name: end of role nested block 2 + debug: + """, + '/etc/ansible/roles/test_role/tasks/foo.yml': """ + - name: role included task + debug: msg="this is task in an include from a role" + """ + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + hosts.append(host) + + mock_var_manager._fact_cache['host00'] = dict() + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # pre task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + 
self.assertEqual(task.action, 'debug') + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # role task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.name, "role task") + self.assertIsNotNone(task._role) + # role block task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role block task") + self.assertIsNotNone(task._role) + # role block always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role always task") + self.assertIsNotNone(task._role) + # role include task + # (host_state, task) = itr.get_next_task_for_host(hosts[0]) + # self.assertIsNotNone(task) + # self.assertEqual(task.action, 'debug') + # self.assertEqual(task.name, "role included task") + # self.assertIsNotNone(task._role) + # role task after include + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role task after include") + self.assertIsNotNone(task._role) + # role nested block tasks + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "starting role nested block 1") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 1 task 1") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 1 task 2") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 1 task 3") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "end of role nested block 1") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "starting role nested block 2") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 2 task 1") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 2 task 2") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "role nested block 2 task 3") + self.assertIsNotNone(task._role) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.name, "end of role nested block 2") + self.assertIsNotNone(task._role) + # regular play task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertIsNone(task._role) + # block task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a block task")) + # sub-block 
task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in a block")) + # mark the host failed + itr.mark_host_failed(hosts[0]) + # block rescue task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a rescue task")) + # sub-block rescue task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue")) + # block always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is an always task")) + # sub-block always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in an always")) + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # post task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # end of iteration + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNone(task) + + # host 0 shouldn't be in the failed hosts, as the error + # was handled by a rescue block + failed_hosts = itr.get_failed_hosts() + self.assertNotIn(hosts[0], failed_hosts) + + def test_play_iterator_nested_blocks(self): + fake_loader = DictDataLoader({ + "test_play.yml": """ + - hosts: all + gather_facts: false + tasks: + - block: + - block: + - block: + - block: + - block: + - debug: msg="this is the first task" + - ping: + rescue: + - block: + - block: + - block: + - block: + - debug: msg="this is the rescue task" + always: + - block: + - block: + - block: + - block: + - debug: msg="this is the always task" + """, + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + hosts.append(host) + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) + # get the first task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the first task')) + # fail the host + 
itr.mark_host_failed(hosts[0]) + # get the rescue task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the rescue task')) + # get the always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the always task')) + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) + # end of iteration + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNone(task) + + def test_play_iterator_add_tasks(self): + fake_loader = DictDataLoader({ + 'test_play.yml': """ + - hosts: all + gather_facts: no + tasks: + - debug: msg="dummy task" + """, + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + hosts.append(host) + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # test the high-level add_tasks() method + s = HostState(blocks=[0, 1, 2]) + itr._insert_tasks_into_state = MagicMock(return_value=s) + itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()]) + self.assertEqual(itr._host_states[hosts[0].name], s) + + # now actually test the lower-level method that does the work + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # iterate past first task + _, task = itr.get_next_task_for_host(hosts[0]) + while(task and task.action != 'debug'): + _, task = itr.get_next_task_for_host(hosts[0]) + + if task is None: + raise Exception("iterated past end of play while looking for place to insert tasks") + + # get the current host state and copy it so we can mutate it + s = itr.get_host_state(hosts[0]) + s_copy = s.copy() + + # assert with an empty task list, or if we're in a failed state, we simply return the state as-is + res_state = itr._insert_tasks_into_state(s_copy, task_list=[]) + self.assertEqual(res_state, s_copy) + + s_copy.fail_state = itr.FAILED_TASKS + res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()]) + self.assertEqual(res_state, s_copy) + + # but if we've failed with a rescue/always block + mock_task = MagicMock() + s_copy.run_state = itr.ITERATING_RESCUE + res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task]) + self.assertEqual(res_state, s_copy) + self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue) + itr._host_states[hosts[0].name] = res_state + (next_state, next_task) = 
itr.get_next_task_for_host(hosts[0], peek=True) + self.assertEqual(next_task, mock_task) + itr._host_states[hosts[0].name] = s + + # test a regular insertion + s_copy = s.copy() + res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()]) diff --git a/test/units/executor/test_playbook_executor.py b/test/units/executor/test_playbook_executor.py new file mode 100644 index 00000000..529eda36 --- /dev/null +++ b/test/units/executor/test_playbook_executor.py @@ -0,0 +1,148 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import MagicMock + +from ansible.executor.playbook_executor import PlaybookExecutor +from ansible.playbook import Playbook +from ansible.template import Templar +from ansible.utils import context_objects as co + +from units.mock.loader import DictDataLoader + + +class TestPlaybookExecutor(unittest.TestCase): + + def setUp(self): + # Reset command line args for every test + co.GlobalCLIArgs._Singleton__instance = None + + def tearDown(self): + # And cleanup after ourselves too + co.GlobalCLIArgs._Singleton__instance = None + + def test_get_serialized_batches(self): + fake_loader = DictDataLoader({ + 'no_serial.yml': ''' + - hosts: all + gather_facts: no + tasks: + - debug: var=inventory_hostname + ''', + 'serial_int.yml': ''' + - hosts: all + gather_facts: no + serial: 2 + tasks: + - debug: var=inventory_hostname + ''', + 'serial_pct.yml': ''' + - hosts: all + gather_facts: no + serial: 20% + tasks: + - debug: var=inventory_hostname + ''', + 'serial_list.yml': ''' + - hosts: all + gather_facts: no + serial: [1, 2, 3] + tasks: + - debug: var=inventory_hostname + ''', + 'serial_list_mixed.yml': ''' + - hosts: all + gather_facts: no + serial: [1, "20%", -1] + tasks: + - debug: var=inventory_hostname + ''', + }) + + mock_inventory = MagicMock() + mock_var_manager = MagicMock() + + templar = Templar(loader=fake_loader) + + pbe = PlaybookExecutor( + playbooks=['no_serial.yml', 'serial_int.yml', 'serial_pct.yml', 'serial_list.yml', 'serial_list_mixed.yml'], + inventory=mock_inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + passwords=[], + ) + + playbook = Playbook.load(pbe._playbooks[0], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9'] + self.assertEqual(pbe._get_serialized_batches(play), [['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']]) + + playbook = Playbook.load(pbe._playbooks[1], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + 
mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9'] + self.assertEqual( + pbe._get_serialized_batches(play), + [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9']] + ) + + playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9'] + self.assertEqual( + pbe._get_serialized_batches(play), + [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9']] + ) + + playbook = Playbook.load(pbe._playbooks[3], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9'] + self.assertEqual( + pbe._get_serialized_batches(play), + [['host0'], ['host1', 'host2'], ['host3', 'host4', 'host5'], ['host6', 'host7', 'host8'], ['host9']] + ) + + playbook = Playbook.load(pbe._playbooks[4], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9'] + self.assertEqual(pbe._get_serialized_batches(play), [['host0'], ['host1', 'host2'], ['host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']]) + + # Test when serial percent is under 1.0 + playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2'] + self.assertEqual(pbe._get_serialized_batches(play), [['host0'], ['host1'], ['host2']]) + + # Test when there is a remainder for serial as a percent + playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader) + play = playbook.get_plays()[0] + play.post_validate(templar) + mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9', 'host10'] + self.assertEqual( + pbe._get_serialized_batches(play), + [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9'], ['host10']] + ) diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py new file mode 100644 index 00000000..7d9d711f --- /dev/null +++ b/test/units/executor/test_task_executor.py @@ -0,0 +1,656 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
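Once 'serial' is resolved to a concrete batch size, the splits asserted above reduce to plain chunking: 'serial: 2' and 'serial: 20%' of ten hosts both give batches of two, a percentage worth less than one host rounds up to one, and any remainder becomes a shorter final batch. A sketch of the fixed-size case (a hypothetical helper, not the PlaybookExecutor internals):

    def fixed_batches(hosts, size):
        # Consecutive chunks of at most 'size' hosts; the remainder,
        # if any, forms a shorter final batch.
        return [hosts[i:i + size] for i in range(0, len(hosts), size)]

    hosts = ['host%d' % i for i in range(10)]
    assert fixed_batches(hosts, 2) == [
        ['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'],
        ['host6', 'host7'], ['host8', 'host9'],
    ]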
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import mock + +from units.compat import unittest +from units.compat.mock import patch, MagicMock +from ansible.errors import AnsibleError +from ansible.executor.task_executor import TaskExecutor, remove_omit +from ansible.plugins.loader import action_loader, lookup_loader +from ansible.parsing.yaml.objects import AnsibleUnicode +from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes +from ansible.module_utils.six import text_type + +from units.mock.loader import DictDataLoader + + +class TestTaskExecutor(unittest.TestCase): + + def test_task_executor_init(self): + fake_loader = DictDataLoader({}) + mock_host = MagicMock() + mock_task = MagicMock() + mock_play_context = MagicMock() + mock_shared_loader = MagicMock() + new_stdin = None + job_vars = dict() + mock_queue = MagicMock() + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=mock_shared_loader, + final_q=mock_queue, + ) + + def test_task_executor_run(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task._role._role_path = '/path/to/role/foo' + + mock_play_context = MagicMock() + + mock_shared_loader = MagicMock() + mock_queue = MagicMock() + + new_stdin = None + job_vars = dict() + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=mock_shared_loader, + final_q=mock_queue, + ) + + te._get_loop_items = MagicMock(return_value=None) + te._execute = MagicMock(return_value=dict()) + res = te.run() + + te._get_loop_items = MagicMock(return_value=[]) + res = te.run() + + te._get_loop_items = MagicMock(return_value=['a', 'b', 'c']) + te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')]) + res = te.run() + + te._get_loop_items = MagicMock(side_effect=AnsibleError("")) + res = te.run() + self.assertIn("failed", res) + + def test_task_executor_run_clean_res(self): + te = TaskExecutor(None, MagicMock(), None, None, None, None, None, None) + te._get_loop_items = MagicMock(return_value=[1]) + te._run_loop = MagicMock( + return_value=[ + { + 'unsafe_bytes': AnsibleUnsafeBytes(b'{{ $bar }}'), + 'unsafe_text': AnsibleUnsafeText(u'{{ $bar }}'), + 'bytes': b'bytes', + 'text': u'text', + 'int': 1, + } + ] + ) + res = te.run() + data = res['results'][0] + self.assertIsInstance(data['unsafe_bytes'], AnsibleUnsafeText) + self.assertIsInstance(data['unsafe_text'], AnsibleUnsafeText) + self.assertIsInstance(data['bytes'], text_type) + self.assertIsInstance(data['text'], text_type) + self.assertIsInstance(data['int'], int) + + def test_task_executor_get_loop_items(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.loop_with = 'items' + mock_task.loop = ['a', 'b', 'c'] + + mock_play_context = MagicMock() + + mock_shared_loader = MagicMock() + mock_shared_loader.lookup_loader = lookup_loader + + new_stdin = None + job_vars = dict() + mock_queue = MagicMock() + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=mock_shared_loader, + final_q=mock_queue, + ) + + items = 
te._get_loop_items() + self.assertEqual(items, ['a', 'b', 'c']) + + def test_task_executor_run_loop(self): + items = ['a', 'b', 'c'] + + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + def _copy(exclude_parent=False, exclude_tasks=False): + new_item = MagicMock() + return new_item + + mock_task = MagicMock() + mock_task.copy.side_effect = _copy + + mock_play_context = MagicMock() + + mock_shared_loader = MagicMock() + mock_queue = MagicMock() + + new_stdin = None + job_vars = dict() + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=mock_shared_loader, + final_q=mock_queue, + ) + + def _execute(variables): + return dict(item=variables.get('item')) + + te._squash_items = MagicMock(return_value=items) + te._execute = MagicMock(side_effect=_execute) + + res = te._run_loop(items) + self.assertEqual(len(res), 3) + + def test_task_executor_squash_items(self): + items = ['a', 'b', 'c'] + + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + loop_var = 'item' + + def _evaluate_conditional(templar, variables): + item = variables.get(loop_var) + if item == 'b': + return False + return True + + mock_task = MagicMock() + mock_task.evaluate_conditional.side_effect = _evaluate_conditional + + mock_play_context = MagicMock() + + mock_shared_loader = None + mock_queue = MagicMock() + + new_stdin = None + job_vars = dict(pkg_mgr='yum') + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=mock_shared_loader, + final_q=mock_queue, + ) + + # No replacement + mock_task.action = 'yum' + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + self.assertIsInstance(mock_task.args, MagicMock) + + mock_task.action = 'foo' + mock_task.args = {'name': '{{item}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + self.assertEqual(mock_task.args, {'name': '{{item}}'}) + + mock_task.action = 'yum' + mock_task.args = {'name': 'static'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + self.assertEqual(mock_task.args, {'name': 'static'}) + + mock_task.action = 'yum' + mock_task.args = {'name': '{{pkg_mgr}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + self.assertEqual(mock_task.args, {'name': '{{pkg_mgr}}'}) + + mock_task.action = '{{unknown}}' + mock_task.args = {'name': '{{item}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + self.assertEqual(mock_task.args, {'name': '{{item}}'}) + + # Could do something like this to recover from bad deps in a package + job_vars = dict(pkg_mgr='yum', packages=['a', 'b']) + items = ['absent', 'latest'] + mock_task.action = 'yum' + mock_task.args = {'name': '{{ packages }}', 'state': '{{ item }}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{ packages }}', 'state': '{{ item }}'}) + + # Maybe should raise an error in this case. 
The user would have to specify: + # - yum: name="{{ packages[item] }}" + # with_items: + # - ['a', 'b'] + # - ['foo', 'bar'] + # you can't use a list as a dict key so that would probably throw + # an error later. If so, we can throw it now instead. + # Squashing in this case would not be intuitive as the user is being + # explicit in using each list entry as a key. + job_vars = dict(pkg_mgr='yum', packages={"a": "foo", "b": "bar", "foo": "baz", "bar": "quux"}) + items = [['a', 'b'], ['foo', 'bar']] + mock_task.action = 'yum' + mock_task.args = {'name': '{{ packages[item] }}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'}) + + # Replaces + items = ['a', 'b', 'c'] + mock_task.action = 'yum' + mock_task.args = {'name': '{{item}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, [['a', 'c']]) + self.assertEqual(mock_task.args, {'name': ['a', 'c']}) + + mock_task.action = '{{pkg_mgr}}' + mock_task.args = {'name': '{{item}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + self.assertEqual(new_items, [['a', 'c']]) + self.assertEqual(mock_task.args, {'name': ['a', 'c']}) + + # New loop_var + mock_task.action = 'yum' + mock_task.args = {'name': '{{a_loop_var_item}}'} + mock_task.loop_control = {'loop_var': 'a_loop_var_item'} + loop_var = 'a_loop_var_item' + new_items = te._squash_items(items=items, loop_var='a_loop_var_item', variables=job_vars) + self.assertEqual(new_items, [['a', 'c']]) + self.assertEqual(mock_task.args, {'name': ['a', 'c']}) + loop_var = 'item' + + # + # These are presently not optimized but could be in the future. 
+ # Expected output if they were optimized is given as a comment + # Please move these to a different section if they are optimized + # + + # Squashing lists + job_vars = dict(pkg_mgr='yum') + items = [['a', 'b'], ['foo', 'bar']] + mock_task.action = 'yum' + mock_task.args = {'name': '{{ item }}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + # self.assertEqual(new_items, [['a', 'b', 'foo', 'bar']]) + # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'foo', 'bar']}) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{ item }}'}) + + # Retrieving from a dict + items = ['a', 'b', 'foo'] + mock_task.action = 'yum' + mock_task.args = {'name': '{{ packages[item] }}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + # self.assertEqual(new_items, [['foo', 'baz']]) + # self.assertEqual(mock_task.args, {'name': ['foo', 'baz']}) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'}) + + # Another way to retrieve from a dict + job_vars = dict(pkg_mgr='yum') + items = [{'package': 'foo'}, {'package': 'bar'}] + mock_task.action = 'yum' + mock_task.args = {'name': '{{ item["package"] }}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + # self.assertEqual(new_items, [['foo', 'bar']]) + # self.assertEqual(mock_task.args, {'name': ['foo', 'bar']}) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{ item["package"] }}'}) + + items = [ + dict(name='a', state='present'), + dict(name='b', state='present'), + dict(name='c', state='present'), + ] + mock_task.action = 'yum' + mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + # self.assertEqual(new_items, [dict(name=['a', 'b', 'c'], state='present')]) + # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'c'], 'state': 'present'}) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'}) + + items = [ + dict(name='a', state='present'), + dict(name='b', state='present'), + dict(name='c', state='absent'), + ] + mock_task.action = 'yum' + mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'} + new_items = te._squash_items(items=items, loop_var='item', variables=job_vars) + # self.assertEqual(new_items, [dict(name=['a', 'b'], state='present'), + # dict(name='c', state='absent')]) + # self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'}) + self.assertEqual(new_items, items) + self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'}) + + def test_task_executor_get_action_handler(self): + te = TaskExecutor( + host=MagicMock(), + task=MagicMock(), + job_vars={}, + play_context=MagicMock(), + new_stdin=None, + loader=DictDataLoader({}), + shared_loader_obj=MagicMock(), + final_q=MagicMock(), + ) + + action_loader = te._shared_loader_obj.action_loader + action_loader.has_plugin.return_value = True + action_loader.get.return_value = mock.sentinel.handler + + mock_connection = MagicMock() + mock_templar = MagicMock() + action = 'namespace.prefix_suffix' + te._task.action = action + + handler = te._get_action_handler(mock_connection, mock_templar) + + self.assertIs(mock.sentinel.handler, handler) + + action_loader.has_plugin.assert_called_once_with( + action, collection_list=te._task.collections) + + 
action_loader.get.assert_called_once_with( + te._task.action, task=te._task, connection=mock_connection, + play_context=te._play_context, loader=te._loader, + templar=mock_templar, shared_loader_obj=te._shared_loader_obj, + collection_list=te._task.collections) + + def test_task_executor_get_handler_prefix(self): + te = TaskExecutor( + host=MagicMock(), + task=MagicMock(), + job_vars={}, + play_context=MagicMock(), + new_stdin=None, + loader=DictDataLoader({}), + shared_loader_obj=MagicMock(), + final_q=MagicMock(), + ) + + action_loader = te._shared_loader_obj.action_loader + action_loader.has_plugin.side_effect = [False, True] + action_loader.get.return_value = mock.sentinel.handler + action_loader.__contains__.return_value = True + + mock_connection = MagicMock() + mock_templar = MagicMock() + action = 'namespace.netconf_suffix' + module_prefix = action.split('_')[0] + te._task.action = action + + handler = te._get_action_handler(mock_connection, mock_templar) + + self.assertIs(mock.sentinel.handler, handler) + action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections), + mock.call(module_prefix, collection_list=te._task.collections)]) + + action_loader.get.assert_called_once_with( + module_prefix, task=te._task, connection=mock_connection, + play_context=te._play_context, loader=te._loader, + templar=mock_templar, shared_loader_obj=te._shared_loader_obj, + collection_list=te._task.collections) + + def test_task_executor_get_handler_normal(self): + te = TaskExecutor( + host=MagicMock(), + task=MagicMock(), + job_vars={}, + play_context=MagicMock(), + new_stdin=None, + loader=DictDataLoader({}), + shared_loader_obj=MagicMock(), + final_q=MagicMock(), + ) + + action_loader = te._shared_loader_obj.action_loader + action_loader.has_plugin.return_value = False + action_loader.get.return_value = mock.sentinel.handler + action_loader.__contains__.return_value = False + + mock_connection = MagicMock() + mock_templar = MagicMock() + action = 'namespace.prefix_suffix' + module_prefix = action.split('_')[0] + te._task.action = action + handler = te._get_action_handler(mock_connection, mock_templar) + + self.assertIs(mock.sentinel.handler, handler) + + action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections), + mock.call(module_prefix, collection_list=te._task.collections)]) + + action_loader.get.assert_called_once_with( + 'ansible.legacy.normal', task=te._task, connection=mock_connection, + play_context=te._play_context, loader=te._loader, + templar=mock_templar, shared_loader_obj=te._shared_loader_obj, + collection_list=None) + + def test_task_executor_execute(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.args = dict() + mock_task.retries = 0 + mock_task.delay = -1 + mock_task.register = 'foo' + mock_task.until = None + mock_task.changed_when = None + mock_task.failed_when = None + mock_task.post_validate.return_value = None + # mock_task.async_val cannot be left unset, because on Python 3 MagicMock() + # > 0 raises a TypeError There are two reasons for using the value 1 + # here: on Python 2 comparing MagicMock() > 0 returns True, and the + # other reason is that if I specify 0 here, the test fails. 
;) + mock_task.async_val = 1 + mock_task.poll = 0 + + mock_play_context = MagicMock() + mock_play_context.post_validate.return_value = None + mock_play_context.update_vars.return_value = None + + mock_connection = MagicMock() + mock_connection.set_host_overrides.return_value = None + mock_connection._connect.return_value = None + + mock_action = MagicMock() + mock_queue = MagicMock() + + shared_loader = None + new_stdin = None + job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX") + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=shared_loader, + final_q=mock_queue, + ) + + te._get_connection = MagicMock(return_value=mock_connection) + te._get_action_handler = MagicMock(return_value=mock_action) + + mock_action.run.return_value = dict(ansible_facts=dict()) + res = te._execute() + + mock_task.changed_when = MagicMock(return_value=AnsibleUnicode("1 == 1")) + res = te._execute() + + mock_task.changed_when = None + mock_task.failed_when = MagicMock(return_value=AnsibleUnicode("1 == 1")) + res = te._execute() + + mock_task.failed_when = None + mock_task.evaluate_conditional.return_value = False + res = te._execute() + + mock_task.evaluate_conditional.return_value = True + mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar') + mock_task.action = 'include' + res = te._execute() + + def test_task_executor_poll_async_result(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.async_val = 0.1 + mock_task.poll = 0.05 + + mock_play_context = MagicMock() + + mock_connection = MagicMock() + + mock_action = MagicMock() + mock_queue = MagicMock() + + shared_loader = MagicMock() + shared_loader.action_loader = action_loader + + new_stdin = None + job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX") + + te = TaskExecutor( + host=mock_host, + task=mock_task, + job_vars=job_vars, + play_context=mock_play_context, + new_stdin=new_stdin, + loader=fake_loader, + shared_loader_obj=shared_loader, + final_q=mock_queue, + ) + + te._connection = MagicMock() + + def _get(*args, **kwargs): + mock_action = MagicMock() + mock_action.run.return_value = dict(stdout='') + return mock_action + + # testing with some bad values in the result passed to poll async, + # and with a bad value returned from the mock action + with patch.object(action_loader, 'get', _get): + mock_templar = MagicMock() + res = te._poll_async_result(result=dict(), templar=mock_templar) + self.assertIn('failed', res) + res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar) + self.assertIn('failed', res) + + def _get(*args, **kwargs): + mock_action = MagicMock() + mock_action.run.return_value = dict(finished=1) + return mock_action + + # now testing with good values + with patch.object(action_loader, 'get', _get): + mock_templar = MagicMock() + res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar) + self.assertEqual(res, dict(finished=1)) + + def test_recursive_remove_omit(self): + omit_token = 'POPCORN' + + data = { + 'foo': 'bar', + 'baz': 1, + 'qux': ['one', 'two', 'three'], + 'subdict': { + 'remove': 'POPCORN', + 'keep': 'not_popcorn', + 'subsubdict': { + 'remove': 'POPCORN', + 'keep': 'not_popcorn', + }, + 'a_list': ['POPCORN'], + }, + 'a_list': ['POPCORN'], + 'list_of_lists': [ + ['some', 'thing'], + ], + 'list_of_dicts': [ + { + 'remove': 'POPCORN', + } + ], + } + + expected = { + 'foo': 'bar', + 'baz': 1, + 'qux': 
['one', 'two', 'three'],
+            'subdict': {
+                'keep': 'not_popcorn',
+                'subsubdict': {
+                    'keep': 'not_popcorn',
+                },
+                'a_list': ['POPCORN'],
+            },
+            'a_list': ['POPCORN'],
+            'list_of_lists': [
+                ['some', 'thing'],
+            ],
+            'list_of_dicts': [{}],
+        }
+
+        self.assertEqual(remove_omit(data, omit_token), expected)
diff --git a/test/units/executor/test_task_queue_manager_callbacks.py b/test/units/executor/test_task_queue_manager_callbacks.py
new file mode 100644
index 00000000..6c0ceee0
--- /dev/null
+++ b/test/units/executor/test_task_queue_manager_callbacks.py
@@ -0,0 +1,121 @@
+# (c) 2016, Steve Kuznetsov
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.playbook import Playbook
+from ansible.plugins.callback import CallbackBase
+from ansible.utils import context_objects as co
+
+__metaclass__ = type
+
+
+class TestTaskQueueManagerCallbacks(unittest.TestCase):
+    def setUp(self):
+        inventory = MagicMock()
+        variable_manager = MagicMock()
+        loader = MagicMock()
+        passwords = []
+
+        # Reset the stored command line args
+        co.GlobalCLIArgs._Singleton__instance = None
+        self._tqm = TaskQueueManager(inventory, variable_manager, loader, passwords)
+        self._playbook = Playbook(loader)
+
+        # we use a MagicMock to register the result of the call we
+        # expect to `v2_playbook_on_start`. We don't mock out the
+        # method since we're testing code that uses `inspect` to
+        # look at that method's argspec and we want to ensure this
+        # test is easy to reason about.
+        self._register = MagicMock()
+
+    def tearDown(self):
+        # Reset the stored command line args
+        co.GlobalCLIArgs._Singleton__instance = None
+
+    def test_task_queue_manager_callbacks_v2_playbook_on_start(self):
+        """
+        Assert that no exceptions are raised when sending a Playbook
+        start callback to a current callback module plugin.
+        """
+        register = self._register
+
+        class CallbackModule(CallbackBase):
+            """
+            This is a callback module with the current
+            method signature for `v2_playbook_on_start`.
+            """
+            CALLBACK_VERSION = 2.0
+            CALLBACK_TYPE = 'notification'
+            CALLBACK_NAME = 'current_module'
+
+            def v2_playbook_on_start(self, playbook):
+                register(self, playbook)
+
+        callback_module = CallbackModule()
+        self._tqm._callback_plugins.append(callback_module)
+        self._tqm.send_callback('v2_playbook_on_start', self._playbook)
+        register.assert_called_once_with(callback_module, self._playbook)
+
+    def test_task_queue_manager_callbacks_v2_playbook_on_start_wrapped(self):
+        """
+        Assert that no exceptions are raised when sending a Playbook
+        start callback to a wrapped current callback module plugin.
+ """ + register = self._register + + def wrap_callback(func): + """ + This wrapper changes the exposed argument + names for a method from the original names + to (*args, **kwargs). This is used in order + to validate that wrappers which change par- + ameter names do not break the TQM callback + system. + + :param func: function to decorate + :return: decorated function + """ + + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + return wrapper + + class WrappedCallbackModule(CallbackBase): + """ + This is a callback module with the current + method signature for `v2_playbook_on_start` + wrapped in order to change the signature. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'current_module' + + @wrap_callback + def v2_playbook_on_start(self, playbook): + register(self, playbook) + + callback_module = WrappedCallbackModule() + self._tqm._callback_plugins.append(callback_module) + self._tqm.send_callback('v2_playbook_on_start', self._playbook) + register.assert_called_once_with(callback_module, self._playbook) diff --git a/test/units/executor/test_task_result.py b/test/units/executor/test_task_result.py new file mode 100644 index 00000000..3ce210de --- /dev/null +++ b/test/units/executor/test_task_result.py @@ -0,0 +1,171 @@ +# (c) 2016, James Cammarata +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch, MagicMock + +from ansible.executor.task_result import TaskResult + + +class TestTaskResult(unittest.TestCase): + def test_task_result_basic(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test loading a result with a dict + tr = TaskResult(mock_host, mock_task, dict()) + + # test loading a result with a JSON string + with patch('ansible.parsing.dataloader.DataLoader.load') as p: + tr = TaskResult(mock_host, mock_task, '{}') + + def test_task_result_is_changed(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no changed in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_changed()) + + # test with changed in the result + tr = TaskResult(mock_host, mock_task, dict(changed=True)) + self.assertTrue(tr.is_changed()) + + # test with multiple results but none changed + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_changed()) + + # test with multiple results and one changed + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)])) + self.assertTrue(tr.is_changed()) + + def test_task_result_is_skipped(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no skipped in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_skipped()) + + # test with skipped in the result + tr = TaskResult(mock_host, mock_task, dict(skipped=True)) + self.assertTrue(tr.is_skipped()) + + # test with multiple results but none skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_skipped()) + + # test with multiple results and one skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)])) + self.assertFalse(tr.is_skipped()) + + # test with multiple results and all skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)])) + self.assertTrue(tr.is_skipped()) + + # test with multiple squashed results (list of strings) + # first with the main result having skipped=False + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False)) + self.assertFalse(tr.is_skipped()) + # then with the main result having skipped=True + tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True)) + self.assertTrue(tr.is_skipped()) + + def test_task_result_is_unreachable(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no unreachable in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_unreachable()) + + # test with unreachable in the result + tr = TaskResult(mock_host, mock_task, dict(unreachable=True)) + self.assertTrue(tr.is_unreachable()) + + # test with multiple results but none unreachable + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_unreachable()) + + # test with multiple results and one unreachable + mock_task.loop = 'foo' + tr = 
TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
+        self.assertTrue(tr.is_unreachable())
+
+    def test_task_result_is_failed(self):
+        mock_host = MagicMock()
+        mock_task = MagicMock()
+
+        # test with no failed in result
+        tr = TaskResult(mock_host, mock_task, dict())
+        self.assertFalse(tr.is_failed())
+
+        # test failed result with rc values (should not matter)
+        tr = TaskResult(mock_host, mock_task, dict(rc=0))
+        self.assertFalse(tr.is_failed())
+        tr = TaskResult(mock_host, mock_task, dict(rc=1))
+        self.assertFalse(tr.is_failed())
+
+        # test with failed in result
+        tr = TaskResult(mock_host, mock_task, dict(failed=True))
+        self.assertTrue(tr.is_failed())
+
+        # test with failed_when in result
+        tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
+        self.assertTrue(tr.is_failed())
+
+    def test_task_result_no_log(self):
+        mock_host = MagicMock()
+        mock_task = MagicMock()
+
+        # no_log should remove secrets
+        tr = TaskResult(mock_host, mock_task, dict(_ansible_no_log=True, secret='DONTSHOWME'))
+        clean = tr.clean_copy()
+        self.assertTrue('secret' not in clean._result)
+
+    def test_task_result_no_log_preserve(self):
+        mock_host = MagicMock()
+        mock_task = MagicMock()
+
+        # no_log should not remove preserved keys
+        tr = TaskResult(
+            mock_host,
+            mock_task,
+            dict(
+                _ansible_no_log=True,
+                retries=5,
+                attempts=5,
+                changed=False,
+                foo='bar',
+            )
+        )
+        clean = tr.clean_copy()
+        self.assertTrue('retries' in clean._result)
+        self.assertTrue('attempts' in clean._result)
+        self.assertTrue('changed' in clean._result)
+        self.assertTrue('foo' not in clean._result)
diff --git a/test/units/galaxy/__init__.py b/test/units/galaxy/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/units/galaxy/test_api.py b/test/units/galaxy/test_api.py
new file mode 100644
index 00000000..f333a64b
--- /dev/null
+++ b/test/units/galaxy/test_api.py
@@ -0,0 +1,912 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import pytest
+import tarfile
+import tempfile
+import time
+
+from io import BytesIO, StringIO
+from units.compat.mock import MagicMock
+
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.galaxy import api as galaxy_api
+from ansible.galaxy.api import CollectionVersionMetadata, GalaxyAPI, GalaxyError
+from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six.moves.urllib import error as urllib_error
+from ansible.utils import context_objects as co
+from ansible.utils.display import Display
+
+
+@pytest.fixture(autouse='function')
+def reset_cli_args():
+    co.GlobalCLIArgs._Singleton__instance = None
+    # Required to initialise the GalaxyAPI object
+    context.CLIARGS._store = {'ignore_certs': False}
+    yield
+    co.GlobalCLIArgs._Singleton__instance = None
+
+
+@pytest.fixture()
+def collection_artifact(tmp_path_factory):
+    ''' Creates a collection artifact tarball that is ready to be published '''
+    output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
+
+    tar_path = os.path.join(output_dir, 'namespace-collection-v1.0.0.tar.gz')
+    with tarfile.open(tar_path, 'w:gz') as tfile:
+        b_io = BytesIO(b"\x00\x01\x02\x03")
+        tar_info = tarfile.TarInfo('test')
+        tar_info.size = 4
+        tar_info.mode = 0o0644
+        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+    yield tar_path
+
+
+def get_test_galaxy_api(url, version, token_ins=None, token_value=None):
+    token_value = token_value or "my token"
+    token_ins = token_ins or GalaxyToken(token_value)
+    api = GalaxyAPI(None, "test", url)
+    # Warning: this doesn't test g_connect() because _available_api_versions is set here. That means
+    # that URLs for v2 servers have to append '/api/' themselves in the input data.
+    api._available_api_versions = {version: '%s' % version}
+    api.token = token_ins
+
+    return api
+
+
+def test_api_no_auth():
+    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
+    actual = {}
+    api._add_auth_token(actual, "")
+    assert actual == {}
+
+
+def test_api_no_auth_but_required():
+    expected = "No access token or username set. A token can be set with --api-key or at "
+    with pytest.raises(AnsibleError, match=expected):
+        GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")._add_auth_token({}, "", required=True)
+
+
+def test_api_token_auth():
+    token = GalaxyToken(token=u"my_token")
+    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+    actual = {}
+    api._add_auth_token(actual, "", required=True)
+    assert actual == {'Authorization': 'Token my_token'}
+
+
+def test_api_token_auth_with_token_type(monkeypatch):
+    token = KeycloakToken(auth_url='https://api.test/')
+    mock_token_get = MagicMock()
+    mock_token_get.return_value = 'my_token'
+    monkeypatch.setattr(token, 'get', mock_token_get)
+    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+    actual = {}
+    api._add_auth_token(actual, "", token_type="Bearer", required=True)
+    assert actual == {'Authorization': 'Bearer my_token'}
+
+
+def test_api_token_auth_with_v3_url(monkeypatch):
+    token = KeycloakToken(auth_url='https://api.test/')
+    mock_token_get = MagicMock()
+    mock_token_get.return_value = 'my_token'
+    monkeypatch.setattr(token, 'get', mock_token_get)
+    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+    actual = {}
+    api._add_auth_token(actual, "https://galaxy.ansible.com/api/v3/resource/name", required=True)
+    assert actual == {'Authorization': 'Bearer my_token'}
+
+
+def test_api_token_auth_with_v2_url():
+    token = GalaxyToken(token=u"my_token")
+    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+    actual = {}
+    # 'v3' appears in the URL below, but only inside 'resourcev3' rather than as a full
+    # URI path segment, so the v2 'Token' style auth header should still be used.
+ api._add_auth_token(actual, "https://galaxy.ansible.com/api/v2/resourcev3/name", required=True) + assert actual == {'Authorization': 'Token my_token'} + + +def test_api_basic_auth_password(): + token = BasicAuthToken(username=u"user", password=u"pass") + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token) + actual = {} + api._add_auth_token(actual, "", required=True) + assert actual == {'Authorization': 'Basic dXNlcjpwYXNz'} + + +def test_api_basic_auth_no_password(): + token = BasicAuthToken(username=u"user") + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token) + actual = {} + api._add_auth_token(actual, "", required=True) + assert actual == {'Authorization': 'Basic dXNlcjo='} + + +def test_api_dont_override_auth_header(): + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/") + actual = {'Authorization': 'Custom token'} + api._add_auth_token(actual, "", required=True) + assert actual == {'Authorization': 'Custom token'} + + +def test_initialise_galaxy(monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(u'{"available_versions":{"v1":"v1/"}}'), + StringIO(u'{"token":"my token"}'), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/") + actual = api.authenticate("github_token") + + assert len(api.available_api_versions) == 2 + assert api.available_api_versions['v1'] == u'v1/' + assert api.available_api_versions['v2'] == u'v2/' + assert actual == {u'token': u'my token'} + assert mock_open.call_count == 2 + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/' + assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent'] + assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/' + assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent'] + assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token' + + +def test_initialise_galaxy_with_auth(monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(u'{"available_versions":{"v1":"v1/"}}'), + StringIO(u'{"token":"my token"}'), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token')) + actual = api.authenticate("github_token") + + assert len(api.available_api_versions) == 2 + assert api.available_api_versions['v1'] == u'v1/' + assert api.available_api_versions['v2'] == u'v2/' + assert actual == {u'token': u'my token'} + assert mock_open.call_count == 2 + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/' + assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent'] + assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/' + assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent'] + assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token' + + +def test_initialise_automation_hub(monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(u'{"available_versions":{"v2": "v2/", "v3":"v3/"}}'), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + token = KeycloakToken(auth_url='https://api.test/') + mock_token_get = MagicMock() + mock_token_get.return_value = 'my_token' + monkeypatch.setattr(token, 'get', mock_token_get) + + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token) + + assert len(api.available_api_versions) == 2 + assert 
api.available_api_versions['v2'] == u'v2/' + assert api.available_api_versions['v3'] == u'v3/' + + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/' + assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent'] + assert mock_open.mock_calls[0][2]['headers'] == {'Authorization': 'Bearer my_token'} + + +def test_initialise_unknown(monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = [ + urllib_error.HTTPError('https://galaxy.ansible.com/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')), + urllib_error.HTTPError('https://galaxy.ansible.com/api/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token')) + + expected = "Error when finding available api versions from test (%s) (HTTP Code: 500, Message: msg)" \ + % api.api_server + with pytest.raises(AnsibleError, match=re.escape(expected)): + api.authenticate("github_token") + + +def test_get_available_api_versions(monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(u'{"available_versions":{"v1":"v1/","v2":"v2/"}}'), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/") + actual = api.available_api_versions + assert len(actual) == 2 + assert actual['v1'] == u'v1/' + assert actual['v2'] == u'v2/' + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/' + assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent'] + + +def test_publish_collection_missing_file(): + fake_path = u'/fake/ÅÑŚÌβŁÈ/path' + expected = to_native("The collection path specified '%s' does not exist." % fake_path) + + api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2") + with pytest.raises(AnsibleError, match=expected): + api.publish_collection(fake_path) + + +def test_publish_collection_not_a_tarball(): + expected = "The collection path specified '{0}' is not a tarball, use 'ansible-galaxy collection build' to " \ + "create a proper release artifact." 
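+    # A single NUL byte written to the temp file below yields a real, readable
+    # file that is still not a valid tar archive, which is the error path this
+    # test exercises.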
+ + api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2") + with tempfile.NamedTemporaryFile(prefix=u'ÅÑŚÌβŁÈ') as temp_file: + temp_file.write(b"\x00") + temp_file.flush() + with pytest.raises(AnsibleError, match=expected.format(to_native(temp_file.name))): + api.publish_collection(temp_file.name) + + +def test_publish_collection_unsupported_version(): + expected = "Galaxy action publish_collection requires API versions 'v2, v3' but only 'v1' are available on test " \ + "https://galaxy.ansible.com/api/" + + api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v1") + with pytest.raises(AnsibleError, match=expected): + api.publish_collection("path") + + +@pytest.mark.parametrize('api_version, collection_url', [ + ('v2', 'collections'), + ('v3', 'artifacts/collections'), +]) +def test_publish_collection(api_version, collection_url, collection_artifact, monkeypatch): + api = get_test_galaxy_api("https://galaxy.ansible.com/api/", api_version) + + mock_call = MagicMock() + mock_call.return_value = {'task': 'http://task.url/'} + monkeypatch.setattr(api, '_call_galaxy', mock_call) + + actual = api.publish_collection(collection_artifact) + assert actual == 'http://task.url/' + assert mock_call.call_count == 1 + assert mock_call.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/%s/%s/' % (api_version, collection_url) + assert mock_call.mock_calls[0][2]['headers']['Content-length'] == len(mock_call.mock_calls[0][2]['args']) + assert mock_call.mock_calls[0][2]['headers']['Content-type'].startswith( + 'multipart/form-data; boundary=') + assert mock_call.mock_calls[0][2]['args'].startswith(b'--') + assert mock_call.mock_calls[0][2]['method'] == 'POST' + assert mock_call.mock_calls[0][2]['auth_required'] is True + + +@pytest.mark.parametrize('api_version, collection_url, response, expected', [ + ('v2', 'collections', {}, + 'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'), + ('v2', 'collections', { + 'message': u'Galaxy error messäge', + 'code': 'GWE002', + }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Galaxy error messäge Code: GWE002)'), + ('v3', 'artifact/collections', {}, + 'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'), + ('v3', 'artifact/collections', { + 'errors': [ + { + 'code': 'conflict.collection_exists', + 'detail': 'Collection "mynamespace-mycollection-4.1.1" already exists.', + 'title': 'Conflict.', + 'status': '400', + }, + { + 'code': 'quantum_improbability', + 'title': u'Rändom(?) quantum improbability.', + 'source': {'parameter': 'the_arrow_of_time'}, + 'meta': {'remediation': 'Try again before'}, + }, + ], + }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Collection ' + u'"mynamespace-mycollection-4.1.1" already exists. Code: conflict.collection_exists), (HTTP Code: 500, ' + u'Message: Rändom(?) quantum improbability. 
Code: quantum_improbability)') +]) +def test_publish_failure(api_version, collection_url, response, expected, collection_artifact, monkeypatch): + api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version) + + expected_url = '%s/api/%s/%s' % (api.api_server, api_version, collection_url) + + mock_open = MagicMock() + mock_open.side_effect = urllib_error.HTTPError(expected_url, 500, 'msg', {}, + StringIO(to_text(json.dumps(response)))) + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + with pytest.raises(GalaxyError, match=re.escape(to_native(expected % api.api_server))): + api.publish_collection(collection_artifact) + + +@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [ + ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'), + '1234', + 'https://galaxy.server.com/api/v2/collection-imports/1234/'), + ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), + '1234', + 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'), +]) +def test_wait_import_task(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch): + api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.return_value = StringIO(u'{"state":"success","finished_at":"time"}') + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + api.wait_import_task(import_uri) + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == full_import_uri + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri + + +@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [ + ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'), + '1234', + 'https://galaxy.server.com/api/v2/collection-imports/1234/'), + ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), + '1234', + 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'), +]) +def test_wait_import_task_multiple_requests(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch): + api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(u'{"state":"test"}'), + StringIO(u'{"state":"success","finished_at":"time"}'), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + mock_vvv = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_vvv) + + monkeypatch.setattr(time, 'sleep', MagicMock()) + + api.wait_import_task(import_uri) + + assert mock_open.call_count == 2 + assert mock_open.mock_calls[0][1][0] == full_import_uri + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % 
token_type + assert mock_open.mock_calls[1][1][0] == full_import_uri + assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri + + assert mock_vvv.call_count == 1 + assert mock_vvv.mock_calls[0][1][0] == \ + 'Galaxy import process has a status of test, wait 2 seconds before trying again' + + +@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri,', [ + ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'), + '1234', + 'https://galaxy.server.com/api/v2/collection-imports/1234/'), + ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), + '1234', + 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'), +]) +def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch): + api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(to_text(json.dumps({ + 'finished_at': 'some_time', + 'state': 'failed', + 'error': { + 'code': 'GW001', + 'description': u'Becäuse I said so!', + + }, + 'messages': [ + { + 'level': 'error', + 'message': u'Somé error', + }, + { + 'level': 'warning', + 'message': u'Some wärning', + }, + { + 'level': 'info', + 'message': u'Somé info', + }, + ], + }))), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + mock_vvv = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_vvv) + + mock_warn = MagicMock() + monkeypatch.setattr(Display, 'warning', mock_warn) + + mock_err = MagicMock() + monkeypatch.setattr(Display, 'error', mock_err) + + expected = to_native(u'Galaxy import process failed: Becäuse I said so! 
(Code: GW001)') + with pytest.raises(AnsibleError, match=re.escape(expected)): + api.wait_import_task(import_uri) + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == full_import_uri + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri + + assert mock_vvv.call_count == 1 + assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info' + + assert mock_warn.call_count == 1 + assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning' + + assert mock_err.call_count == 1 + assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error' + + +@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [ + ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my_token'), + '1234', + 'https://galaxy.server.com/api/v2/collection-imports/1234/'), + ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), + '1234', + 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'), +]) +def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch): + api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(to_text(json.dumps({ + 'finished_at': 'some_time', + 'state': 'failed', + 'error': {}, + 'messages': [ + { + 'level': 'error', + 'message': u'Somé error', + }, + { + 'level': 'warning', + 'message': u'Some wärning', + }, + { + 'level': 'info', + 'message': u'Somé info', + }, + ], + }))), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + mock_vvv = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_vvv) + + mock_warn = MagicMock() + monkeypatch.setattr(Display, 'warning', mock_warn) + + mock_err = MagicMock() + monkeypatch.setattr(Display, 'error', mock_err) + + expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri + with pytest.raises(AnsibleError, match=expected): + api.wait_import_task(import_uri) + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == full_import_uri + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri + + assert mock_vvv.call_count == 1 + assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info' + + assert mock_warn.call_count == 1 + assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning' + + assert mock_err.call_count == 1 + assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error' + + +@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [ + ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'), + '1234', + 
'https://galaxy.server.com/api/v2/collection-imports/1234/'), + ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), + '1234', + 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'), +]) +def test_wait_import_task_timeout(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch): + api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + def return_response(*args, **kwargs): + return StringIO(u'{"state":"waiting"}') + + mock_open = MagicMock() + mock_open.side_effect = return_response + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + mock_vvv = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_vvv) + + monkeypatch.setattr(time, 'sleep', MagicMock()) + + expected = "Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % full_import_uri + with pytest.raises(AnsibleError, match=expected): + api.wait_import_task(import_uri, 1) + + assert mock_open.call_count > 1 + assert mock_open.mock_calls[0][1][0] == full_import_uri + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + assert mock_open.mock_calls[1][1][0] == full_import_uri + assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri + + # expected_wait_msg = 'Galaxy import process has a status of waiting, wait {0} seconds before trying again' + assert mock_vvv.call_count > 9 # 1st is opening Galaxy token file. 
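+    # The individual wait messages are not asserted (see the FIXME below); the
+    # delay between polls grows on each retry, so only a minimum vvv call count
+    # is checked above.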
+ + # FIXME: + # assert mock_vvv.mock_calls[1][1][0] == expected_wait_msg.format(2) + # assert mock_vvv.mock_calls[2][1][0] == expected_wait_msg.format(3) + # assert mock_vvv.mock_calls[3][1][0] == expected_wait_msg.format(4) + # assert mock_vvv.mock_calls[4][1][0] == expected_wait_msg.format(6) + # assert mock_vvv.mock_calls[5][1][0] == expected_wait_msg.format(10) + # assert mock_vvv.mock_calls[6][1][0] == expected_wait_msg.format(15) + # assert mock_vvv.mock_calls[7][1][0] == expected_wait_msg.format(22) + # assert mock_vvv.mock_calls[8][1][0] == expected_wait_msg.format(30) + + +@pytest.mark.parametrize('api_version, token_type, version, token_ins', [ + ('v2', None, 'v2.1.13', None), + ('v3', 'Bearer', 'v1.0.0', KeycloakToken(auth_url='https://api.test/api/automation-hub/')), +]) +def test_get_collection_version_metadata_no_version(api_version, token_type, version, token_ins, monkeypatch): + api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(to_text(json.dumps({ + 'download_url': 'https://downloadme.com', + 'artifact': { + 'sha256': 'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f', + }, + 'namespace': { + 'name': 'namespace', + }, + 'collection': { + 'name': 'collection', + }, + 'version': version, + 'metadata': { + 'dependencies': {}, + } + }))), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + actual = api.get_collection_version_metadata('namespace', 'collection', version) + + assert isinstance(actual, CollectionVersionMetadata) + assert actual.namespace == u'namespace' + assert actual.name == u'collection' + assert actual.download_url == u'https://downloadme.com' + assert actual.artifact_sha256 == u'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f' + assert actual.version == version + assert actual.dependencies == {} + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \ + % (api.api_server, api_version, version) + + # v2 calls dont need auth, so no authz header or token_type + if token_type: + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + + +@pytest.mark.parametrize('api_version, token_type, token_ins, response', [ + ('v2', None, None, { + 'count': 2, + 'next': None, + 'previous': None, + 'results': [ + { + 'version': '1.0.0', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0', + }, + { + 'version': '1.0.1', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1', + }, + ], + }), + # TODO: Verify this once Automation Hub is actually out + ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), { + 'count': 2, + 'next': None, + 'previous': None, + 'data': [ + { + 'version': '1.0.0', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0', + }, + { + 'version': '1.0.1', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1', + }, + ], + }), +]) +def test_get_collection_versions(api_version, token_type, token_ins, response, monkeypatch): + api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + 
mock_token_get.return_value = 'my token' + monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [ + StringIO(to_text(json.dumps(response))), + ] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + actual = api.get_collection_versions('namespace', 'collection') + assert actual == [u'1.0.0', u'1.0.1'] + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \ + 'versions/' % api_version + if token_ins: + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + + +@pytest.mark.parametrize('api_version, token_type, token_ins, responses', [ + ('v2', None, None, [ + { + 'count': 6, + 'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2', + 'previous': None, + 'results': [ + { + 'version': '1.0.0', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0', + }, + { + 'version': '1.0.1', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1', + }, + ], + }, + { + 'count': 6, + 'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=3', + 'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions', + 'results': [ + { + 'version': '1.0.2', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.2', + }, + { + 'version': '1.0.3', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.3', + }, + ], + }, + { + 'count': 6, + 'next': None, + 'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2', + 'results': [ + { + 'version': '1.0.4', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.4', + }, + { + 'version': '1.0.5', + 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.5', + }, + ], + }, + ]), + ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), [ + { + 'count': 6, + 'links': { + 'next': '/api/v3/collections/namespace/collection/versions/?page=2', + 'previous': None, + }, + 'data': [ + { + 'version': '1.0.0', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.0', + }, + { + 'version': '1.0.1', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.1', + }, + ], + }, + { + 'count': 6, + 'links': { + 'next': '/api/v3/collections/namespace/collection/versions/?page=3', + 'previous': '/api/v3/collections/namespace/collection/versions', + }, + 'data': [ + { + 'version': '1.0.2', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.2', + }, + { + 'version': '1.0.3', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.3', + }, + ], + }, + { + 'count': 6, + 'links': { + 'next': None, + 'previous': '/api/v3/collections/namespace/collection/versions/?page=2', + }, + 'data': [ + { + 'version': '1.0.4', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.4', + }, + { + 'version': '1.0.5', + 'href': '/api/v3/collections/namespace/collection/versions/1.0.5', + }, + ], + }, + ]), +]) +def test_get_collection_versions_pagination(api_version, token_type, token_ins, responses, monkeypatch): + api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins) + + if token_ins: + mock_token_get = MagicMock() + mock_token_get.return_value = 'my token' + 
monkeypatch.setattr(token_ins, 'get', mock_token_get) + + mock_open = MagicMock() + mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + actual = api.get_collection_versions('namespace', 'collection') + assert actual == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5'] + + assert mock_open.call_count == 3 + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \ + 'versions/' % api_version + assert mock_open.mock_calls[1][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \ + 'versions/?page=2' % api_version + assert mock_open.mock_calls[2][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \ + 'versions/?page=3' % api_version + + if token_type: + assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type + assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type + assert mock_open.mock_calls[2][2]['headers']['Authorization'] == '%s my token' % token_type + + +@pytest.mark.parametrize('responses', [ + [ + { + 'count': 2, + 'results': [{'name': '3.5.1', }, {'name': '3.5.2'}], + 'next_link': None, + 'next': None, + 'previous_link': None, + 'previous': None + }, + ], + [ + { + 'count': 2, + 'results': [{'name': '3.5.1'}], + 'next_link': '/api/v1/roles/432/versions/?page=2&page_size=50', + 'next': '/roles/432/versions/?page=2&page_size=50', + 'previous_link': None, + 'previous': None + }, + { + 'count': 2, + 'results': [{'name': '3.5.2'}], + 'next_link': None, + 'next': None, + 'previous_link': '/api/v1/roles/432/versions/?&page_size=50', + 'previous': '/roles/432/versions/?page_size=50', + }, + ] +]) +def test_get_role_versions_pagination(monkeypatch, responses): + api = get_test_galaxy_api('https://galaxy.com/api/', 'v1') + + mock_open = MagicMock() + mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses] + monkeypatch.setattr(galaxy_api, 'open_url', mock_open) + + actual = api.fetch_role_related('versions', 432) + assert actual == [{'name': '3.5.1'}, {'name': '3.5.2'}] + + assert mock_open.call_count == len(responses) + + assert mock_open.mock_calls[0][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page_size=50' + if len(responses) == 2: + assert mock_open.mock_calls[1][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page=2&page_size=50' diff --git a/test/units/galaxy/test_collection.py b/test/units/galaxy/test_collection.py new file mode 100644 index 00000000..fda6fe69 --- /dev/null +++ b/test/units/galaxy/test_collection.py @@ -0,0 +1,1326 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import pytest +import re +import tarfile +import uuid + +from hashlib import sha256 +from io import BytesIO +from units.compat.mock import MagicMock, mock_open, patch + +from ansible import context +from ansible.cli.galaxy import GalaxyCLI +from ansible.errors import AnsibleError +from ansible.galaxy import api, collection, token +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.six.moves import builtins +from ansible.utils import context_objects as co +from ansible.utils.display import 
Display +from ansible.utils.hashing import secure_hash_s + + +@pytest.fixture(autouse='function') +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None + + +@pytest.fixture() +def collection_input(tmp_path_factory): + ''' Creates a collection skeleton directory for build tests ''' + test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + namespace = 'ansible_namespace' + collection = 'collection' + skeleton = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton') + + galaxy_args = ['ansible-galaxy', 'collection', 'init', '%s.%s' % (namespace, collection), + '-c', '--init-path', test_dir, '--collection-skeleton', skeleton] + GalaxyCLI(args=galaxy_args).run() + collection_dir = os.path.join(test_dir, namespace, collection) + output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Output')) + + return collection_dir, output_dir + + +@pytest.fixture() +def collection_artifact(monkeypatch, tmp_path_factory): + ''' Creates a temp collection artifact and mocked open_url instance for publishing tests ''' + mock_open = MagicMock() + monkeypatch.setattr(collection, 'open_url', mock_open) + + mock_uuid = MagicMock() + mock_uuid.return_value.hex = 'uuid' + monkeypatch.setattr(uuid, 'uuid4', mock_uuid) + + tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections') + input_file = to_text(tmp_path / 'collection.tar.gz') + + with tarfile.open(input_file, 'w:gz') as tfile: + b_io = BytesIO(b"\x00\x01\x02\x03") + tar_info = tarfile.TarInfo('test') + tar_info.size = 4 + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + return input_file, mock_open + + +@pytest.fixture() +def galaxy_yml(request, tmp_path_factory): + b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + b_galaxy_yml = os.path.join(b_test_dir, b'galaxy.yml') + with open(b_galaxy_yml, 'wb') as galaxy_obj: + galaxy_obj.write(to_bytes(request.param)) + + yield b_galaxy_yml + + +@pytest.fixture() +def tmp_tarfile(tmp_path_factory, manifest_info): + ''' Creates a temporary tar file for _extract_tar_file tests ''' + filename = u'ÅÑŚÌβŁÈ' + temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename))) + tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename)) + data = os.urandom(8) + + with tarfile.open(tar_file, 'w:gz') as tfile: + b_io = BytesIO(data) + tar_info = tarfile.TarInfo(filename) + tar_info.size = len(data) + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + b_data = to_bytes(json.dumps(manifest_info, indent=True), errors='surrogate_or_strict') + b_io = BytesIO(b_data) + tar_info = tarfile.TarInfo('MANIFEST.json') + tar_info.size = len(b_data) + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + sha256_hash = sha256() + sha256_hash.update(data) + + with tarfile.open(tar_file, 'r') as tfile: + yield temp_dir, tfile, filename, sha256_hash.hexdigest() + + +@pytest.fixture() +def galaxy_server(): + context.CLIARGS._store = {'ignore_certs': False} + galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com', + token=token.GalaxyToken(token='key')) + return galaxy_api + + +@pytest.fixture() +def manifest_template(): + def get_manifest_info(namespace='ansible_namespace', name='collection', version='0.1.0'): + return { + "collection_info": { + "namespace": namespace, + "name": name, + "version": version, + "authors": [ + "shertel" + ], + 
"readme": "README.md", + "tags": [ + "test", + "collection" + ], + "description": "Test", + "license": [ + "MIT" + ], + "license_file": None, + "dependencies": {}, + "repository": "https://github.com/{0}/{1}".format(namespace, name), + "documentation": None, + "homepage": None, + "issues": None + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "files_manifest_checksum", + "format": 1 + }, + "format": 1 + } + + return get_manifest_info + + +@pytest.fixture() +def manifest_info(manifest_template): + return manifest_template() + + +@pytest.fixture() +def files_manifest_info(): + return { + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": None, + "chksum_sha256": None, + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "individual_file_checksum", + "format": 1 + } + ], + "format": 1} + + +@pytest.fixture() +def manifest(manifest_info): + b_data = to_bytes(json.dumps(manifest_info)) + + with patch.object(builtins, 'open', mock_open(read_data=b_data)) as m: + with open('MANIFEST.json', mode='rb') as fake_file: + yield fake_file, sha256(b_data).hexdigest() + + +@pytest.fixture() +def mock_collection(galaxy_server): + def create_mock_collection(namespace='ansible_namespace', name='collection', version='0.1.0', local=True, local_installed=True): + b_path = None + force = False + + if local: + mock_collection = collection.CollectionRequirement(namespace, name, b_path, galaxy_server, [version], version, force, skip=local_installed) + else: + download_url = 'https://galaxy.ansible.com/download/{0}-{1}-{2}.tar.gz'.format(namespace, name, version) + digest = '19415a6a6df831df61cffde4a09d1d89ac8d8ca5c0586e85bea0b106d6dff29a' + dependencies = {} + metadata = api.CollectionVersionMetadata(namespace, name, version, download_url, digest, dependencies) + mock_collection = collection.CollectionRequirement(namespace, name, b_path, galaxy_server, [version], version, force, metadata=metadata) + + return mock_collection + return create_mock_collection + + +def test_build_collection_no_galaxy_yaml(): + fake_path = u'/fake/ÅÑŚÌβŁÈ/path' + expected = to_native("The collection galaxy.yml path '%s/galaxy.yml' does not exist." % fake_path) + + with pytest.raises(AnsibleError, match=expected): + collection.build_collection(fake_path, 'output', False) + + +def test_build_existing_output_file(collection_input): + input_dir, output_dir = collection_input + + existing_output_dir = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') + os.makedirs(existing_output_dir) + + expected = "The output collection artifact '%s' already exists, but is a directory - aborting" \ + % to_native(existing_output_dir) + with pytest.raises(AnsibleError, match=expected): + collection.build_collection(input_dir, output_dir, False) + + +def test_build_existing_output_without_force(collection_input): + input_dir, output_dir = collection_input + + existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') + with open(existing_output, 'w+') as out_file: + out_file.write("random garbage") + out_file.flush() + + expected = "The file '%s' already exists. You can use --force to re-create the collection artifact." 
\ + % to_native(existing_output) + with pytest.raises(AnsibleError, match=expected): + collection.build_collection(input_dir, output_dir, False) + + +def test_build_existing_output_with_force(collection_input): + input_dir, output_dir = collection_input + + existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') + with open(existing_output, 'w+') as out_file: + out_file.write("random garbage") + out_file.flush() + + collection.build_collection(input_dir, output_dir, True) + + # Verify the file was replaced with an actual tar file + assert tarfile.is_tarfile(existing_output) + + +@pytest.mark.parametrize('galaxy_yml', [b'namespace: value: broken'], indirect=True) +def test_invalid_yaml_galaxy_file(galaxy_yml): + expected = to_native(b"Failed to parse the galaxy.yml at '%s' with the following error:" % galaxy_yml) + + with pytest.raises(AnsibleError, match=expected): + collection._get_galaxy_yml(galaxy_yml) + + +@pytest.mark.parametrize('galaxy_yml', [b'namespace: test_namespace'], indirect=True) +def test_missing_required_galaxy_key(galaxy_yml): + expected = "The collection galaxy.yml at '%s' is missing the following mandatory keys: authors, name, " \ + "readme, version" % to_native(galaxy_yml) + + with pytest.raises(AnsibleError, match=expected): + collection._get_galaxy_yml(galaxy_yml) + + +@pytest.mark.parametrize('galaxy_yml', [b""" +namespace: namespace +name: collection +authors: Jordan +version: 0.1.0 +readme: README.md +invalid: value"""], indirect=True) +def test_warning_extra_keys(galaxy_yml, monkeypatch): + display_mock = MagicMock() + monkeypatch.setattr(Display, 'warning', display_mock) + + collection._get_galaxy_yml(galaxy_yml) + + assert display_mock.call_count == 1 + assert display_mock.call_args[0][0] == "Found unknown keys in collection galaxy.yml at '%s': invalid"\ + % to_text(galaxy_yml) + + +@pytest.mark.parametrize('galaxy_yml', [b""" +namespace: namespace +name: collection +authors: Jordan +version: 0.1.0 +readme: README.md"""], indirect=True) +def test_defaults_galaxy_yml(galaxy_yml): + actual = collection._get_galaxy_yml(galaxy_yml) + + assert actual['namespace'] == 'namespace' + assert actual['name'] == 'collection' + assert actual['authors'] == ['Jordan'] + assert actual['version'] == '0.1.0' + assert actual['readme'] == 'README.md' + assert actual['description'] is None + assert actual['repository'] is None + assert actual['documentation'] is None + assert actual['homepage'] is None + assert actual['issues'] is None + assert actual['tags'] == [] + assert actual['dependencies'] == {} + assert actual['license_ids'] == [] + + +@pytest.mark.parametrize('galaxy_yml', [(b""" +namespace: namespace +name: collection +authors: Jordan +version: 0.1.0 +readme: README.md +license: MIT"""), (b""" +namespace: namespace +name: collection +authors: Jordan +version: 0.1.0 +readme: README.md +license: +- MIT""")], indirect=True) +def test_galaxy_yml_list_value(galaxy_yml): + actual = collection._get_galaxy_yml(galaxy_yml) + assert actual['license_ids'] == ['MIT'] + + +def test_build_ignore_files_and_folders(collection_input, monkeypatch): + input_dir = collection_input[0] + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_display) + + git_folder = os.path.join(input_dir, '.git') + retry_file = os.path.join(input_dir, 'ansible.retry') + + tests_folder = os.path.join(input_dir, 'tests', 'output') + tests_output_file = os.path.join(tests_folder, 'result.txt') + + os.makedirs(git_folder) + os.makedirs(tests_folder) + + with 
open(retry_file, 'w+') as ignore_file: + ignore_file.write('random') + ignore_file.flush() + + with open(tests_output_file, 'w+') as tests_file: + tests_file.write('random') + tests_file.flush() + + actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', []) + + assert actual['format'] == 1 + for manifest_entry in actual['files']: + assert manifest_entry['name'] not in ['.git', 'ansible.retry', 'galaxy.yml', 'tests/output', 'tests/output/result.txt'] + + expected_msgs = [ + "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir), + "Skipping '%s' for collection build" % to_text(retry_file), + "Skipping '%s' for collection build" % to_text(git_folder), + "Skipping '%s' for collection build" % to_text(tests_folder), + ] + assert mock_display.call_count == 4 + assert mock_display.mock_calls[0][1][0] in expected_msgs + assert mock_display.mock_calls[1][1][0] in expected_msgs + assert mock_display.mock_calls[2][1][0] in expected_msgs + assert mock_display.mock_calls[3][1][0] in expected_msgs + + +def test_build_ignore_older_release_in_root(collection_input, monkeypatch): + input_dir = collection_input[0] + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_display) + + # This is expected to be ignored because it is in the root collection dir. + release_file = os.path.join(input_dir, 'namespace-collection-0.0.0.tar.gz') + + # This is not expected to be ignored because it is not in the root collection dir. + fake_release_file = os.path.join(input_dir, 'plugins', 'namespace-collection-0.0.0.tar.gz') + + for filename in [release_file, fake_release_file]: + with open(filename, 'w+') as file_obj: + file_obj.write('random') + file_obj.flush() + + actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', []) + assert actual['format'] == 1 + + plugin_release_found = False + for manifest_entry in actual['files']: + assert manifest_entry['name'] != 'namespace-collection-0.0.0.tar.gz' + if manifest_entry['name'] == 'plugins/namespace-collection-0.0.0.tar.gz': + plugin_release_found = True + + assert plugin_release_found + + expected_msgs = [ + "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir), + "Skipping '%s' for collection build" % to_text(release_file) + ] + assert mock_display.call_count == 2 + assert mock_display.mock_calls[0][1][0] in expected_msgs + assert mock_display.mock_calls[1][1][0] in expected_msgs + + +def test_build_ignore_patterns(collection_input, monkeypatch): + input_dir = collection_input[0] + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'vvv', mock_display) + + actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', + ['*.md', 'plugins/action', 'playbooks/*.j2']) + assert actual['format'] == 1 + + expected_missing = [ + 'README.md', + 'docs/My Collection.md', + 'plugins/action', + 'playbooks/templates/test.conf.j2', + 'playbooks/templates/subfolder/test.conf.j2', + ] + + # Files or dirs that are close to a match but are not, make sure they are present + expected_present = [ + 'docs', + 'roles/common/templates/test.conf.j2', + 'roles/common/templates/subfolder/test.conf.j2', + ] + + actual_files = [e['name'] for e in actual['files']] + for m in expected_missing: + assert m not in actual_files + + for p in expected_present: + assert p in actual_files + + expected_msgs = [ + "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir), + "Skipping '%s/README.md' for collection build" % to_text(input_dir), + 
"Skipping '%s/docs/My Collection.md' for collection build" % to_text(input_dir), + "Skipping '%s/plugins/action' for collection build" % to_text(input_dir), + "Skipping '%s/playbooks/templates/test.conf.j2' for collection build" % to_text(input_dir), + "Skipping '%s/playbooks/templates/subfolder/test.conf.j2' for collection build" % to_text(input_dir), + ] + assert mock_display.call_count == len(expected_msgs) + assert mock_display.mock_calls[0][1][0] in expected_msgs + assert mock_display.mock_calls[1][1][0] in expected_msgs + assert mock_display.mock_calls[2][1][0] in expected_msgs + assert mock_display.mock_calls[3][1][0] in expected_msgs + assert mock_display.mock_calls[4][1][0] in expected_msgs + assert mock_display.mock_calls[5][1][0] in expected_msgs + + +def test_build_ignore_symlink_target_outside_collection(collection_input, monkeypatch): + input_dir, outside_dir = collection_input + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'warning', mock_display) + + link_path = os.path.join(input_dir, 'plugins', 'connection') + os.symlink(outside_dir, link_path) + + actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', []) + for manifest_entry in actual['files']: + assert manifest_entry['name'] != 'plugins/connection' + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == "Skipping '%s' as it is a symbolic link to a directory outside " \ + "the collection" % to_text(link_path) + + +def test_build_copy_symlink_target_inside_collection(collection_input): + input_dir = collection_input[0] + + os.makedirs(os.path.join(input_dir, 'playbooks', 'roles')) + roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked') + + roles_target = os.path.join(input_dir, 'roles', 'linked') + roles_target_tasks = os.path.join(roles_target, 'tasks') + os.makedirs(roles_target_tasks) + with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main: + tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:") + tasks_main.flush() + + os.symlink(roles_target, roles_link) + + actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', []) + + linked_entries = [e for e in actual['files'] if e['name'].startswith('playbooks/roles/linked')] + assert len(linked_entries) == 1 + assert linked_entries[0]['name'] == 'playbooks/roles/linked' + assert linked_entries[0]['ftype'] == 'dir' + + +def test_build_with_symlink_inside_collection(collection_input): + input_dir, output_dir = collection_input + + os.makedirs(os.path.join(input_dir, 'playbooks', 'roles')) + roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked') + file_link = os.path.join(input_dir, 'docs', 'README.md') + + roles_target = os.path.join(input_dir, 'roles', 'linked') + roles_target_tasks = os.path.join(roles_target, 'tasks') + os.makedirs(roles_target_tasks) + with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main: + tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:") + tasks_main.flush() + + os.symlink(roles_target, roles_link) + os.symlink(os.path.join(input_dir, 'README.md'), file_link) + + collection.build_collection(input_dir, output_dir, False) + + output_artifact = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') + assert tarfile.is_tarfile(output_artifact) + + with tarfile.open(output_artifact, mode='r') as actual: + members = actual.getmembers() + + linked_folder = next(m for m in members if m.path == 'playbooks/roles/linked') + assert 
linked_folder.type == tarfile.SYMTYPE + assert linked_folder.linkname == '../../roles/linked' + + linked_file = next(m for m in members if m.path == 'docs/README.md') + assert linked_file.type == tarfile.SYMTYPE + assert linked_file.linkname == '../README.md' + + linked_file_obj = actual.extractfile(linked_file.name) + actual_file = secure_hash_s(linked_file_obj.read()) + linked_file_obj.close() + + assert actual_file == '63444bfc766154e1bc7557ef6280de20d03fcd81' + + +def test_publish_no_wait(galaxy_server, collection_artifact, monkeypatch): + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + artifact_path, mock_open = collection_artifact + fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234' + + mock_publish = MagicMock() + mock_publish.return_value = fake_import_uri + monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish) + + collection.publish_collection(artifact_path, galaxy_server, False, 0) + + assert mock_publish.call_count == 1 + assert mock_publish.mock_calls[0][1][0] == artifact_path + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == \ + "Collection has been pushed to the Galaxy server %s %s, not waiting until import has completed due to " \ + "--no-wait being set. Import task results can be found at %s" % (galaxy_server.name, galaxy_server.api_server, + fake_import_uri) + + +def test_publish_with_wait(galaxy_server, collection_artifact, monkeypatch): + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + artifact_path, mock_open = collection_artifact + fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234' + + mock_publish = MagicMock() + mock_publish.return_value = fake_import_uri + monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish) + + mock_wait = MagicMock() + monkeypatch.setattr(galaxy_server, 'wait_import_task', mock_wait) + + collection.publish_collection(artifact_path, galaxy_server, True, 0) + + assert mock_publish.call_count == 1 + assert mock_publish.mock_calls[0][1][0] == artifact_path + + assert mock_wait.call_count == 1 + assert mock_wait.mock_calls[0][1][0] == '1234' + + assert mock_display.mock_calls[0][1][0] == "Collection has been published to the Galaxy server test_server %s" \ + % galaxy_server.api_server + + +def test_find_existing_collections(tmp_path_factory, monkeypatch): + test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + collection1 = os.path.join(test_dir, 'namespace1', 'collection1') + collection2 = os.path.join(test_dir, 'namespace2', 'collection2') + fake_collection1 = os.path.join(test_dir, 'namespace3', 'collection3') + fake_collection2 = os.path.join(test_dir, 'namespace4') + os.makedirs(collection1) + os.makedirs(collection2) + os.makedirs(os.path.split(fake_collection1)[0]) + + open(fake_collection1, 'wb+').close() + open(fake_collection2, 'wb+').close() + + collection1_manifest = json.dumps({ + 'collection_info': { + 'namespace': 'namespace1', + 'name': 'collection1', + 'version': '1.2.3', + 'authors': ['Jordan Borean'], + 'readme': 'README.md', + 'dependencies': {}, + }, + 'format': 1, + }) + with open(os.path.join(collection1, 'MANIFEST.json'), 'wb') as manifest_obj: + manifest_obj.write(to_bytes(collection1_manifest)) + + mock_warning = MagicMock() + monkeypatch.setattr(Display, 'warning', mock_warning) + + actual = collection.find_existing_collections(test_dir) + + assert len(actual) == 2 + for actual_collection in actual: + assert actual_collection.skip is 
True + + if str(actual_collection) == 'namespace1.collection1': + assert actual_collection.namespace == 'namespace1' + assert actual_collection.name == 'collection1' + assert actual_collection.b_path == to_bytes(collection1) + assert actual_collection.api is None + assert actual_collection.versions == set(['1.2.3']) + assert actual_collection.latest_version == '1.2.3' + assert actual_collection.dependencies == {} + else: + assert actual_collection.namespace == 'namespace2' + assert actual_collection.name == 'collection2' + assert actual_collection.b_path == to_bytes(collection2) + assert actual_collection.api is None + assert actual_collection.versions == set(['*']) + assert actual_collection.latest_version == '*' + assert actual_collection.dependencies == {} + + assert mock_warning.call_count == 1 + assert mock_warning.mock_calls[0][1][0] == "Collection at '%s' does not have a MANIFEST.json file, cannot " \ + "detect version." % to_text(collection2) + + +def test_download_file(tmp_path_factory, monkeypatch): + temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + + data = b"\x00\x01\x02\x03" + sha256_hash = sha256() + sha256_hash.update(data) + + mock_open = MagicMock() + mock_open.return_value = BytesIO(data) + monkeypatch.setattr(collection, 'open_url', mock_open) + + expected = os.path.join(temp_dir, b'file') + actual = collection._download_file('http://google.com/file', temp_dir, sha256_hash.hexdigest(), True) + + assert actual.startswith(expected) + assert os.path.isfile(actual) + with open(actual, 'rb') as file_obj: + assert file_obj.read() == data + + assert mock_open.call_count == 1 + assert mock_open.mock_calls[0][1][0] == 'http://google.com/file' + + +def test_download_file_hash_mismatch(tmp_path_factory, monkeypatch): + temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) + + data = b"\x00\x01\x02\x03" + + mock_open = MagicMock() + mock_open.return_value = BytesIO(data) + monkeypatch.setattr(collection, 'open_url', mock_open) + + expected = "Mismatch artifact hash with downloaded file" + with pytest.raises(AnsibleError, match=expected): + collection._download_file('http://google.com/file', temp_dir, 'bad', True) + + +def test_extract_tar_file_invalid_hash(tmp_tarfile): + temp_dir, tfile, filename, dummy = tmp_tarfile + + expected = "Checksum mismatch for '%s' inside collection at '%s'" % (to_native(filename), to_native(tfile.name)) + with pytest.raises(AnsibleError, match=expected): + collection._extract_tar_file(tfile, filename, temp_dir, temp_dir, "fakehash") + + +def test_extract_tar_file_missing_member(tmp_tarfile): + temp_dir, tfile, dummy, dummy = tmp_tarfile + + expected = "Collection tar at '%s' does not contain the expected file 'missing'." 
% to_native(tfile.name)
+    with pytest.raises(AnsibleError, match=expected):
+        collection._extract_tar_file(tfile, 'missing', temp_dir, temp_dir)
+
+
+def test_extract_tar_file_missing_parent_dir(tmp_tarfile):
+    temp_dir, tfile, filename, checksum = tmp_tarfile
+    output_dir = os.path.join(temp_dir, b'output')
+    output_file = os.path.join(output_dir, to_bytes(filename))
+
+    collection._extract_tar_file(tfile, filename, output_dir, temp_dir, checksum)
+    assert os.path.isfile(output_file)
+
+
+def test_extract_tar_file_outside_dir(tmp_path_factory):
+    filename = u'ÅÑŚÌβŁÈ'
+    temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename)))
+    tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename))
+    data = os.urandom(8)
+
+    tar_filename = '../%s.sh' % filename
+    with tarfile.open(tar_file, 'w:gz') as tfile:
+        b_io = BytesIO(data)
+        tar_info = tarfile.TarInfo(tar_filename)
+        tar_info.size = len(data)
+        tar_info.mode = 0o0644
+        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+    expected = re.escape("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
+                         % to_native(tar_filename))
+    with tarfile.open(tar_file, 'r') as tfile:
+        with pytest.raises(AnsibleError, match=expected):
+            collection._extract_tar_file(tfile, tar_filename, os.path.join(temp_dir, to_bytes(filename)), temp_dir)
+
+
+def test_require_one_of_collections_requirements_with_both():
+    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace.collection', '-r', 'requirements.yml'])
+
+    with pytest.raises(AnsibleError) as req_err:
+        cli._require_one_of_collections_requirements(('namespace.collection',), 'requirements.yml')
+
+    with pytest.raises(AnsibleError) as cli_err:
+        cli.run()
+
+    assert req_err.value.message == cli_err.value.message == 'The positional collection_name arg and --requirements-file are mutually exclusive.'
+
+
+def test_require_one_of_collections_requirements_with_neither():
+    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify'])
+
+    with pytest.raises(AnsibleError) as req_err:
+        cli._require_one_of_collections_requirements((), '')
+
+    with pytest.raises(AnsibleError) as cli_err:
+        cli.run()
+
+    assert req_err.value.message == cli_err.value.message == 'You must specify a collection name or a requirements file.'
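The two tests above pin down the argument contract behind ansible-galaxy collection verify: positional collection names and --requirements-file are mutually exclusive, and at least one of the two must be supplied. A minimal sketch of that guard under those assumptions (require_one_of is an illustrative stand-in, not the ansible implementation):

    def require_one_of(collections, requirements_file):
        # Exactly one source of requirements may be supplied.
        if collections and requirements_file:
            raise ValueError('The positional collection_name arg and --requirements-file are mutually exclusive.')
        if not collections and not requirements_file:
            raise ValueError('You must specify a collection name or a requirements file.')

    require_one_of(('namespace.collection',), '')   # one source given: passes
    # require_one_of((), '') and require_one_of(('ns.col',), 'reqs.yml') would both raise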
+ + +def test_require_one_of_collections_requirements_with_collections(): + cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace1.collection1', 'namespace2.collection1:1.0.0']) + collections = ('namespace1.collection1', 'namespace2.collection1:1.0.0',) + + requirements = cli._require_one_of_collections_requirements(collections, '')['collections'] + + assert requirements == [('namespace1.collection1', '*', None, None), ('namespace2.collection1', '1.0.0', None, None)] + + +@patch('ansible.cli.galaxy.GalaxyCLI._parse_requirements_file') +def test_require_one_of_collections_requirements_with_requirements(mock_parse_requirements_file, galaxy_server): + cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', '-r', 'requirements.yml', 'namespace.collection']) + mock_parse_requirements_file.return_value = {'collections': [('namespace.collection', '1.0.5', galaxy_server)]} + requirements = cli._require_one_of_collections_requirements((), 'requirements.yml')['collections'] + + assert mock_parse_requirements_file.call_count == 1 + assert requirements == [('namespace.collection', '1.0.5', galaxy_server)] + + +@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify', spec=True) +def test_call_GalaxyCLI(execute_verify): + galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection'] + + GalaxyCLI(args=galaxy_args).run() + + assert execute_verify.call_count == 1 + + +@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify') +def test_call_GalaxyCLI_with_implicit_role(execute_verify): + galaxy_args = ['ansible-galaxy', 'verify', 'namespace.implicit_role'] + + with pytest.raises(SystemExit): + GalaxyCLI(args=galaxy_args).run() + + assert not execute_verify.called + + +@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify') +def test_call_GalaxyCLI_with_role(execute_verify): + galaxy_args = ['ansible-galaxy', 'role', 'verify', 'namespace.role'] + + with pytest.raises(SystemExit): + GalaxyCLI(args=galaxy_args).run() + + assert not execute_verify.called + + +@patch('ansible.cli.galaxy.verify_collections', spec=True) +def test_execute_verify_with_defaults(mock_verify_collections): + galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4'] + GalaxyCLI(args=galaxy_args).run() + + assert mock_verify_collections.call_count == 1 + + requirements, search_paths, galaxy_apis, validate, ignore_errors = mock_verify_collections.call_args[0] + + assert requirements == [('namespace.collection', '1.0.4', None, None)] + for install_path in search_paths: + assert install_path.endswith('ansible_collections') + assert galaxy_apis[0].api_server == 'https://galaxy.ansible.com' + assert validate is True + assert ignore_errors is False + + +@patch('ansible.cli.galaxy.verify_collections', spec=True) +def test_execute_verify(mock_verify_collections): + GalaxyCLI(args=[ + 'ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4', '--ignore-certs', + '-p', '~/.ansible', '--ignore-errors', '--server', 'http://galaxy-dev.com', + ]).run() + + assert mock_verify_collections.call_count == 1 + + requirements, search_paths, galaxy_apis, validate, ignore_errors = mock_verify_collections.call_args[0] + + assert requirements == [('namespace.collection', '1.0.4', None, None)] + for install_path in search_paths: + assert install_path.endswith('ansible_collections') + assert galaxy_apis[0].api_server == 'http://galaxy-dev.com' + assert validate is False + assert ignore_errors is True + + +def test_verify_file_hash_deleted_file(manifest_info): + data = 
to_bytes(json.dumps(manifest_info))
+    digest = sha256(data).hexdigest()
+
+    namespace = manifest_info['collection_info']['namespace']
+    name = manifest_info['collection_info']['name']
+    version = manifest_info['collection_info']['version']
+    server = 'http://galaxy.ansible.com'
+
+    error_queue = []
+
+    with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+        with patch.object(collection.os.path, 'isfile', MagicMock(return_value=False)) as mock_isfile:
+            collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+            collection_req._verify_file_hash(b'path/', 'file', digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+    assert len(error_queue) == 1
+    assert error_queue[0].installed is None
+    assert error_queue[0].expected == digest
+
+
+def test_verify_file_hash_matching_hash(manifest_info):
+
+    data = to_bytes(json.dumps(manifest_info))
+    digest = sha256(data).hexdigest()
+
+    namespace = manifest_info['collection_info']['namespace']
+    name = manifest_info['collection_info']['name']
+    version = manifest_info['collection_info']['version']
+    server = 'http://galaxy.ansible.com'
+
+    error_queue = []
+
+    with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+        with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
+            collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+            collection_req._verify_file_hash(b'path/', 'file', digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+    assert error_queue == []
+
+
+def test_verify_file_hash_mismatching_hash(manifest_info):
+
+    data = to_bytes(json.dumps(manifest_info))
+    digest = sha256(data).hexdigest()
+    different_digest = 'not_{0}'.format(digest)
+
+    namespace = manifest_info['collection_info']['namespace']
+    name = manifest_info['collection_info']['name']
+    version = manifest_info['collection_info']['version']
+    server = 'http://galaxy.ansible.com'
+
+    error_queue = []
+
+    with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+        with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
+            collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+            collection_req._verify_file_hash(b'path/', 'file', different_digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+    assert len(error_queue) == 1
+    assert error_queue[0].installed == digest
+    assert error_queue[0].expected == different_digest
+
+
+def test_consume_file(manifest):
+
+    manifest_file, checksum = manifest
+    assert checksum == collection._consume_file(manifest_file)
+
+
+def test_consume_file_and_write_contents(manifest, manifest_info):
+
+    manifest_file, checksum = manifest
+
+    write_to = BytesIO()
+    actual_hash = collection._consume_file(manifest_file, write_to)
+
+    write_to.seek(0)
+    assert to_bytes(json.dumps(manifest_info)) == write_to.read()
+    assert actual_hash == checksum
+
+
+def test_get_tar_file_member(tmp_tarfile):
+
+    temp_dir, tfile, filename, checksum = tmp_tarfile
+
+    with collection._get_tar_file_member(tfile, filename) as (tar_file_member, tar_file_obj):
+        assert isinstance(tar_file_member, tarfile.TarInfo)
+        assert isinstance(tar_file_obj, tarfile.ExFileObject)
+
+
+def test_get_nonexistent_tar_file_member(tmp_tarfile):
+    temp_dir, tfile, filename, checksum = tmp_tarfile
+
+    file_does_not_exist = filename + 'nonexistent'
+
+    with pytest.raises(AnsibleError) as err:
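+        # file_does_not_exist was built above to be absent from the fixture
+        # archive, so _get_tar_file_member is expected to raise AnsibleError.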
collection._get_tar_file_member(tfile, file_does_not_exist) + + assert to_text(err.value.message) == "Collection tar at '%s' does not contain the expected file '%s'." % (to_text(tfile.name), file_does_not_exist) + + +def test_get_tar_file_hash(tmp_tarfile): + temp_dir, tfile, filename, checksum = tmp_tarfile + + assert checksum == collection._get_tar_file_hash(tfile.name, filename) + + +def test_get_json_from_tar_file(tmp_tarfile): + temp_dir, tfile, filename, checksum = tmp_tarfile + + assert 'MANIFEST.json' in tfile.getnames() + + data = collection._get_json_from_tar_file(tfile.name, 'MANIFEST.json') + + assert isinstance(data, dict) + + +def test_verify_collection_not_installed(mock_collection): + + local_collection = mock_collection(local_installed=False) + remote_collection = mock_collection(local=False) + + with patch.object(collection.display, 'display') as mocked_display: + local_collection.verify(remote_collection, './', './') + + assert mocked_display.called + assert mocked_display.call_args[0][0] == "'%s.%s' has not been installed, nothing to verify" % (local_collection.namespace, local_collection.name) + + +def test_verify_successful_debug_info(monkeypatch, mock_collection): + local_collection = mock_collection() + remote_collection = mock_collection(local=False) + + monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock()) + monkeypatch.setattr(collection.CollectionRequirement, '_verify_file_hash', MagicMock()) + monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock()) + + with patch.object(collection.display, 'vvv') as mock_display: + local_collection.verify(remote_collection, './', './') + + namespace = local_collection.namespace + name = local_collection.name + version = local_collection.latest_version + + assert mock_display.call_count == 4 + assert mock_display.call_args_list[0][0][0] == "Verifying '%s.%s:%s'." 
% (namespace, name, version) + assert mock_display.call_args_list[1][0][0] == "Installed collection found at './%s/%s'" % (namespace, name) + located = "Remote collection found at 'https://galaxy.ansible.com/download/%s-%s-%s.tar.gz'" % (namespace, name, version) + assert mock_display.call_args_list[2][0][0] == located + verified = "Successfully verified that checksums for '%s.%s:%s' match the remote collection" % (namespace, name, version) + assert mock_display.call_args_list[3][0][0] == verified + + +def test_verify_different_versions(mock_collection): + + local_collection = mock_collection(version='0.1.0') + remote_collection = mock_collection(local=False, version='3.0.0') + + with patch.object(collection.display, 'display') as mock_display: + local_collection.verify(remote_collection, './', './') + + namespace = local_collection.namespace + name = local_collection.name + installed_version = local_collection.latest_version + compared_version = remote_collection.latest_version + + msg = "%s.%s has the version '%s' but is being compared to '%s'" % (namespace, name, installed_version, compared_version) + + assert mock_display.call_count == 1 + assert mock_display.call_args[0][0] == msg + + +@patch.object(builtins, 'open', mock_open()) +def test_verify_modified_manifest(monkeypatch, mock_collection, manifest_info): + local_collection = mock_collection() + remote_collection = mock_collection(local=False) + + monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum'])) + monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum_modified', 'files_manifest_checksum'])) + monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, {'files': []}])) + monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True)) + + with patch.object(collection.display, 'display') as mock_display: + with patch.object(collection.display, 'vvv') as mock_debug: + local_collection.verify(remote_collection, './', './') + + namespace = local_collection.namespace + name = local_collection.name + + assert mock_display.call_count == 3 + assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name) + assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name) + assert mock_display.call_args_list[2][0][0] == ' MANIFEST.json' + + # The -vvv output should show details (the checksums do not match) + assert mock_debug.call_count == 5 + assert mock_debug.call_args_list[-1][0][0] == ' Expected: manifest_checksum\n Found: manifest_checksum_modified' + + +@patch.object(builtins, 'open', mock_open()) +def test_verify_modified_files_manifest(monkeypatch, mock_collection, manifest_info): + local_collection = mock_collection() + remote_collection = mock_collection(local=False) + + monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum'])) + monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum', 'files_manifest_checksum_modified'])) + monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, {'files': []}])) + monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True)) + + with patch.object(collection.display, 'display') as mock_display: + with patch.object(collection.display, 'vvv') as mock_debug: + local_collection.verify(remote_collection, './', './') + + namespace = 
local_collection.namespace
+    name = local_collection.name
+
+    assert mock_display.call_count == 3
+    assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name)
+    assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name)
+    assert mock_display.call_args_list[2][0][0] == ' FILES.json'
+
+    # The -vvv output should show details (the checksums do not match)
+    assert mock_debug.call_count == 5
+    assert mock_debug.call_args_list[-1][0][0] == ' Expected: files_manifest_checksum\n Found: files_manifest_checksum_modified'
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_modified_files(monkeypatch, mock_collection, manifest_info, files_manifest_info):
+
+    local_collection = mock_collection()
+    remote_collection = mock_collection(local=False)
+
+    monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+    fakehashes = ['manifest_checksum', 'files_manifest_checksum', 'individual_file_checksum_modified']
+    monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=fakehashes))
+    monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, files_manifest_info]))
+    monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+    with patch.object(collection.display, 'display') as mock_display:
+        with patch.object(collection.display, 'vvv') as mock_debug:
+            local_collection.verify(remote_collection, './', './')
+
+    namespace = local_collection.namespace
+    name = local_collection.name
+
+    assert mock_display.call_count == 3
+    assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name)
+    assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name)
+    assert mock_display.call_args_list[2][0][0] == ' README.md'
+
+    # The -vvv output should show details (the checksums do not match)
+    assert mock_debug.call_count == 5
+    assert mock_debug.call_args_list[-1][0][0] == ' Expected: individual_file_checksum\n Found: individual_file_checksum_modified'
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_identical(monkeypatch, mock_collection, manifest_info, files_manifest_info):
+
+    local_collection = mock_collection()
+    remote_collection = mock_collection(local=False)
+
+    monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+    monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum', 'files_manifest_checksum', 'individual_file_checksum']))
+    monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, files_manifest_info]))
+    monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+    with patch.object(collection.display, 'display') as mock_display:
+        with patch.object(collection.display, 'vvv') as mock_debug:
+            local_collection.verify(remote_collection, './', './')
+
+    # Successful verification is quiet
+    assert mock_display.call_count == 0
+
+    # The -vvv output should report the successful verification
+    namespace = local_collection.namespace
+    name = local_collection.name
+    version = local_collection.latest_version
+    success_msg = "Successfully verified that checksums for '%s.%s:%s' match the remote collection" % (namespace, name, version)
+
+    assert mock_debug.call_count == 4
+    assert mock_debug.call_args_list[-1][0][0] == success_msg
+
+
+@patch.object(os.path,
'isdir', return_value=True) +def test_verify_collections_no_version(mock_isdir, mock_collection, monkeypatch): + namespace = 'ansible_namespace' + name = 'collection' + version = '*' # Occurs if MANIFEST.json does not exist + + local_collection = mock_collection(namespace=namespace, name=name, version=version) + monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=local_collection)) + + collections = [('%s.%s' % (namespace, name), version, None)] + + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, './', local_collection.api, False, False) + + err_msg = 'Collection %s.%s does not appear to have a MANIFEST.json. ' % (namespace, name) + err_msg += 'A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy.' + assert err.value.message == err_msg + + +@patch.object(collection.CollectionRequirement, 'verify') +def test_verify_collections_not_installed(mock_verify, mock_collection, monkeypatch): + namespace = 'ansible_namespace' + name = 'collection' + version = '1.0.0' + + local_collection = mock_collection(local_installed=False) + + found_remote = MagicMock(return_value=mock_collection(local=False)) + monkeypatch.setattr(collection.CollectionRequirement, 'from_name', found_remote) + + collections = [('%s.%s' % (namespace, name), version, None, None)] + search_path = './' + validate_certs = False + ignore_errors = False + apis = [local_collection.api] + + with patch.object(collection, '_download_file') as mock_download_file: + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors) + + assert err.value.message == "Collection %s.%s is not installed in any of the collection paths." % (namespace, name) + + +@patch.object(collection.CollectionRequirement, 'verify') +def test_verify_collections_not_installed_ignore_errors(mock_verify, mock_collection, monkeypatch): + namespace = 'ansible_namespace' + name = 'collection' + version = '1.0.0' + + local_collection = mock_collection(local_installed=False) + + found_remote = MagicMock(return_value=mock_collection(local=False)) + monkeypatch.setattr(collection.CollectionRequirement, 'from_name', found_remote) + + collections = [('%s.%s' % (namespace, name), version, None)] + search_path = './' + validate_certs = False + ignore_errors = True + apis = [local_collection.api] + + with patch.object(collection, '_download_file') as mock_download_file: + with patch.object(Display, 'warning') as mock_warning: + collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors) + + skip_message = "Failed to verify collection %s.%s but skipping due to --ignore-errors being set." % (namespace, name) + original_err = "Error: Collection %s.%s is not installed in any of the collection paths." 
% (namespace, name) + + assert mock_warning.called + assert mock_warning.call_args[0][0] == skip_message + " " + original_err + + +@patch.object(os.path, 'isdir', return_value=True) +@patch.object(collection.CollectionRequirement, 'verify') +def test_verify_collections_no_remote(mock_verify, mock_isdir, mock_collection, monkeypatch): + namespace = 'ansible_namespace' + name = 'collection' + version = '1.0.0' + + monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True])) + monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=mock_collection())) + + collections = [('%s.%s' % (namespace, name), version, None)] + search_path = './' + validate_certs = False + ignore_errors = False + apis = [] + + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors) + + assert err.value.message == "Failed to find remote collection %s.%s:%s on any of the galaxy servers" % (namespace, name, version) + + +@patch.object(os.path, 'isdir', return_value=True) +@patch.object(collection.CollectionRequirement, 'verify') +def test_verify_collections_no_remote_ignore_errors(mock_verify, mock_isdir, mock_collection, monkeypatch): + namespace = 'ansible_namespace' + name = 'collection' + version = '1.0.0' + + monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True])) + monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=mock_collection())) + + collections = [('%s.%s' % (namespace, name), version, None)] + search_path = './' + validate_certs = False + ignore_errors = True + apis = [] + + with patch.object(Display, 'warning') as mock_warning: + collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors) + + skip_message = "Failed to verify collection %s.%s but skipping due to --ignore-errors being set." % (namespace, name) + original_err = "Error: Failed to find remote collection %s.%s:%s on any of the galaxy servers" % (namespace, name, version) + + assert mock_warning.called + assert mock_warning.call_args[0][0] == skip_message + " " + original_err + + +def test_verify_collections_tarfile(monkeypatch): + + monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=True)) + + invalid_format = 'ansible_namespace-collection-0.1.0.tar.gz' + collections = [(invalid_format, '*', None)] + + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, './', [], False, False) + + msg = "'%s' is not a valid collection name. The format namespace.name is expected." % invalid_format + assert err.value.message == msg + + +def test_verify_collections_path(monkeypatch): + + monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=False)) + + invalid_format = 'collections/collection_namespace/collection_name' + collections = [(invalid_format, '*', None)] + + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, './', [], False, False) + + msg = "'%s' is not a valid collection name. The format namespace.name is expected." 
% invalid_format + assert err.value.message == msg + + +def test_verify_collections_url(monkeypatch): + + monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=False)) + + invalid_format = 'https://galaxy.ansible.com/download/ansible_namespace-collection-0.1.0.tar.gz' + collections = [(invalid_format, '*', None)] + + with pytest.raises(AnsibleError) as err: + collection.verify_collections(collections, './', [], False, False) + + msg = "'%s' is not a valid collection name. The format namespace.name is expected." % invalid_format + assert err.value.message == msg + + +@patch.object(os.path, 'isdir', return_value=True) +@patch.object(collection.CollectionRequirement, 'verify') +def test_verify_collections_name(mock_verify, mock_isdir, mock_collection, monkeypatch): + local_collection = mock_collection() + monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=local_collection)) + + monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True, False])) + + located_remote_from_name = MagicMock(return_value=mock_collection(local=False)) + monkeypatch.setattr(collection.CollectionRequirement, 'from_name', located_remote_from_name) + + with patch.object(collection, '_download_file') as mock_download_file: + + collections = [('%s.%s' % (local_collection.namespace, local_collection.name), '%s' % local_collection.latest_version, None)] + search_path = './' + validate_certs = False + ignore_errors = False + apis = [local_collection.api] + + collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors) + + assert mock_download_file.call_count == 1 + assert located_remote_from_name.call_count == 1 diff --git a/test/units/galaxy/test_collection_install.py b/test/units/galaxy/test_collection_install.py new file mode 100644 index 00000000..629a3564 --- /dev/null +++ b/test/units/galaxy/test_collection_install.py @@ -0,0 +1,816 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import json +import os +import pytest +import re +import shutil +import stat +import tarfile +import yaml + +from io import BytesIO, StringIO +from units.compat.mock import MagicMock + +import ansible.module_utils.six.moves.urllib.error as urllib_error + +from ansible import context +from ansible.cli.galaxy import GalaxyCLI +from ansible.errors import AnsibleError +from ansible.galaxy import collection, api +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.utils import context_objects as co +from ansible.utils.display import Display + + +def call_galaxy_cli(args): + orig = co.GlobalCLIArgs._Singleton__instance + co.GlobalCLIArgs._Singleton__instance = None + try: + GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run() + finally: + co.GlobalCLIArgs._Singleton__instance = orig + + +def artifact_json(namespace, name, version, dependencies, server): + json_str = json.dumps({ + 'artifact': { + 'filename': '%s-%s-%s.tar.gz' % (namespace, name, version), + 'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd', + 'size': 1234, + }, + 'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version), + 'metadata': { + 'namespace': namespace, + 'name': name, + 'dependencies': dependencies, + }, + 'version': version + }) + return 
to_text(json_str) + + +def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None): + results = [] + available_api_versions = available_api_versions or {} + api_version = 'v2' + if 'v3' in available_api_versions: + api_version = 'v3' + for version in versions: + results.append({ + 'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version), + 'version': version, + }) + + if api_version == 'v2': + json_str = json.dumps({ + 'count': len(versions), + 'next': None, + 'previous': None, + 'results': results + }) + + if api_version == 'v3': + response = {'meta': {'count': len(versions)}, + 'data': results, + 'links': {'first': None, + 'last': None, + 'next': None, + 'previous': None}, + } + json_str = json.dumps(response) + return to_text(json_str) + + +def error_json(galaxy_api, errors_to_return=None, available_api_versions=None): + errors_to_return = errors_to_return or [] + available_api_versions = available_api_versions or {} + + response = {} + + api_version = 'v2' + if 'v3' in available_api_versions: + api_version = 'v3' + + if api_version == 'v2': + assert len(errors_to_return) <= 1 + if errors_to_return: + response = errors_to_return[0] + + if api_version == 'v3': + response['errors'] = errors_to_return + + json_str = json.dumps(response) + return to_text(json_str) + + +@pytest.fixture(autouse='function') +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None + + +@pytest.fixture() +def collection_artifact(request, tmp_path_factory): + test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + namespace = 'ansible_namespace' + collection = 'collection' + + skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton') + collection_path = os.path.join(test_dir, namespace, collection) + + call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir, + '--collection-skeleton', skeleton_path]) + dependencies = getattr(request, 'param', None) + if dependencies: + galaxy_yml = os.path.join(collection_path, 'galaxy.yml') + with open(galaxy_yml, 'rb+') as galaxy_obj: + existing_yaml = yaml.safe_load(galaxy_obj) + existing_yaml['dependencies'] = dependencies + + galaxy_obj.seek(0) + galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml))) + galaxy_obj.truncate() + + # Create a file with +x in the collection so we can test the permissions + execute_path = os.path.join(collection_path, 'runme.sh') + with open(execute_path, mode='wb') as fd: + fd.write(b"echo hi") + os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC) + + call_galaxy_cli(['build', collection_path, '--output-path', test_dir]) + + collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection)) + return to_bytes(collection_path), to_bytes(collection_tar) + + +@pytest.fixture() +def galaxy_server(): + context.CLIARGS._store = {'ignore_certs': False} + galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com') + return galaxy_api + + +def test_build_requirement_from_path(collection_artifact): + actual = collection.CollectionRequirement.from_path(collection_artifact[0], True) + + assert actual.namespace == u'ansible_namespace' + assert actual.name == u'collection' + assert actual.b_path == collection_artifact[0] + assert actual.api is None + assert actual.skip is True + assert actual.versions == set([u'*']) + assert 
actual.latest_version == u'*' + assert actual.dependencies == {} + + +@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0']) +def test_build_requirement_from_path_with_manifest(version, collection_artifact): + manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json') + manifest_value = json.dumps({ + 'collection_info': { + 'namespace': 'namespace', + 'name': 'name', + 'version': version, + 'dependencies': { + 'ansible_namespace.collection': '*' + } + } + }) + with open(manifest_path, 'wb') as manifest_obj: + manifest_obj.write(to_bytes(manifest_value)) + + actual = collection.CollectionRequirement.from_path(collection_artifact[0], True) + + # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth. + assert actual.namespace == u'namespace' + assert actual.name == u'name' + assert actual.b_path == collection_artifact[0] + assert actual.api is None + assert actual.skip is True + assert actual.versions == set([to_text(version)]) + assert actual.latest_version == to_text(version) + assert actual.dependencies == {'ansible_namespace.collection': '*'} + + +def test_build_requirement_from_path_invalid_manifest(collection_artifact): + manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json') + with open(manifest_path, 'wb') as manifest_obj: + manifest_obj.write(b"not json") + + expected = "Collection file at '%s' does not contain a valid json string." % to_native(manifest_path) + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement.from_path(collection_artifact[0], True) + + +def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch): + manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json') + manifest_value = json.dumps({ + 'collection_info': { + 'namespace': 'namespace', + 'name': 'name', + 'version': '', + 'dependencies': {} + } + }) + with open(manifest_path, 'wb') as manifest_obj: + manifest_obj.write(to_bytes(manifest_value)) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + actual = collection.CollectionRequirement.from_path(collection_artifact[0], True) + + # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth. + assert actual.namespace == u'namespace' + assert actual.name == u'name' + assert actual.b_path == collection_artifact[0] + assert actual.api is None + assert actual.skip is True + assert actual.versions == set(['*']) + assert actual.latest_version == u'*' + assert actual.dependencies == {} + + assert mock_display.call_count == 1 + + actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n')) + expected_warn = "Collection at '%s' does not have a valid version set, falling back to '*'. 
Found version: ''" \ + % to_text(collection_artifact[0]) + assert expected_warn in actual_warn + + +def test_build_requirement_from_tar(collection_artifact): + actual = collection.CollectionRequirement.from_tar(collection_artifact[1], True, True) + + assert actual.namespace == u'ansible_namespace' + assert actual.name == u'collection' + assert actual.b_path == collection_artifact[1] + assert actual.api is None + assert actual.skip is False + assert actual.versions == set([u'0.1.0']) + assert actual.latest_version == u'0.1.0' + assert actual.dependencies == {} + + +def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory): + test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + test_file = os.path.join(test_dir, b'fake.tar.gz') + with open(test_file, 'wb') as test_obj: + test_obj.write(b"\x00\x01\x02\x03") + + expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file) + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement.from_tar(test_file, True, True) + + +def test_build_requirement_from_tar_no_manifest(tmp_path_factory): + test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + + json_data = to_bytes(json.dumps( + { + 'files': [], + 'format': 1, + } + )) + + tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz') + with tarfile.open(tar_path, 'w:gz') as tfile: + b_io = BytesIO(json_data) + tar_info = tarfile.TarInfo('FILES.json') + tar_info.size = len(json_data) + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path) + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement.from_tar(tar_path, True, True) + + +def test_build_requirement_from_tar_no_files(tmp_path_factory): + test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + + json_data = to_bytes(json.dumps( + { + 'collection_info': {}, + } + )) + + tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz') + with tarfile.open(tar_path, 'w:gz') as tfile: + b_io = BytesIO(json_data) + tar_info = tarfile.TarInfo('MANIFEST.json') + tar_info.size = len(json_data) + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + expected = "Collection at '%s' does not contain the required file FILES.json." % to_native(tar_path) + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement.from_tar(tar_path, True, True) + + +def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory): + test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) + + json_data = b"not a json" + + tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz') + with tarfile.open(tar_path, 'w:gz') as tfile: + b_io = BytesIO(json_data) + tar_info = tarfile.TarInfo('MANIFEST.json') + tar_info.size = len(json_data) + tar_info.mode = 0o0644 + tfile.addfile(tarinfo=tar_info, fileobj=b_io) + + expected = "Collection tar file member MANIFEST.json does not contain a valid json string." 
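+    # Note: pytest.raises(match=...) re.search()es the pattern against the
+    # error text, so the unescaped '.' in the message above still matches;
+    # re.escape() would be the stricter choice.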
+    with pytest.raises(AnsibleError, match=expected):
+        collection.CollectionRequirement.from_tar(tar_path, True, True)
+
+
+def test_build_requirement_from_name(galaxy_server, monkeypatch):
+    mock_get_versions = MagicMock()
+    mock_get_versions.return_value = ['2.1.9', '2.1.10']
+    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
+
+    assert actual.namespace == u'namespace'
+    assert actual.name == u'collection'
+    assert actual.b_path is None
+    assert actual.api == galaxy_server
+    assert actual.skip is False
+    assert actual.versions == set([u'2.1.9', u'2.1.10'])
+    assert actual.latest_version == u'2.1.10'
+    assert actual.dependencies == {}
+
+    assert mock_get_versions.call_count == 1
+    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
+def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch):
+    mock_get_versions = MagicMock()
+    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
+    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
+
+    assert actual.namespace == u'namespace'
+    assert actual.name == u'collection'
+    assert actual.b_path is None
+    assert actual.api == galaxy_server
+    assert actual.skip is False
+    assert actual.versions == set([u'1.0.1', u'2.0.1'])
+    assert actual.latest_version == u'2.0.1'
+    assert actual.dependencies == {}
+
+    assert mock_get_versions.call_count == 1
+    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
+def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch):
+    mock_get_info = MagicMock()
+    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
+                                                               {})
+    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
+
+    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.1-beta.1', True,
+                                                        True)
+
+    assert actual.namespace == u'namespace'
+    assert actual.name == u'collection'
+    assert actual.b_path is None
+    assert actual.api == galaxy_server
+    assert actual.skip is False
+    assert actual.versions == set([u'2.0.1-beta.1'])
+    assert actual.latest_version == u'2.0.1-beta.1'
+    assert actual.dependencies == {}
+
+    assert mock_get_info.call_count == 1
+    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
+
+
+def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch):
+    mock_get_versions = MagicMock()
+    mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
+    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+    broken_server = copy.copy(galaxy_server)
+    broken_server.api_server = 'https://broken.com/'
+    mock_404 = MagicMock()
+    mock_404.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
+                                                                  StringIO()), "custom msg")
+    monkeypatch.setattr(broken_server, 'get_collection_versions', mock_404)
+
+    actual = collection.CollectionRequirement.from_name('namespace.collection', [broken_server, galaxy_server],
+                                                        '>1.0.1', False, True)
+
+    assert actual.namespace == u'namespace'
+    assert actual.name == u'collection'
+    assert actual.b_path is None
+    # assert actual.api == galaxy_server
+    assert actual.skip is False
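+    # Only the versions satisfying '>1.0.1' from the healthy second server remain: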
assert actual.versions == set([u'1.0.2', u'1.0.3']) + assert actual.latest_version == u'1.0.3' + assert actual.dependencies == {} + + assert mock_404.call_count == 1 + assert mock_404.mock_calls[0][1] == ('namespace', 'collection') + + assert mock_get_versions.call_count == 1 + assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection') + + +def test_build_requirement_from_name_missing(galaxy_server, monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {}, + StringIO()), "") + + monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open) + + expected = "Failed to find collection namespace.collection:*" + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False, + True) + + +def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch): + mock_open = MagicMock() + mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {}, + StringIO()), "error") + + monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open) + + expected = "error (HTTP Code: 401, Message: msg)" + with pytest.raises(api.GalaxyError, match=re.escape(expected)): + collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False) + + +def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch): + mock_get_info = MagicMock() + mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None, + {}) + monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info) + + actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.0', True, + True) + + assert actual.namespace == u'namespace' + assert actual.name == u'collection' + assert actual.b_path is None + assert actual.api == galaxy_server + assert actual.skip is False + assert actual.versions == set([u'2.0.0']) + assert actual.latest_version == u'2.0.0' + assert actual.dependencies == {} + + assert mock_get_info.call_count == 1 + assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0') + + +def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch): + mock_get_versions = MagicMock() + mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2'] + monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions) + + mock_get_info = MagicMock() + mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, + {}) + monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info) + + actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '>=2.0.1,<2.0.2', + True, True) + + assert actual.namespace == u'namespace' + assert actual.name == u'collection' + assert actual.b_path is None + assert actual.api == galaxy_server + assert actual.skip is False + assert actual.versions == set([u'2.0.1']) + assert actual.latest_version == u'2.0.1' + assert actual.dependencies == {} + + assert mock_get_versions.call_count == 1 + assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection') + + assert mock_get_info.call_count == 1 + assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1') + + +def 
test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch): + mock_get_versions = MagicMock() + mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5'] + monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions) + + actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '!=2.0.2', + True, True) + + assert actual.namespace == u'namespace' + assert actual.name == u'collection' + assert actual.b_path is None + assert actual.api == galaxy_server + assert actual.skip is False + assert actual.versions == set([u'2.0.0', u'2.0.1', u'2.0.3', u'2.0.4', u'2.0.5']) + assert actual.latest_version == u'2.0.5' + assert actual.dependencies == {} + + assert mock_get_versions.call_count == 1 + assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection') + + +@pytest.mark.parametrize('versions, requirement, expected_filter, expected_latest', [ + [['1.0.0', '1.0.1'], '*', ['1.0.0', '1.0.1'], '1.0.1'], + [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<1.1.0', ['1.0.5'], '1.0.5'], + [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<=1.0.5', ['1.0.5'], '1.0.5'], + [['1.0.0', '1.0.5', '1.1.0'], '>=1.1.0', ['1.1.0'], '1.1.0'], + [['1.0.0', '1.0.5', '1.1.0'], '!=1.1.0', ['1.0.0', '1.0.5'], '1.0.5'], + [['1.0.0', '1.0.5', '1.1.0'], '==1.0.5', ['1.0.5'], '1.0.5'], + [['1.0.0', '1.0.5', '1.1.0'], '1.0.5', ['1.0.5'], '1.0.5'], + [['1.0.0', '2.0.0', '3.0.0'], '>=2', ['2.0.0', '3.0.0'], '3.0.0'], +]) +def test_add_collection_requirements(versions, requirement, expected_filter, expected_latest): + req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', versions, requirement, + False) + assert req.versions == set(expected_filter) + assert req.latest_version == expected_latest + + +def test_add_collection_requirement_to_unknown_installed_version(monkeypatch): + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False, + skip=True) + + req.add_requirement('parent.collection', '1.0.0') + assert req.latest_version == '*' + + assert mock_display.call_count == 1 + + actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n')) + assert "Failed to validate the collection requirement 'namespace.name:1.0.0' for parent.collection" in actual_warn + + +def test_add_collection_wildcard_requirement_to_unknown_installed_version(): + req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False, + skip=True) + req.add_requirement(str(req), '*') + + assert req.versions == set('*') + assert req.latest_version == '*' + + +def test_add_collection_requirement_with_conflict(galaxy_server): + expected = "Cannot meet requirement ==1.0.2 for dependency namespace.name from source '%s'. 
Available versions " \ + "before last requirement added: 1.0.0, 1.0.1\n" \ + "Requirements from:\n" \ + "\tbase - 'namespace.name:==1.0.2'" % galaxy_server.api_server + with pytest.raises(AnsibleError, match=expected): + collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '==1.0.2', + False) + + +def test_add_requirement_to_existing_collection_with_conflict(galaxy_server): + req = collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '*', False) + + expected = "Cannot meet dependency requirement 'namespace.name:1.0.2' for collection namespace.collection2 from " \ + "source '%s'. Available versions before last requirement added: 1.0.0, 1.0.1\n" \ + "Requirements from:\n" \ + "\tbase - 'namespace.name:*'\n" \ + "\tnamespace.collection2 - 'namespace.name:1.0.2'" % galaxy_server.api_server + with pytest.raises(AnsibleError, match=re.escape(expected)): + req.add_requirement('namespace.collection2', '1.0.2') + + +def test_add_requirement_to_installed_collection_with_conflict(): + source = 'https://galaxy.ansible.com' + req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False, + skip=True) + + expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \ + "Use --force to overwrite" + with pytest.raises(AnsibleError, match=re.escape(expected)): + req.add_requirement(None, '1.0.2') + + +def test_add_requirement_to_installed_collection_with_conflict_as_dep(): + source = 'https://galaxy.ansible.com' + req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False, + skip=True) + + expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. 
" \ + "Use --force-with-deps to overwrite" + with pytest.raises(AnsibleError, match=re.escape(expected)): + req.add_requirement('namespace.collection2', '1.0.2') + + +def test_install_skipped_collection(monkeypatch): + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + req = collection.CollectionRequirement('namespace', 'name', None, 'source', ['1.0.0'], '*', False, skip=True) + req.install(None, None) + + assert mock_display.call_count == 1 + assert mock_display.mock_calls[0][1][0] == "Skipping 'namespace.name' as it is already installed" + + +def test_install_collection(collection_artifact, monkeypatch): + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + collection_tar = collection_artifact[1] + output_path = os.path.join(os.path.split(collection_tar)[0], b'output') + collection_path = os.path.join(output_path, b'ansible_namespace', b'collection') + os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir + + temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp') + os.makedirs(temp_path) + + req = collection.CollectionRequirement.from_tar(collection_tar, True, True) + req.install(to_text(output_path), temp_path) + + # Ensure the temp directory is empty, nothing is left behind + assert os.listdir(temp_path) == [] + + actual_files = os.listdir(collection_path) + actual_files.sort() + assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles', + b'runme.sh'] + + assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755 + assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644 + assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755 + + assert mock_display.call_count == 2 + assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \ + % to_text(collection_path) + assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully" + + +def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch): + collection_tar = collection_artifact[1] + output_path = os.path.join(os.path.split(collection_tar)[0], b'output') + collection_path = os.path.join(output_path, b'ansible_namespace', b'collection') + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + mock_download = MagicMock() + mock_download.return_value = collection_tar + monkeypatch.setattr(collection, '_download_file', mock_download) + + monkeypatch.setattr(galaxy_server, '_available_api_versions', {'v2': 'v2/'}) + temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp') + os.makedirs(temp_path) + + meta = api.CollectionVersionMetadata('ansible_namespace', 'collection', '0.1.0', 'https://downloadme.com', + 'myhash', {}) + req = collection.CollectionRequirement('ansible_namespace', 'collection', None, galaxy_server, + ['0.1.0'], '*', False, metadata=meta) + req.install(to_text(output_path), temp_path) + + # Ensure the temp directory is empty, nothing is left behind + assert os.listdir(temp_path) == [] + + actual_files = os.listdir(collection_path) + actual_files.sort() + assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles', + b'runme.sh'] + + assert mock_display.call_count == 2 + assert mock_display.mock_calls[0][1][0] == 
"Installing 'ansible_namespace.collection:0.1.0' to '%s'" \ + % to_text(collection_path) + assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully" + + assert mock_download.call_count == 1 + assert mock_download.mock_calls[0][1][0] == 'https://downloadme.com' + assert mock_download.mock_calls[0][1][1] == temp_path + assert mock_download.mock_calls[0][1][2] == 'myhash' + assert mock_download.mock_calls[0][1][3] is True + + +def test_install_collections_from_tar(collection_artifact, monkeypatch): + collection_path, collection_tar = collection_artifact + temp_path = os.path.split(collection_tar)[0] + shutil.rmtree(collection_path) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path), + [u'https://galaxy.ansible.com'], True, False, False, False, False) + + assert os.path.isdir(collection_path) + + actual_files = os.listdir(collection_path) + actual_files.sort() + assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles', + b'runme.sh'] + + with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj: + actual_manifest = json.loads(to_text(manifest_obj.read())) + + assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace' + assert actual_manifest['collection_info']['name'] == 'collection' + assert actual_manifest['collection_info']['version'] == '0.1.0' + + # Filter out the progress cursor display calls. + display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1] + assert len(display_msgs) == 4 + assert display_msgs[0] == "Process install dependency map" + assert display_msgs[1] == "Starting collection install process" + assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path) + + +def test_install_collections_existing_without_force(collection_artifact, monkeypatch): + collection_path, collection_tar = collection_artifact + temp_path = os.path.split(collection_tar)[0] + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + # If we don't delete collection_path it will think the original build skeleton is installed so we expect a skip + collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path), + [u'https://galaxy.ansible.com'], True, False, False, False, False) + + assert os.path.isdir(collection_path) + + actual_files = os.listdir(collection_path) + actual_files.sort() + assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh'] + + # Filter out the progress cursor display calls. 
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1] + assert len(display_msgs) == 3 + + assert display_msgs[0] == "Process install dependency map" + assert display_msgs[1] == "Starting collection install process" + assert display_msgs[2] == "Skipping 'ansible_namespace.collection' as it is already installed" + + for msg in display_msgs: + assert 'WARNING' not in msg + + +def test_install_missing_metadata_warning(collection_artifact, monkeypatch): + collection_path, collection_tar = collection_artifact + temp_path = os.path.split(collection_tar)[0] + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + for file in [b'MANIFEST.json', b'galaxy.yml']: + b_path = os.path.join(collection_path, file) + if os.path.isfile(b_path): + os.unlink(b_path) + + collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path), + [u'https://galaxy.ansible.com'], True, False, False, False, False) + + display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1] + + assert 'WARNING' in display_msgs[0] + + +# Makes sure we don't get stuck in some recursive loop +@pytest.mark.parametrize('collection_artifact', [ + {'ansible_namespace.collection': '>=0.0.1'}, +], indirect=True) +def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch): + collection_path, collection_tar = collection_artifact + temp_path = os.path.split(collection_tar)[0] + shutil.rmtree(collection_path) + + mock_display = MagicMock() + monkeypatch.setattr(Display, 'display', mock_display) + + collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path), + [u'https://galaxy.ansible.com'], True, False, False, False, False) + + assert os.path.isdir(collection_path) + + actual_files = os.listdir(collection_path) + actual_files.sort() + assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles', + b'runme.sh'] + + with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj: + actual_manifest = json.loads(to_text(manifest_obj.read())) + + assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace' + assert actual_manifest['collection_info']['name'] == 'collection' + assert actual_manifest['collection_info']['version'] == '0.1.0' + + # Filter out the progress cursor display calls. 
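+ # (the progress wheel passes a newline kwarg to display(), so dropping any
+ # call whose kwargs mention 'newline' keeps only the plain status messages)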
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1] + assert len(display_msgs) == 4 + assert display_msgs[0] == "Process install dependency map" + assert display_msgs[1] == "Starting collection install process" + assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path) + assert display_msgs[3] == "ansible_namespace.collection (0.1.0) was installed successfully" diff --git a/test/units/galaxy/test_token.py b/test/units/galaxy/test_token.py new file mode 100644 index 00000000..94449e28 --- /dev/null +++ b/test/units/galaxy/test_token.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pytest + +import ansible.constants as C +from ansible.galaxy.token import GalaxyToken, NoTokenSentinel +from ansible.module_utils._text import to_bytes, to_text + + +@pytest.fixture() +def b_token_file(request, tmp_path_factory): + b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Token')) + b_token_path = os.path.join(b_test_dir, b"token.yml") + + token = getattr(request, 'param', None) + if token: + with open(b_token_path, 'wb') as token_fd: + token_fd.write(b"token: %s" % to_bytes(token)) + + orig_token_path = C.GALAXY_TOKEN_PATH + C.GALAXY_TOKEN_PATH = to_text(b_token_path) + try: + yield b_token_path + finally: + C.GALAXY_TOKEN_PATH = orig_token_path + + +def test_token_explicit(b_token_file): + assert GalaxyToken(token="explicit").get() == "explicit" + + +@pytest.mark.parametrize('b_token_file', ['file'], indirect=True) +def test_token_explicit_override_file(b_token_file): + assert GalaxyToken(token="explicit").get() == "explicit" + + +@pytest.mark.parametrize('b_token_file', ['file'], indirect=True) +def test_token_from_file(b_token_file): + assert GalaxyToken().get() == "file" + + +def test_token_from_file_missing(b_token_file): + assert GalaxyToken().get() is None + + +@pytest.mark.parametrize('b_token_file', ['file'], indirect=True) +def test_token_none(b_token_file): + assert GalaxyToken(token=NoTokenSentinel).get() is None diff --git a/test/units/galaxy/test_user_agent.py b/test/units/galaxy/test_user_agent.py new file mode 100644 index 00000000..da0103f3 --- /dev/null +++ b/test/units/galaxy/test_user_agent.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import platform + +from ansible.galaxy import user_agent +from ansible.module_utils.ansible_release import __version__ as ansible_version + + +def test_user_agent(): + res = user_agent.user_agent() + assert res.startswith('ansible-galaxy/%s' % ansible_version) + assert platform.system() in res + assert 'python:' in res diff --git a/test/units/inventory/test_group.py b/test/units/inventory/test_group.py new file mode 100644 index 00000000..e8f1c0b0 --- /dev/null +++ b/test/units/inventory/test_group.py @@ -0,0 +1,155 @@ +# Copyright 2018 Alan Rominger +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest + +from ansible.inventory.group import Group +from ansible.inventory.host import Host +from ansible.errors import AnsibleError + + +class TestGroup(unittest.TestCase): + + def test_depth_update(self): + A = Group('A') + B = Group('B') + Z = Group('Z') + A.add_child_group(B) + A.add_child_group(Z) + self.assertEqual(A.depth, 0) + self.assertEqual(Z.depth, 1) + self.assertEqual(B.depth, 1) + + def test_depth_update_dual_branches(self): + alpha = Group('alpha') + A = Group('A') + alpha.add_child_group(A) + B = Group('B') + A.add_child_group(B) + Z = Group('Z') + alpha.add_child_group(Z) + beta = Group('beta') + B.add_child_group(beta) + Z.add_child_group(beta) + + self.assertEqual(alpha.depth, 0) # apex + self.assertEqual(beta.depth, 3) # alpha -> A -> B -> beta + + omega = Group('omega') + omega.add_child_group(alpha) + + # verify that both paths are traversed to get the max depth value + self.assertEqual(B.depth, 3) # omega -> alpha -> A -> B + self.assertEqual(beta.depth, 4) # B -> beta + + def test_depth_recursion(self): + A = Group('A') + B = Group('B') + A.add_child_group(B) + # hypothetical of adding A as a child group of B + A.parent_groups.append(B) + B.child_groups.append(A) + # can't update depths of groups, because of loop + with self.assertRaises(AnsibleError): + B._check_children_depth() + + def test_loop_detection(self): + A = Group('A') + B = Group('B') + C = Group('C') + A.add_child_group(B) + B.add_child_group(C) + with self.assertRaises(AnsibleError): + C.add_child_group(A) + + def test_direct_host_ordering(self): + """Hosts are returned in the order they are added + """ + group = Group('A') + # host names not added in alphabetical order + host_name_list = ['z', 'b', 'c', 'a', 'p', 'q'] + expected_hosts = [] + for host_name in host_name_list: + h = Host(host_name) + group.add_host(h) + expected_hosts.append(h) + assert group.get_hosts() == expected_hosts + + def test_sub_group_host_ordering(self): + """With multiple nested groups, asserts that hosts are returned + in deterministic order + """ + top_group = Group('A') + expected_hosts = [] + for name in ['z', 'b', 'c', 'a', 'p', 'q']: + child = Group('group_{0}'.format(name)) + top_group.add_child_group(child) + host = Host('host_{0}'.format(name)) + child.add_host(host) + expected_hosts.append(host) + assert top_group.get_hosts() == expected_hosts + + def test_populates_descendant_hosts(self): + A = Group('A') + B = Group('B') + C = Group('C') + h = Host('h') + C.add_host(h) + A.add_child_group(B) # B is child of A + B.add_child_group(C) # C is descendant of A + A.add_child_group(B) + self.assertEqual(set(h.groups), set([C, B, A])) + h2 = Host('h2') + C.add_host(h2) + self.assertEqual(set(h2.groups), set([C, B, A])) + + def test_ancestor_example(self): + # see docstring for Group._walk_relationship + groups = {} + for name in ['A', 'B', 'C', 'D', 'E', 'F']: + groups[name] = Group(name) + # first row + groups['A'].add_child_group(groups['D']) +
groups['B'].add_child_group(groups['D']) + groups['B'].add_child_group(groups['E']) + groups['C'].add_child_group(groups['D']) + # second row + groups['D'].add_child_group(groups['E']) + groups['D'].add_child_group(groups['F']) + groups['E'].add_child_group(groups['F']) + + self.assertEqual( + set(groups['F'].get_ancestors()), + set([ + groups['A'], groups['B'], groups['C'], groups['D'], groups['E'] + ]) + ) + + def test_ancestors_recursive_loop_safe(self): + ''' + The get_ancestors method may be referenced before circular parenting + checks, so the method is expected to be stable even with loops + ''' + A = Group('A') + B = Group('B') + A.parent_groups.append(B) + B.parent_groups.append(A) + # finishes in finite time + self.assertEqual(A.get_ancestors(), set([A, B])) diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py new file mode 100644 index 00000000..c8f47714 --- /dev/null +++ b/test/units/inventory/test_host.py @@ -0,0 +1,112 @@ +# Copyright 2015 Marius Gedminas +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# for __setstate__/__getstate__ tests + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pickle + +from units.compat import unittest + +from ansible.inventory.group import Group +from ansible.inventory.host import Host +from ansible.module_utils.six import string_types + + +class TestHost(unittest.TestCase): + ansible_port = 22 + + def setUp(self): + self.hostA = Host('a') + self.hostB = Host('b') + + def test_equality(self): + self.assertEqual(self.hostA, self.hostA) + self.assertNotEqual(self.hostA, self.hostB) + self.assertNotEqual(self.hostA, Host('a')) + + def test_hashability(self): + # equality implies the hash values are the same + self.assertEqual(hash(self.hostA), hash(Host('a'))) + + def test_get_vars(self): + host_vars = self.hostA.get_vars() + self.assertIsInstance(host_vars, dict) + + def test_repr(self): + host_repr = repr(self.hostA) + self.assertIsInstance(host_repr, string_types) + + def test_add_group(self): + group = Group('some_group') + group_len = len(self.hostA.groups) + self.hostA.add_group(group) + self.assertEqual(len(self.hostA.groups), group_len + 1) + + def test_get_groups(self): + group = Group('some_group') + self.hostA.add_group(group) + groups = self.hostA.get_groups() + self.assertEqual(len(groups), 1) + for _group in groups: + self.assertIsInstance(_group, Group) + + def test_equals_none(self): + other = None + self.hostA == other + other == self.hostA + self.hostA != other + other != self.hostA + self.assertNotEqual(self.hostA, other) + + def test_serialize(self): + group = Group('some_group') + self.hostA.add_group(group) + data = self.hostA.serialize() + self.assertIsInstance(data, dict) + + def test_serialize_then_deserialize(self): + group = Group('some_group') + self.hostA.add_group(group) + hostA_data = self.hostA.serialize() + + hostA_clone = Host() + 
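+ # (Host equality is uuid-based, and serialize()/deserialize() round-trip the
+ # uuid, which is why this freshly constructed clone compares equal below)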
hostA_clone.deserialize(hostA_data) + self.assertEqual(self.hostA, hostA_clone) + + def test_set_state(self): + group = Group('some_group') + self.hostA.add_group(group) + + pickled_hostA = pickle.dumps(self.hostA) + + hostA_clone = pickle.loads(pickled_hostA) + self.assertEqual(self.hostA, hostA_clone) + + +class TestHostWithPort(TestHost): + ansible_port = 8822 + + def setUp(self): + self.hostA = Host(name='a', port=self.ansible_port) + self.hostB = Host(name='b', port=self.ansible_port) + + def test_get_vars_ansible_port(self): + host_vars = self.hostA.get_vars() + self.assertEqual(host_vars['ansible_port'], self.ansible_port) diff --git a/test/units/inventory_test_data/group_vars/noparse/all.yml~ b/test/units/inventory_test_data/group_vars/noparse/all.yml~ new file mode 100644 index 00000000..6f52f114 --- /dev/null +++ b/test/units/inventory_test_data/group_vars/noparse/all.yml~ @@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: False diff --git a/test/units/inventory_test_data/group_vars/noparse/file.txt b/test/units/inventory_test_data/group_vars/noparse/file.txt new file mode 100644 index 00000000..6f52f114 --- /dev/null +++ b/test/units/inventory_test_data/group_vars/noparse/file.txt @@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: False diff --git a/test/units/inventory_test_data/group_vars/parse/all.yml b/test/units/inventory_test_data/group_vars/parse/all.yml new file mode 100644 index 00000000..8687c86c --- /dev/null +++ b/test/units/inventory_test_data/group_vars/parse/all.yml @@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: True diff --git a/test/units/mock/__init__.py b/test/units/mock/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py new file mode 100644 index 00000000..c47ec39e --- /dev/null +++ b/test/units/mock/loader.py @@ -0,0 +1,117 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
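+ # DictDataLoader (defined below) fakes DataLoader's filesystem access from an
+ # in-memory dict mapping paths to file contents; a typical use in a unit test,
+ # with purely illustrative paths, looks like:
+ #
+ # fake_loader = DictDataLoader({'/play/roles/x/tasks/main.yml': '- debug: msg=hi'})
+ # assert fake_loader.path_exists('/play/roles/x/tasks/main.yml')
+ # assert fake_loader.is_directory('/play/roles/x/tasks')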
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.errors import AnsibleParserError +from ansible.parsing.dataloader import DataLoader +from ansible.module_utils._text import to_bytes, to_text + + +class DictDataLoader(DataLoader): + + def __init__(self, file_mapping=None): + file_mapping = {} if file_mapping is None else file_mapping + assert type(file_mapping) == dict + + super(DictDataLoader, self).__init__() + + self._file_mapping = file_mapping + self._build_known_directories() + self._vault_secrets = None + + def load_from_file(self, path, cache=True, unsafe=False): + data = None + path = to_text(path) + if path in self._file_mapping: + data = self.load(self._file_mapping[path], path) + return data + + # TODO: the real _get_file_contents returns a bytestring, so we actually convert the + # unicode/text it's created with to utf-8 + def _get_file_contents(self, path): + path = to_text(path) + if path in self._file_mapping: + return (to_bytes(self._file_mapping[path]), False) + else: + raise AnsibleParserError("file not found: %s" % path) + + def path_exists(self, path): + path = to_text(path) + return path in self._file_mapping or path in self._known_directories + + def is_file(self, path): + path = to_text(path) + return path in self._file_mapping + + def is_directory(self, path): + path = to_text(path) + return path in self._known_directories + + def list_directory(self, path): + ret = [] + path = to_text(path) + for x in (list(self._file_mapping.keys()) + self._known_directories): + if x.startswith(path): + if os.path.dirname(x) == path: + ret.append(os.path.basename(x)) + return ret + + def is_executable(self, path): + # FIXME: figure out a way to make paths return true for this + return False + + def _add_known_directory(self, directory): + if directory not in self._known_directories: + self._known_directories.append(directory) + + def _build_known_directories(self): + self._known_directories = [] + for path in self._file_mapping: + dirname = os.path.dirname(path) + while dirname not in ('/', ''): + self._add_known_directory(dirname) + dirname = os.path.dirname(dirname) + + def push(self, path, content): + rebuild_dirs = False + if path not in self._file_mapping: + rebuild_dirs = True + + self._file_mapping[path] = content + + if rebuild_dirs: + self._build_known_directories() + + def pop(self, path): + if path in self._file_mapping: + del self._file_mapping[path] + self._build_known_directories() + + def clear(self): + self._file_mapping = dict() + self._known_directories = [] + + def get_basedir(self): + return os.getcwd() + + def set_vault_secrets(self, vault_secrets): + self._vault_secrets = vault_secrets diff --git a/test/units/mock/path.py b/test/units/mock/path.py new file mode 100644 index 00000000..721dc293 --- /dev/null +++ b/test/units/mock/path.py @@ -0,0 +1,8 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import MagicMock +from ansible.utils.path import unfrackpath + + +mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) diff --git a/test/units/mock/procenv.py b/test/units/mock/procenv.py new file mode 100644 index 00000000..271a207e --- /dev/null +++ b/test/units/mock/procenv.py @@ -0,0 +1,90 @@ +# (c) 2016, Matt Davis +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify 
+# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import json + +from contextlib import contextmanager +from io import BytesIO, StringIO +from units.compat import unittest +from ansible.module_utils.six import PY3 +from ansible.module_utils._text import to_bytes + + +@contextmanager +def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): + """ + context manager that temporarily masks the test runner's values for stdin and argv + """ + real_stdin = sys.stdin + real_argv = sys.argv + + if PY3: + fake_stream = StringIO(stdin_data) + fake_stream.buffer = BytesIO(to_bytes(stdin_data)) + else: + fake_stream = BytesIO(to_bytes(stdin_data)) + + try: + sys.stdin = fake_stream + sys.argv = argv_data + + yield + finally: + sys.stdin = real_stdin + sys.argv = real_argv + + +@contextmanager +def swap_stdout(): + """ + context manager that temporarily replaces stdout for tests that need to verify output + """ + old_stdout = sys.stdout + + if PY3: + fake_stream = StringIO() + else: + fake_stream = BytesIO() + + try: + sys.stdout = fake_stream + + yield fake_stream + finally: + sys.stdout = old_stdout + + +class ModuleTestCase(unittest.TestCase): + def setUp(self, module_args=None): + if module_args is None: + module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} + + args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) + + # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually + self.stdin_swap = swap_stdin_and_argv(stdin_data=args) + self.stdin_swap.__enter__() + + def tearDown(self): + # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually + self.stdin_swap.__exit__(None, None, None) diff --git a/test/units/mock/vault_helper.py b/test/units/mock/vault_helper.py new file mode 100644 index 00000000..dcce9c78 --- /dev/null +++ b/test/units/mock/vault_helper.py @@ -0,0 +1,39 @@ +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils._text import to_bytes + +from ansible.parsing.vault import VaultSecret + + +class TextVaultSecret(VaultSecret): + '''A secret piece of text. ie, a password. Tracks text encoding. 
+ + The text encoding of the text may not be the default text encoding so + we keep track of the encoding so we encode it to the same bytes.''' + + def __init__(self, text, encoding=None, errors=None, _bytes=None): + super(TextVaultSecret, self).__init__() + self.text = text + self.encoding = encoding or 'utf-8' + self._bytes = _bytes + self.errors = errors or 'strict' + + @property + def bytes(self): + '''The text encoded with encoding, unless we specifically set _bytes.''' + return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/test/units/mock/yaml_helper.py b/test/units/mock/yaml_helper.py new file mode 100644 index 00000000..1ef17215 --- /dev/null +++ b/test/units/mock/yaml_helper.py @@ -0,0 +1,124 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import io +import yaml + +from ansible.module_utils.six import PY3 +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.yaml.dumper import AnsibleDumper + + +class YamlTestUtils(object): + """Mixin class to combine with a unittest.TestCase subclass.""" + def _loader(self, stream): + """Vault related tests will want to override this. + + Vault cases should set up an AnsibleLoader that has the vault password.""" + return AnsibleLoader(stream) + + def _dump_stream(self, obj, stream, dumper=None): + """Dump to a py2-unicode or py3-string stream.""" + if PY3: + return yaml.dump(obj, stream, Dumper=dumper) + else: + return yaml.dump(obj, stream, Dumper=dumper, encoding=None) + + def _dump_string(self, obj, dumper=None): + """Dump to a py2-unicode or py3-string""" + if PY3: + return yaml.dump(obj, Dumper=dumper) + else: + return yaml.dump(obj, Dumper=dumper, encoding=None) + + def _dump_load_cycle(self, obj): + # Each pass through a dump or load revs the 'generation' + # obj to yaml string + string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper) + + # wrap a stream/file like StringIO around that yaml + stream_from_object_dump = io.StringIO(string_from_object_dump) + loader = self._loader(stream_from_object_dump) + # load the yaml stream to create a new instance of the object (gen 2) + obj_2 = loader.get_data() + + # dump the gen 2 objects directly to strings + string_from_object_dump_2 = self._dump_string(obj_2, + dumper=AnsibleDumper) + + # The gen 1 and gen 2 yaml strings + self.assertEqual(string_from_object_dump, string_from_object_dump_2) + # the gen 1 (orig) and gen 2 py object + self.assertEqual(obj, obj_2) + + # again! gen 3... load strings into py objects + stream_3 = io.StringIO(string_from_object_dump_2) + loader_3 = self._loader(stream_3) + obj_3 = loader_3.get_data() + + string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper) + + self.assertEqual(obj, obj_3) + # should be transitive, but...
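+ # (obj == obj_2 and obj == obj_3 are asserted above, so checking obj_2
+ # against obj_3 directly documents the expectation instead of relying on
+ # transitivity)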
+ self.assertEqual(obj_2, obj_3) + self.assertEqual(string_from_object_dump, string_from_object_dump_3) + + def _old_dump_load_cycle(self, obj): + '''Dump the passed in object to yaml, load it back up, dump again, compare.''' + stream = io.StringIO() + + yaml_string = self._dump_string(obj, dumper=AnsibleDumper) + self._dump_stream(obj, stream, dumper=AnsibleDumper) + + yaml_string_from_stream = stream.getvalue() + + # reset stream + stream.seek(0) + + loader = self._loader(stream) + # loader = AnsibleLoader(stream, vault_password=self.vault_password) + obj_from_stream = loader.get_data() + + stream_from_string = io.StringIO(yaml_string) + loader2 = self._loader(stream_from_string) + # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password) + obj_from_string = loader2.get_data() + + stream_obj_from_stream = io.StringIO() + stream_obj_from_string = io.StringIO() + + if PY3: + yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper) + yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper) + else: + yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None) + yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None) + + yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue() + yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue() + + stream_obj_from_stream.seek(0) + stream_obj_from_string.seek(0) + + if PY3: + yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper) + yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper) + else: + yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None) + yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None) + + assert yaml_string == yaml_string_obj_from_stream + assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string + assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == + yaml_string_stream_obj_from_string) + assert obj == obj_from_stream + assert obj == obj_from_string + assert obj == yaml_string_obj_from_stream + assert obj == yaml_string_obj_from_string + assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string + return {'obj': obj, + 'yaml_string': yaml_string, + 'yaml_string_from_stream': yaml_string_from_stream, + 'obj_from_stream': obj_from_stream, + 'obj_from_string': obj_from_string, + 'yaml_string_obj_from_string': yaml_string_obj_from_string} diff --git a/test/units/module_utils/__init__.py b/test/units/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/basic/__init__.py b/test/units/module_utils/basic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/basic/test__log_invocation.py b/test/units/module_utils/basic/test__log_invocation.py new file mode 100644 index 00000000..3beda8bd --- /dev/null +++ b/test/units/module_utils/basic/test__log_invocation.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# (c) 2016, James Cammarata +# (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + + +ARGS = dict(foo=False, bar=[1, 2, 3], bam="bam", baz=u'baz') +ARGUMENT_SPEC = 
dict( + foo=dict(default=True, type='bool'), + bar=dict(default=[], type='list'), + bam=dict(default="bam"), + baz=dict(default=u"baz"), + password=dict(default=True), + no_log=dict(default="you shouldn't see me", no_log=True), +) + + +@pytest.mark.parametrize('am, stdin', [(ARGUMENT_SPEC, ARGS)], indirect=['am', 'stdin']) +def test_module_utils_basic__log_invocation(am, mocker): + + am.log = mocker.MagicMock() + am._log_invocation() + + # Message is generated from a dict so it will be in an unknown order. + # have to check this manually rather than with assert_called_with() + args = am.log.call_args[0] + assert len(args) == 1 + message = args[0] + + assert len(message) == \ + len('Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD') + + assert message.startswith('Invoked with ') + assert ' bam=bam' in message + assert ' bar=[1, 2, 3]' in message + assert ' foo=False' in message + assert ' baz=baz' in message + assert ' no_log=NOT_LOGGING_PARAMETER' in message + assert ' password=NOT_LOGGING_PASSWORD' in message + + kwargs = am.log.call_args[1] + assert kwargs == \ + dict(log_args={ + 'foo': 'False', + 'bar': '[1, 2, 3]', + 'bam': 'bam', + 'baz': 'baz', + 'password': 'NOT_LOGGING_PASSWORD', + 'no_log': 'NOT_LOGGING_PARAMETER', + }) diff --git a/test/units/module_utils/basic/test__symbolic_mode_to_octal.py b/test/units/module_utils/basic/test__symbolic_mode_to_octal.py new file mode 100644 index 00000000..7793b348 --- /dev/null +++ b/test/units/module_utils/basic/test__symbolic_mode_to_octal.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright: +# (c) 2012-2014, Michael DeHaan +# (c) 2016-2017 Ansible Project +# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.module_utils.basic import AnsibleModule + + +# +# Info helpful for making new test cases: +# +# base_mode = {'dir no perms': 0o040000, +# 'file no perms': 0o100000, +# 'dir all perms': 0o040000 | 0o777, +# 'file all perms': 0o100000 | 0o777} +# +# perm_bits = {'x': 0b001, +# 'w': 0b010, +# 'r': 0b100} +# +# role_shift = {'u': 6, +# 'g': 3, +# 'o': 0} + +DATA = ( # Going from no permissions to setting all for user, group, and/or other + (0o040000, u'a+rwx', 0o0777), + (0o040000, u'u+rwx,g+rwx,o+rwx', 0o0777), + (0o040000, u'o+rwx', 0o0007), + (0o040000, u'g+rwx', 0o0070), + (0o040000, u'u+rwx', 0o0700), + + # Going from all permissions to none for user, group, and/or other + (0o040777, u'a-rwx', 0o0000), + (0o040777, u'u-rwx,g-rwx,o-rwx', 0o0000), + (0o040777, u'o-rwx', 0o0770), + (0o040777, u'g-rwx', 0o0707), + (0o040777, u'u-rwx', 0o0077), + + # now using absolute assignment from None to a set of perms + (0o040000, u'a=rwx', 0o0777), + (0o040000, u'u=rwx,g=rwx,o=rwx', 0o0777), + (0o040000, u'o=rwx', 0o0007), + (0o040000, u'g=rwx', 0o0070), + (0o040000, u'u=rwx', 0o0700), + + # X effect on files and dirs + (0o040000, u'a+X', 0o0111), + (0o100000, u'a+X', 0), + (0o040000, u'a=X', 0o0111), + (0o100000, u'a=X', 0), + (0o040777, u'a-X', 0o0666), + # Same as chmod but is it a bug?
+ # chmod a-X statfile <== removes execute from statfile + (0o100777, u'a-X', 0o0666), + + # Multiple permissions + (0o040000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0755), + (0o100000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0644), +) + +UMASK_DATA = ( + (0o100000, '+rwx', 0o770), + (0o100777, '-rwx', 0o007), +) + +INVALID_DATA = ( + (0o040000, u'a=foo', "bad symbolic permission for mode: a=foo"), + (0o040000, u'f=rwx', "bad symbolic permission for mode: f=rwx"), +) + + +@pytest.mark.parametrize('stat_info, mode_string, expected', DATA) +def test_good_symbolic_modes(mocker, stat_info, mode_string, expected): + mock_stat = mocker.MagicMock() + mock_stat.st_mode = stat_info + assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected + + +@pytest.mark.parametrize('stat_info, mode_string, expected', UMASK_DATA) +def test_umask_with_symbolic_modes(mocker, stat_info, mode_string, expected): + mock_umask = mocker.patch('os.umask') + mock_umask.return_value = 0o7 + + mock_stat = mocker.MagicMock() + mock_stat.st_mode = stat_info + + assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected + + +@pytest.mark.parametrize('stat_info, mode_string, expected', INVALID_DATA) +def test_invalid_symbolic_modes(mocker, stat_info, mode_string, expected): + mock_stat = mocker.MagicMock() + mock_stat.st_mode = stat_info + with pytest.raises(ValueError) as exc: + assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == 'blah' + assert exc.match(expected) diff --git a/test/units/module_utils/basic/test_argument_spec.py b/test/units/module_utils/basic/test_argument_spec.py new file mode 100644 index 00000000..6e297669 --- /dev/null +++ b/test/units/module_utils/basic/test_argument_spec.py @@ -0,0 +1,706 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import os + +import pytest + +from units.compat.mock import MagicMock +from ansible.module_utils import basic +from ansible.module_utils.api import basic_auth_argument_spec, rate_limit_argument_spec, retry_argument_spec +from ansible.module_utils.common.warnings import get_deprecation_messages, get_warning_messages +from ansible.module_utils.six import integer_types, string_types +from ansible.module_utils.six.moves import builtins + + +MOCK_VALIDATOR_FAIL = MagicMock(side_effect=TypeError("bad conversion")) +# Data is argspec, argument, expected +VALID_SPECS = ( + # Simple type=int + ({'arg': {'type': 'int'}}, {'arg': 42}, 42), + # Simple type=int with a large value (will be of type long under Python 2) + ({'arg': {'type': 'int'}}, {'arg': 18765432109876543210}, 18765432109876543210), + # Simple type=list, elements=int + ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [42, 32]}, [42, 32]), + # Type=int with conversion from string + ({'arg': {'type': 'int'}}, {'arg': '42'}, 42), + # Type=list elements=int with conversion from string + ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': ['42', '32']}, [42, 32]), + # Simple type=float + ({'arg': {'type': 'float'}}, {'arg': 42.0}, 42.0), + # Simple type=list, elements=float + ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': [42.1, 32.2]}, [42.1, 32.2]), + # Type=float conversion from int + ({'arg': {'type': 'float'}}, {'arg': 42}, 42.0), + # type=list, elements=float conversion from int + 
({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': [42, 32]}, [42.0, 32.0]), + # Type=float conversion from string + ({'arg': {'type': 'float'}}, {'arg': '42.0'}, 42.0), + # type=list, elements=float conversion from string + ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': ['42.1', '32.2']}, [42.1, 32.2]), + # Type=float conversion from string without decimal point + ({'arg': {'type': 'float'}}, {'arg': '42'}, 42.0), + # Type=list elements=float conversion from string without decimal point + ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': ['42', '32.2']}, [42.0, 32.2]), + # Simple type=bool + ({'arg': {'type': 'bool'}}, {'arg': True}, True), + # Simple type=list elements=bool + ({'arg': {'type': 'list', 'elements': 'bool'}}, {'arg': [True, 'true', 1, 'yes', False, 'false', 'no', 0]}, + [True, True, True, True, False, False, False, False]), + # Type=bool with conversion from string + ({'arg': {'type': 'bool'}}, {'arg': 'yes'}, True), + # Type=str converts to string + ({'arg': {'type': 'str'}}, {'arg': 42}, '42'), + # Type=list elements=str simple converts to string + ({'arg': {'type': 'list', 'elements': 'str'}}, {'arg': ['42', '32']}, ['42', '32']), + # Type is implicit, converts to string + ({'arg': {'type': 'str'}}, {'arg': 42}, '42'), + # Type=list elements=str implicit converts to string + ({'arg': {'type': 'list', 'elements': 'str'}}, {'arg': [42, 32]}, ['42', '32']), + # parameter is required + ({'arg': {'required': True}}, {'arg': 42}, '42'), +) + +INVALID_SPECS = ( + # Type is int; unable to convert this string + ({'arg': {'type': 'int'}}, {'arg': "wolf"}, "is of type {0} and we were unable to convert to int: {0} cannot be converted to an int".format(type('bad'))), + # Type is list elements is int; unable to convert this string + ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [1, "bad"]}, "is of type {0} and we were unable to convert to int: {0} cannot be converted to " + "an int".format(type('int'))), + # Type is int; unable to convert float + ({'arg': {'type': 'int'}}, {'arg': 42.1}, "'float'> cannot be converted to an int"), + # Type is list, elements is int; unable to convert float + ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [42.1, 32, 2]}, "'float'> cannot be converted to an int"), + # type is a callable that fails to convert + ({'arg': {'type': MOCK_VALIDATOR_FAIL}}, {'arg': "bad"}, "bad conversion"), + # type is a list, elements is callable that fails to convert + ({'arg': {'type': 'list', 'elements': MOCK_VALIDATOR_FAIL}}, {'arg': [1, "bad"]}, "bad conversion"), + # unknown parameter + ({'arg': {'type': 'int'}}, {'other': 'bad', '_ansible_module_name': 'ansible_unittest'}, + 'Unsupported parameters for (ansible_unittest) module: other Supported parameters include: arg'), + # parameter is required + ({'arg': {'required': True}}, {}, 'missing required arguments: arg'), +) + +BASIC_AUTH_VALID_ARGS = [ + {'api_username': 'user1', 'api_password': 'password1', 'api_url': 'http://example.com', 'validate_certs': False}, + {'api_username': 'user1', 'api_password': 'password1', 'api_url': 'http://example.com', 'validate_certs': True}, +] + +RATE_LIMIT_VALID_ARGS = [ + {'rate': 1, 'rate_limit': 1}, + {'rate': '1', 'rate_limit': 1}, + {'rate': 1, 'rate_limit': '1'}, + {'rate': '1', 'rate_limit': '1'}, +] + +RETRY_VALID_ARGS = [ + {'retries': 1, 'retry_pause': 1.5}, + {'retries': '1', 'retry_pause': '1.5'}, + {'retries': 1, 'retry_pause': '1.5'}, + {'retries': '1', 'retry_pause': 1.5}, +] + + +@pytest.fixture +def complex_argspec(): + arg_spec
= dict( + foo=dict(required=True, aliases=['dup']), + bar=dict(), + bam=dict(), + bing=dict(), + bang=dict(), + bong=dict(), + baz=dict(fallback=(basic.env_fallback, ['BAZ'])), + bar1=dict(type='bool'), + bar3=dict(type='list', elements='path'), + bar_str=dict(type='list', elements=str), + zardoz=dict(choices=['one', 'two']), + zardoz2=dict(type='list', choices=['one', 'two', 'three']), + zardoz3=dict(type='str', aliases=['zodraz'], deprecated_aliases=[dict(name='zodraz', version='9.99')]), + ) + mut_ex = (('bar', 'bam'), ('bing', 'bang', 'bong')) + req_to = (('bam', 'baz'),) + + kwargs = dict( + argument_spec=arg_spec, + mutually_exclusive=mut_ex, + required_together=req_to, + no_log=True, + add_file_common_args=True, + supports_check_mode=True, + ) + return kwargs + + +@pytest.fixture +def options_argspec_list(): + options_spec = dict( + foo=dict(required=True, aliases=['dup']), + bar=dict(), + bar1=dict(type='list', elements='str'), + bar2=dict(type='list', elements='int'), + bar3=dict(type='list', elements='float'), + bar4=dict(type='list', elements='path'), + bam=dict(), + baz=dict(fallback=(basic.env_fallback, ['BAZ'])), + bam1=dict(), + bam2=dict(default='test'), + bam3=dict(type='bool'), + bam4=dict(type='str'), + ) + + arg_spec = dict( + foobar=dict( + type='list', + elements='dict', + options=options_spec, + mutually_exclusive=[ + ['bam', 'bam1'], + ], + required_if=[ + ['foo', 'hello', ['bam']], + ['foo', 'bam2', ['bam2']] + ], + required_one_of=[ + ['bar', 'bam'] + ], + required_together=[ + ['bam1', 'baz'] + ], + required_by={ + 'bam4': ('bam1', 'bam3'), + }, + ) + ) + + kwargs = dict( + argument_spec=arg_spec, + no_log=True, + add_file_common_args=True, + supports_check_mode=True + ) + return kwargs + + +@pytest.fixture +def options_argspec_dict(options_argspec_list): + # should test ok, for options in dict format. 
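+ # (it reuses the list fixture but turns foobar into a single dict of
+ # suboptions instead of a list of dicts, hence type='dict' and elements=None)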
+ kwargs = options_argspec_list + kwargs['argument_spec']['foobar']['type'] = 'dict' + kwargs['argument_spec']['foobar']['elements'] = None + + return kwargs + + +# +# Tests for one aspect of arg_spec +# + +@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in VALID_SPECS], + indirect=['stdin']) +def test_validator_basic_types(argspec, expected, stdin): + + am = basic.AnsibleModule(argspec) + + if 'type' in argspec['arg']: + if argspec['arg']['type'] == 'int': + type_ = integer_types + else: + type_ = getattr(builtins, argspec['arg']['type']) + else: + type_ = str + + assert isinstance(am.params['arg'], type_) + assert am.params['arg'] == expected + + +@pytest.mark.parametrize('stdin', [{'arg': 42}, {'arg': 18765432109876543210}], indirect=['stdin']) +def test_validator_function(mocker, stdin): + # Type is a callable + MOCK_VALIDATOR_SUCCESS = mocker.MagicMock(return_value=27) + argspec = {'arg': {'type': MOCK_VALIDATOR_SUCCESS}} + am = basic.AnsibleModule(argspec) + + assert isinstance(am.params['arg'], integer_types) + assert am.params['arg'] == 27 + + +@pytest.mark.parametrize('stdin', BASIC_AUTH_VALID_ARGS, indirect=['stdin']) +def test_validate_basic_auth_arg(mocker, stdin): + kwargs = dict( + argument_spec=basic_auth_argument_spec() + ) + am = basic.AnsibleModule(**kwargs) + assert isinstance(am.params['api_username'], string_types) + assert isinstance(am.params['api_password'], string_types) + assert isinstance(am.params['api_url'], string_types) + assert isinstance(am.params['validate_certs'], bool) + + +@pytest.mark.parametrize('stdin', RATE_LIMIT_VALID_ARGS, indirect=['stdin']) +def test_validate_rate_limit_argument_spec(mocker, stdin): + kwargs = dict( + argument_spec=rate_limit_argument_spec() + ) + am = basic.AnsibleModule(**kwargs) + assert isinstance(am.params['rate'], integer_types) + assert isinstance(am.params['rate_limit'], integer_types) + + +@pytest.mark.parametrize('stdin', RETRY_VALID_ARGS, indirect=['stdin']) +def test_validate_retry_argument_spec(mocker, stdin): + kwargs = dict( + argument_spec=retry_argument_spec() + ) + am = basic.AnsibleModule(**kwargs) + assert isinstance(am.params['retries'], integer_types) + assert isinstance(am.params['retry_pause'], float) + + +@pytest.mark.parametrize('stdin', [{'arg': '123'}, {'arg': 123}], indirect=['stdin']) +def test_validator_string_type(mocker, stdin): + # Custom callable that is 'str' + argspec = {'arg': {'type': str}} + am = basic.AnsibleModule(argspec) + + assert isinstance(am.params['arg'], string_types) + assert am.params['arg'] == '123' + + +@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in INVALID_SPECS], + indirect=['stdin']) +def test_validator_fail(stdin, capfd, argspec, expected): + with pytest.raises(SystemExit): + basic.AnsibleModule(argument_spec=argspec) + + out, err = capfd.readouterr() + assert not err + assert expected in json.loads(out)['msg'] + assert json.loads(out)['failed'] + + +class TestComplexArgSpecs: + """Test with a more complex arg_spec""" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello'}, {'dup': 'hello'}], indirect=['stdin']) + def test_complex_required(self, stdin, complex_argspec): + """Test that the complex argspec works if we give it its required param as either the canonical or aliased name""" + am = basic.AnsibleModule(**complex_argspec) + assert isinstance(am.params['foo'], str) + assert am.params['foo'] == 'hello' + + @pytest.mark.parametrize('stdin', [{'foo': 'hello1', 'dup': 'hello2'}], indirect=['stdin']) + 
def test_complex_duplicate_warning(self, stdin, complex_argspec): + """Test that the complex argspec issues a warning if we specify an option both with its canonical name and its alias""" + am = basic.AnsibleModule(**complex_argspec) + assert isinstance(am.params['foo'], str) + assert 'Both option foo and its alias dup are set.' in get_warning_messages() + assert am.params['foo'] == 'hello2' + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'test'}], indirect=['stdin']) + def test_complex_type_fallback(self, mocker, stdin, complex_argspec): + """Test that the complex argspec works if we get a required parameter via fallback""" + environ = os.environ.copy() + environ['BAZ'] = 'test data' + mocker.patch('ansible.module_utils.basic.os.environ', environ) + + am = basic.AnsibleModule(**complex_argspec) + + assert isinstance(am.params['baz'], str) + assert am.params['baz'] == 'test data' + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'bad', 'bam': 'bad2', 'bing': 'a', 'bang': 'b', 'bong': 'c'}], indirect=['stdin']) + def test_fail_mutually_exclusive(self, capfd, stdin, complex_argspec): + """Fail because of mutually exclusive parameters""" + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**complex_argspec) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert results['msg'] == "parameters are mutually exclusive: bar|bam, bing|bang|bong" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'bad2'}], indirect=['stdin']) + def test_fail_required_together(self, capfd, stdin, complex_argspec): + """Fail because only one of a required_together pair of parameters was specified""" + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**complex_argspec) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert results['msg'] == "parameters are required together: bam, baz" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'hi'}], indirect=['stdin']) + def test_fail_required_together_and_default(self, capfd, stdin, complex_argspec): + """Fail because one of a required_together pair of parameters has a default and the other was not specified""" + complex_argspec['argument_spec']['baz'] = {'default': 42} + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**complex_argspec) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert results['msg'] == "parameters are required together: bam, baz" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello'}], indirect=['stdin']) + def test_fail_required_together_and_fallback(self, capfd, mocker, stdin, complex_argspec): + """Fail because one of a required_together pair of parameters has a fallback and the other was not specified""" + environ = os.environ.copy() + environ['BAZ'] = 'test data' + mocker.patch('ansible.module_utils.basic.os.environ', environ) + + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**complex_argspec) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert results['msg'] == "parameters are required together: bam, baz" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'four', 'five']}], indirect=['stdin']) + def test_fail_list_with_choices(self, capfd, mocker, stdin, complex_argspec): + """Fail because one of the items is not in the choice""" + with pytest.raises(SystemExit): + basic.AnsibleModule(**complex_argspec) + + out, err = capfd.readouterr() + results = 
json.loads(out) + + assert results['failed'] + assert results['msg'] == "value of zardoz2 must be one or more of: one, two, three. Got no match for: four, five" + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'three']}], indirect=['stdin']) + def test_list_with_choices(self, capfd, mocker, stdin, complex_argspec): + """Test choices with list""" + am = basic.AnsibleModule(**complex_argspec) + assert isinstance(am.params['zardoz2'], list) + assert am.params['zardoz2'] == ['one', 'three'] + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar3': ['~/test', 'test/']}], indirect=['stdin']) + def test_list_with_elements_path(self, capfd, mocker, stdin, complex_argspec): + """Test that list elements of type path are expanded (e.g. '~')""" + am = basic.AnsibleModule(**complex_argspec) + assert isinstance(am.params['bar3'], list) + assert am.params['bar3'][0].startswith('/') + assert am.params['bar3'][1] == 'test/' + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zodraz': 'one'}], indirect=['stdin']) + def test_deprecated_alias(self, capfd, mocker, stdin, complex_argspec): + """Test a deprecated alias""" + am = basic.AnsibleModule(**complex_argspec) + + assert "Alias 'zodraz' is deprecated." in get_deprecation_messages()[0]['msg'] + assert get_deprecation_messages()[0]['version'] == '9.99' + + @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar_str': [867, '5309']}], indirect=['stdin']) + def test_list_with_elements_callable_str(self, capfd, mocker, stdin, complex_argspec): + """Test that list elements are coerced by a callable str type""" + am = basic.AnsibleModule(**complex_argspec) + assert isinstance(am.params['bar_str'], list) + assert isinstance(am.params['bar_str'][0], string_types) + assert isinstance(am.params['bar_str'][1], string_types) + assert am.params['bar_str'][0] == '867' + assert am.params['bar_str'][1] == '5309' + + +class TestComplexOptions: + """Test arg spec options""" + + # (Parameters, expected value of module.params['foobar']) + OPTIONS_PARAMS_LIST = ( + ({'foobar': [{"foo": "hello", "bam": "good"}, {"foo": "test", "bar": "good"}]}, + [{'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None, + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}, + {'foo': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None, + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}] + ), + # Alias for required param + ({'foobar': [{"dup": "test", "bar": "good"}]}, + [{'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None, + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}] + ), + # Required_if utilizing default value of the requirement + ({'foobar': [{"foo": "bam2", "bar": "required_one_of"}]}, + [{'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2', + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}] + ), + # Check that a bool option is converted + ({"foobar": [{"foo": "required", "bam": "good", "bam3": "yes"}]}, + [{'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bam4': None, 'bar': None, 'baz': None, 'foo': 'required', + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}] + ), + # Check required_by options + ({"foobar": [{"foo": "required", "bar": "good", "baz": "good", "bam4": "required_by", "bam1": "ok", "bam3": "yes"}]}, + [{'bar': 'good', 'baz': 'good', 'bam1': 'ok', 'bam2': 'test', 'bam3': True, 'bam4': 'required_by',
'bam': None, 'foo': 'required', + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}] + ), + # Check for elements in sub-options + ({"foobar": [{"foo": "good", "bam": "required_one_of", "bar1": [1, "good", "yes"], "bar2": ['1', 1], "bar3":['1.3', 1.3, 1]}]}, + [{'foo': 'good', 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': None, 'baz': None, 'bam': 'required_one_of', + 'bar1': ["1", "good", "yes"], 'bar2': [1, 1], 'bar3': [1.3, 1.3, 1.0], 'bar4': None}] + ), + ) + + # (Parameters, expected value of module.params['foobar']) + OPTIONS_PARAMS_DICT = ( + ({'foobar': {"foo": "hello", "bam": "good"}}, + {'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None, + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None} + ), + # Alias for required param + ({'foobar': {"dup": "test", "bar": "good"}}, + {'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None, + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None} + ), + # Required_if utilizing default value of the requirement + ({'foobar': {"foo": "bam2", "bar": "required_one_of"}}, + {'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2', + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None} + ), + # Check that a bool option is converted + ({"foobar": {"foo": "required", "bam": "good", "bam3": "yes"}}, + {'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bam4': None, 'bar': None, 'baz': None, 'foo': 'required', + 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None} + ), + # Check required_by options + ({"foobar": {"foo": "required", "bar": "good", "baz": "good", "bam4": "required_by", "bam1": "ok", "bam3": "yes"}}, + {'bar': 'good', 'baz': 'good', 'bam1': 'ok', 'bam2': 'test', 'bam3': True, 'bam4': 'required_by', 'bam': None, + 'foo': 'required', 'bar1': None, 'bar3': None, 'bar2': None, 'bar4': None} + ), + # Check for elements in sub-options + ({"foobar": {"foo": "good", "bam": "required_one_of", "bar1": [1, "good", "yes"], + "bar2": ['1', 1], "bar3": ['1.3', 1.3, 1]}}, + {'foo': 'good', 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': None, + 'baz': None, 'bam': 'required_one_of', + 'bar1': ["1", "good", "yes"], 'bar2': [1, 1], 'bar3': [1.3, 1.3, 1.0], 'bar4': None} + ), + ) + + # (Parameters, failure message) + FAILING_PARAMS_LIST = ( + # Missing required option + ({'foobar': [{}]}, 'missing required arguments: foo found in foobar'), + # Invalid option + ({'foobar': [{"foo": "hello", "bam": "good", "invalid": "bad"}]}, 'module: invalid found in foobar. 
Supported parameters include'), + # Mutually exclusive options found + ({'foobar': [{"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}]}, + 'parameters are mutually exclusive: bam|bam1 found in foobar'), + # required_if fails + ({'foobar': [{"foo": "hello", "bar": "bad"}]}, + 'foo is hello but all of the following are missing: bam found in foobar'), + # Missing required_one_of option + ({'foobar': [{"foo": "test"}]}, + 'one of the following is required: bar, bam found in foobar'), + # Missing required_together option + ({'foobar': [{"foo": "test", "bar": "required_one_of", "bam1": "bad"}]}, + 'parameters are required together: bam1, baz found in foobar'), + # Missing required_by options + ({'foobar': [{"foo": "test", "bar": "required_one_of", "bam4": "required_by"}]}, + "missing parameter(s) required by 'bam4': bam1, bam3"), + ) + + # (Parameters, failure message) + FAILING_PARAMS_DICT = ( + # Missing required option + ({'foobar': {}}, 'missing required arguments: foo found in foobar'), + # Invalid option + ({'foobar': {"foo": "hello", "bam": "good", "invalid": "bad"}}, + 'module: invalid found in foobar. Supported parameters include'), + # Mutually exclusive options found + ({'foobar': {"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}}, + 'parameters are mutually exclusive: bam|bam1 found in foobar'), + # required_if fails + ({'foobar': {"foo": "hello", "bar": "bad"}}, + 'foo is hello but all of the following are missing: bam found in foobar'), + # Missing required_one_of option + ({'foobar': {"foo": "test"}}, + 'one of the following is required: bar, bam found in foobar'), + # Missing required_together option + ({'foobar': {"foo": "test", "bar": "required_one_of", "bam1": "bad"}}, + 'parameters are required together: bam1, baz found in foobar'), + # Missing required_by options + ({'foobar': {"foo": "test", "bar": "required_one_of", "bam4": "required_by"}}, + "missing parameter(s) required by 'bam4': bam1, bam3"), + ) + + @pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_DICT, indirect=['stdin']) + def test_options_type_dict(self, stdin, options_argspec_dict, expected): + """Test that a basic creation with required and required_if works""" + # should test ok, tests basic foo requirement and required_if + am = basic.AnsibleModule(**options_argspec_dict) + + assert isinstance(am.params['foobar'], dict) + assert am.params['foobar'] == expected + + @pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_LIST, indirect=['stdin']) + def test_options_type_list(self, stdin, options_argspec_list, expected): + """Test that a basic creation with required and required_if works""" + # should test ok, tests basic foo requirement and required_if + am = basic.AnsibleModule(**options_argspec_list) + + assert isinstance(am.params['foobar'], list) + assert am.params['foobar'] == expected + + @pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_DICT, indirect=['stdin']) + def test_fail_validate_options_dict(self, capfd, stdin, options_argspec_dict, expected): + """Fail because one of a required_together pair of parameters has a default and the other was not specified""" + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**options_argspec_dict) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert expected in results['msg'] + + @pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_LIST, indirect=['stdin']) + def test_fail_validate_options_list(self, capfd, stdin, options_argspec_list, expected): + """Fail 
because one of a required_together pair of parameters has a default and the other was not specified""" + with pytest.raises(SystemExit): + am = basic.AnsibleModule(**options_argspec_list) + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] + assert expected in results['msg'] + + @pytest.mark.parametrize('stdin', [{'foobar': {'foo': 'required', 'bam1': 'test', 'bar': 'case'}}], indirect=['stdin']) + def test_fallback_in_option(self, mocker, stdin, options_argspec_dict): + """Test that the complex argspec works if we get a required parameter via fallback""" + environ = os.environ.copy() + environ['BAZ'] = 'test data' + mocker.patch('ansible.module_utils.basic.os.environ', environ) + + am = basic.AnsibleModule(**options_argspec_dict) + + assert isinstance(am.params['foobar']['baz'], str) + assert am.params['foobar']['baz'] == 'test data' + + @pytest.mark.parametrize('stdin', + [{'foobar': {'foo': 'required', 'bam1': 'test', 'baz': 'data', 'bar': 'case', 'bar4': '~/test'}}], + indirect=['stdin']) + def test_elements_path_in_option(self, mocker, stdin, options_argspec_dict): + """Test that the complex argspec works with elements path type""" + + am = basic.AnsibleModule(**options_argspec_dict) + + assert isinstance(am.params['foobar']['bar4'][0], str) + assert am.params['foobar']['bar4'][0].startswith('/') + + @pytest.mark.parametrize('stdin,spec,expected', [ + ({}, + {'one': {'type': 'dict', 'apply_defaults': True, 'options': {'two': {'default': True, 'type': 'bool'}}}}, + {'two': True}), + ({}, + {'one': {'type': 'dict', 'options': {'two': {'default': True, 'type': 'bool'}}}}, + None), + ], indirect=['stdin']) + def test_subspec_not_required_defaults(self, stdin, spec, expected): + # Check that top level not required, processed subspec defaults + am = basic.AnsibleModule(spec) + assert am.params['one'] == expected + + +class TestLoadFileCommonArguments: + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_smoketest_load_file_common_args(self, am): + """With no file arguments, an empty dict is returned""" + am.selinux_mls_enabled = MagicMock() + am.selinux_mls_enabled.return_value = True + am.selinux_default_context = MagicMock() + am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3) + + assert am.load_file_common_arguments(params={}) == {} + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_load_file_common_args(self, am, mocker): + am.selinux_mls_enabled = MagicMock() + am.selinux_mls_enabled.return_value = True + am.selinux_default_context = MagicMock() + am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3) + + base_params = dict( + path='/path/to/file', + mode=0o600, + owner='root', + group='root', + seuser='_default', + serole='_default', + setype='_default', + selevel='_default', + ) + + extended_params = base_params.copy() + extended_params.update(dict( + follow=True, + foo='bar', + )) + + final_params = base_params.copy() + final_params.update(dict( + path='/path/to/real_file', + secontext=['unconfined_u', 'object_r', 'default_t', 's0'], + attributes=None, + )) + + # with the proper params specified, the returned dictionary should represent + # only those params which have something to do with the file arguments, excluding + # other params and updated as required with proper values which may have been + # massaged by the method + mocker.patch('os.path.islink', return_value=True) + mocker.patch('os.path.realpath', 
return_value='/path/to/real_file') + + res = am.load_file_common_arguments(params=extended_params) + + assert res == final_params + + +@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"]) +def test_no_log_true(stdin, capfd): + """Explicitly mask an argument (no_log=True).""" + arg_spec = { + "arg_pass": {"no_log": True} + } + am = basic.AnsibleModule(arg_spec) + # no_log=True is picked up by both am._log_invocation and list_no_log_values + # (called by am._handle_no_log_values). As a result, we can check for the + # value in am.no_log_values. + assert "testing" in am.no_log_values + + +@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"]) +def test_no_log_false(stdin, capfd): + """Explicitly log and display an argument (no_log=False).""" + arg_spec = { + "arg_pass": {"no_log": False} + } + am = basic.AnsibleModule(arg_spec) + assert "testing" not in am.no_log_values and not get_warning_messages() + + +@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"]) +def test_no_log_none(stdin, capfd): + """Allow Ansible to make the decision by matching the argument name + against PASSWORD_MATCH.""" + arg_spec = { + "arg_pass": {} + } + am = basic.AnsibleModule(arg_spec) + # Omitting no_log is only picked up by _log_invocation, so the value never + # makes it into am.no_log_values. Instead we can check for the warning + # emitted by am._log_invocation. + assert len(get_warning_messages()) > 0 diff --git a/test/units/module_utils/basic/test_atomic_move.py b/test/units/module_utils/basic/test_atomic_move.py new file mode 100644 index 00000000..bbdb0519 --- /dev/null +++ b/test/units/module_utils/basic/test_atomic_move.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import errno +import json +from itertools import product + +import pytest + +from ansible.module_utils import basic + + +@pytest.fixture +def atomic_am(am, mocker): + am.selinux_enabled = mocker.MagicMock() + am.selinux_context = mocker.MagicMock() + am.selinux_default_context = mocker.MagicMock() + am.set_context_if_different = mocker.MagicMock() + am._unsafe_writes = mocker.MagicMock() + + yield am + + +@pytest.fixture +def atomic_mocks(mocker, monkeypatch): + environ = dict() + mocks = { + 'chmod': mocker.patch('os.chmod'), + 'chown': mocker.patch('os.chown'), + 'close': mocker.patch('os.close'), + 'environ': mocker.patch('os.environ', environ), + 'getlogin': mocker.patch('os.getlogin'), + 'getuid': mocker.patch('os.getuid'), + 'path_exists': mocker.patch('os.path.exists'), + 'rename': mocker.patch('os.rename'), + 'stat': mocker.patch('os.stat'), + 'umask': mocker.patch('os.umask'), + 'getpwuid': mocker.patch('pwd.getpwuid'), + 'copy2': mocker.patch('shutil.copy2'), + 'copyfileobj': mocker.patch('shutil.copyfileobj'), + 'move': mocker.patch('shutil.move'), + 'mkstemp': mocker.patch('tempfile.mkstemp'), + } + + mocks['getlogin'].return_value = 'root' + mocks['getuid'].return_value = 0 + mocks['getpwuid'].return_value = ('root', '', 0, 0, '', '', '') + mocks['umask'].side_effect = [18, 0] + mocks['rename'].return_value = None + + # normalize OS specific features + monkeypatch.delattr(os, 'chflags', raising=False) + + yield mocks + + +@pytest.fixture +def fake_stat(mocker): + stat1 
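The three no_log tests above describe a tri-state switch. Summarized as a sketch (assumed behavior exactly as asserted by these tests, with a hypothetical spec):

    # no_log=True: the value lands in module.no_log_values and is masked everywhere.
    # no_log=False: the value is neither masked nor warned about.
    # no_log omitted: an argument name matching PASSWORD_MATCH (e.g. ending in
    #                 '_pass') only triggers a warning from _log_invocation.
    arg_spec = {
        'api_pass': {'no_log': True},
        'banner_text': {'no_log': False},
    }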
= mocker.MagicMock() + stat1.st_mode = 0o0644 + stat1.st_uid = 0 + stat1.st_gid = 0 + stat1.st_flags = 0 + yield stat1 + + +@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin']) +def test_new_file(atomic_am, atomic_mocks, mocker, selinux): + # test destination does not exist, login name = 'root', no environment, os.rename() succeeds + mock_context = atomic_am.selinux_default_context.return_value + atomic_mocks['path_exists'].return_value = False + atomic_am.selinux_enabled.return_value = selinux + + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest') + assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/dest', basic.DEFAULT_PERM & ~18)] + + if selinux: + assert atomic_am.selinux_default_context.call_args_list == [mocker.call('/path/to/dest')] + assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)] + else: + assert not atomic_am.selinux_default_context.called + assert not atomic_am.set_context_if_different.called + + +@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin']) +def test_existing_file(atomic_am, atomic_mocks, fake_stat, mocker, selinux): + # Test destination already present + mock_context = atomic_am.selinux_context.return_value + atomic_mocks['stat'].return_value = fake_stat + atomic_mocks['path_exists'].return_value = True + atomic_am.selinux_enabled.return_value = selinux + + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest') + assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)] + + if selinux: + assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)] + assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')] + else: + assert not atomic_am.selinux_default_context.called + assert not atomic_am.set_context_if_different.called + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_no_tty_fallback(atomic_am, atomic_mocks, fake_stat, mocker): + """Raise OSError when using getlogin() to simulate the no-tty corner case""" + mock_context = atomic_am.selinux_context.return_value + atomic_mocks['stat'].return_value = fake_stat + atomic_mocks['path_exists'].return_value = True + atomic_am.selinux_enabled.return_value = True + atomic_mocks['getlogin'].side_effect = OSError() + atomic_mocks['environ']['LOGNAME'] = 'root' + + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest') + assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)] + + assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)] + assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')] + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_existing_file_stat_failure(atomic_am, atomic_mocks, mocker): + """Failure to stat an existing file in order to copy permissions propagates the error (unless EPERM)""" + atomic_mocks['stat'].side_effect = OSError() + atomic_mocks['path_exists'].return_value = True + + with pytest.raises(OSError): + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + +@pytest.mark.parametrize('stdin', [{}],
indirect=['stdin']) +def test_existing_file_stat_perms_failure(atomic_am, atomic_mocks, mocker): + """Failure to stat an existing file to copy the permissions due to permissions passes fine""" + # and now have os.stat return EPERM, which should not fail + mock_context = atomic_am.selinux_context.return_value + atomic_mocks['stat'].side_effect = OSError(errno.EPERM, 'testing os stat with EPERM') + atomic_mocks['path_exists'].return_value = True + atomic_am.selinux_enabled.return_value = True + + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest') + # FIXME: Should atomic_move() set a default permission value when it cannot retrieve the + # existing file's permissions? (Right now it's up to the calling code. + # assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)] + assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)] + assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')] + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_rename_failure(atomic_am, atomic_mocks, mocker, capfd): + """Test os.rename fails with EIO, causing it to bail out""" + atomic_mocks['path_exists'].side_effect = [False, False] + atomic_mocks['rename'].side_effect = OSError(errno.EIO, 'failing with EIO') + + with pytest.raises(SystemExit): + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + out, err = capfd.readouterr() + results = json.loads(out) + + assert 'Could not replace file' in results['msg'] + assert 'failing with EIO' in results['msg'] + assert results['failed'] + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_rename_perms_fail_temp_creation_fails(atomic_am, atomic_mocks, mocker, capfd): + """Test os.rename fails with EPERM working but failure in mkstemp""" + atomic_mocks['path_exists'].return_value = False + atomic_mocks['close'].return_value = None + atomic_mocks['rename'].side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None] + atomic_mocks['mkstemp'].return_value = None + atomic_mocks['mkstemp'].side_effect = OSError() + atomic_am.selinux_enabled.return_value = False + + with pytest.raises(SystemExit): + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + + out, err = capfd.readouterr() + results = json.loads(out) + + assert 'is not writable by the current user' in results['msg'] + assert results['failed'] + + +@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin']) +def test_rename_perms_fail_temp_succeeds(atomic_am, atomic_mocks, fake_stat, mocker, selinux): + """Test os.rename raising an error but fallback to using mkstemp works""" + mock_context = atomic_am.selinux_default_context.return_value + atomic_mocks['path_exists'].return_value = False + atomic_mocks['rename'].side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None] + atomic_mocks['stat'].return_value = fake_stat + atomic_mocks['stat'].side_effect = None + atomic_mocks['mkstemp'].return_value = (None, '/path/to/tempfile') + atomic_mocks['mkstemp'].side_effect = None + atomic_am.selinux_enabled.return_value = selinux + + atomic_am.atomic_move('/path/to/src', '/path/to/dest') + assert atomic_mocks['rename'].call_args_list == [mocker.call(b'/path/to/src', b'/path/to/dest'), + mocker.call(b'/path/to/tempfile', b'/path/to/dest')] + assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/dest', 
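test_rename_failure and the two test_rename_perms_fail_temp_* cases outline atomic_move()'s fallback strategy. A simplified standalone illustration of that strategy (not Ansible's actual implementation, which additionally handles permissions, SELinux context, unsafe writes, and fail_json reporting):

    import errno
    import os
    import shutil
    import tempfile

    def move_with_fallback(src, dest):
        try:
            os.rename(src, dest)  # fast path: atomic on the same filesystem
        except OSError as e:
            if e.errno not in (errno.EPERM, errno.EXDEV):
                raise  # e.g. EIO: give up, as test_rename_failure expects
            # Stage a copy next to dest so the final rename stays on one filesystem.
            fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest) or '.')
            os.close(fd)
            try:
                shutil.copy2(src, tmp)
                os.rename(tmp, dest)
                os.unlink(src)
            finally:
                if os.path.exists(tmp):
                    os.unlink(tmp)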
basic.DEFAULT_PERM & ~18)] + + if selinux: + assert atomic_am.selinux_default_context.call_args_list == [mocker.call('/path/to/dest')] + assert atomic_am.set_context_if_different.call_args_list == [mocker.call(b'/path/to/tempfile', mock_context, False), + mocker.call('/path/to/dest', mock_context, False)] + else: + assert not atomic_am.selinux_default_context.called + assert not atomic_am.set_context_if_different.called diff --git a/test/units/module_utils/basic/test_deprecate_warn.py b/test/units/module_utils/basic/test_deprecate_warn.py new file mode 100644 index 00000000..351cf25b --- /dev/null +++ b/test/units/module_utils/basic/test_deprecate_warn.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +import pytest + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_warn(am, capfd): + + am.warn('warning1') + + with pytest.raises(SystemExit): + am.exit_json(warnings=['warning2']) + out, err = capfd.readouterr() + assert json.loads(out)['warnings'] == ['warning1', 'warning2'] + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_deprecate(am, capfd): + am.deprecate('deprecation1') + am.deprecate('deprecation2', '2.3') # pylint: disable=ansible-deprecated-no-collection-name + am.deprecate('deprecation3', version='2.4') # pylint: disable=ansible-deprecated-no-collection-name + am.deprecate('deprecation4', date='2020-03-10') # pylint: disable=ansible-deprecated-no-collection-name + am.deprecate('deprecation5', collection_name='ansible.builtin') + am.deprecate('deprecation6', '2.3', collection_name='ansible.builtin') + am.deprecate('deprecation7', version='2.4', collection_name='ansible.builtin') + am.deprecate('deprecation8', date='2020-03-10', collection_name='ansible.builtin') + + with pytest.raises(SystemExit): + am.exit_json(deprecations=['deprecation9', ('deprecation10', '2.4')]) + + out, err = capfd.readouterr() + output = json.loads(out) + assert ('warnings' not in output or output['warnings'] == []) + assert output['deprecations'] == [ + {u'msg': u'deprecation1', u'version': None, u'collection_name': None}, + {u'msg': u'deprecation2', u'version': '2.3', u'collection_name': None}, + {u'msg': u'deprecation3', u'version': '2.4', u'collection_name': None}, + {u'msg': u'deprecation4', u'date': '2020-03-10', u'collection_name': None}, + {u'msg': u'deprecation5', u'version': None, u'collection_name': 'ansible.builtin'}, + {u'msg': u'deprecation6', u'version': '2.3', u'collection_name': 'ansible.builtin'}, + {u'msg': u'deprecation7', u'version': '2.4', u'collection_name': 'ansible.builtin'}, + {u'msg': u'deprecation8', u'date': '2020-03-10', u'collection_name': 'ansible.builtin'}, + {u'msg': u'deprecation9', u'version': None, u'collection_name': None}, + {u'msg': u'deprecation10', u'version': '2.4', u'collection_name': None}, + ] + + +@pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) +def test_deprecate_without_list(am, capfd): + with pytest.raises(SystemExit): + am.exit_json(deprecations='Simple deprecation warning') + + out, err = capfd.readouterr() + output = json.loads(out) + assert ('warnings' not in output or output['warnings'] == []) + assert output['deprecations'] == [ + {u'msg': u'Simple deprecation warning', u'version': None, u'collection_name': None}, + ] + + +@pytest.mark.parametrize('stdin', 
[{}], indirect=['stdin']) +def test_deprecate_version_and_date_set(am, capfd): + with pytest.raises(AssertionError) as ctx: + am.deprecate('Simple deprecation warning', date='', version='') + assert ctx.value.args[0] == "implementation error -- version and date must not both be set" diff --git a/test/units/module_utils/basic/test_dict_converters.py b/test/units/module_utils/basic/test_dict_converters.py new file mode 100644 index 00000000..f63ed9c6 --- /dev/null +++ b/test/units/module_utils/basic/test_dict_converters.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from units.mock.procenv import ModuleTestCase + +from ansible.module_utils.six.moves import builtins + +realimport = builtins.__import__ + + +class TestTextifyContainers(ModuleTestCase): + def test_module_utils_basic_json_dict_converters(self): + from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode + + test_data = dict( + item1=u"Fóo", + item2=[u"Bár", u"Bam"], + item3=dict(sub1=u"Súb"), + item4=(u"föo", u"bär", u"©"), + item5=42, + ) + res = json_dict_unicode_to_bytes(test_data) + res2 = json_dict_bytes_to_unicode(res) + + self.assertEqual(test_data, res2) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py new file mode 100644 index 00000000..240095c0 --- /dev/null +++ b/test/units/module_utils/basic/test_exit_json.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015-2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys + +import pytest + + +EMPTY_INVOCATION = {u'module_args': {}} + + +class TestAnsibleModuleExitJson: + """ + Test that various means of calling exit_json and fail_json return the messages they've been given + """ + DATA = ( + ({}, {'invocation': EMPTY_INVOCATION}), + ({'msg': 'message'}, {'msg': 'message', 'invocation': EMPTY_INVOCATION}), + ({'msg': 'success', 'changed': True}, + {'msg': 'success', 'changed': True, 'invocation': EMPTY_INVOCATION}), + ({'msg': 'nochange', 'changed': False}, + {'msg': 'nochange', 'changed': False, 'invocation': EMPTY_INVOCATION}), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + # pylint: disable=undefined-variable + @pytest.mark.parametrize('args, expected, stdin', ((a, e, {}) for a, e in DATA), indirect=['stdin']) + def test_exit_json_exits(self, am, capfd, args, expected): + with pytest.raises(SystemExit) as ctx: + am.exit_json(**args) + assert ctx.value.code == 0 + + out, err = capfd.readouterr() + return_val = json.loads(out) + assert return_val == expected + + # fail_json is only legal if it's called with a message + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('args, expected, stdin', + ((a, e, {}) for a, e in DATA if 'msg' in a), # pylint: disable=undefined-variable + indirect=['stdin']) + def test_fail_json_exits(self, am, capfd, args, expected): + with pytest.raises(SystemExit) as ctx: + am.fail_json(**args) + assert ctx.value.code == 1 + + out, err = capfd.readouterr() + return_val = json.loads(out) + # Fail_json should add
failed=True + expected['failed'] = True + assert return_val == expected + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_fail_json_msg_positional(self, am, capfd): + with pytest.raises(SystemExit) as ctx: + am.fail_json('This is the msg') + assert ctx.value.code == 1 + + out, err = capfd.readouterr() + return_val = json.loads(out) + # Fail_json should add failed=True + assert return_val == {'msg': 'This is the msg', 'failed': True, + 'invocation': EMPTY_INVOCATION} + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_fail_json_msg_as_kwarg_after(self, am, capfd): + """Test that msg as a kwarg after other kwargs works""" + with pytest.raises(SystemExit) as ctx: + am.fail_json(arbitrary=42, msg='This is the msg') + assert ctx.value.code == 1 + + out, err = capfd.readouterr() + return_val = json.loads(out) + # Fail_json should add failed=True + assert return_val == {'msg': 'This is the msg', 'failed': True, + 'arbitrary': 42, + 'invocation': EMPTY_INVOCATION} + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_fail_json_no_msg(self, am): + with pytest.raises(TypeError) as ctx: + am.fail_json() + + if sys.version_info < (3,): + error_msg = "fail_json() takes exactly 2 arguments (1 given)" + else: + error_msg = "fail_json() missing 1 required positional argument: 'msg'" + + assert ctx.value.args[0] == error_msg + + +class TestAnsibleModuleExitValuesRemoved: + """ + Test that ExitJson and FailJson remove password-like values + """ + OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + + DATA = ( + ( + dict(username='person', password='$ecret k3y'), + dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', + not_secret='following the leader', msg='here'), + dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/', + not_secret='following the leader', msg='here', + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), + ), + ( + dict(username='person', password='password12345'), + dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', + not_secret='following the leader', msg='here'), + dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', + not_secret='following the leader', msg='here', + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), + ), + ( + dict(username='person', password='$ecret k3y'), + dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', + not_secret='following the leader', msg='here'), + dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', + not_secret='following the leader', msg='here', + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), + ), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('am, stdin, return_val, expected', + (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e) + for s, r, e in DATA), # pylint: disable=undefined-variable + indirect=['am', 'stdin']) + def test_exit_json_removes_values(self, am, capfd, return_val, expected): + with pytest.raises(SystemExit): + am.exit_json(**return_val) + out, err = capfd.readouterr() + + assert json.loads(out) == expected + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('am, stdin, return_val, expected', + (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e) + for s, r, e in DATA), # pylint: 
disable=undefined-variable + indirect=['am', 'stdin']) + def test_fail_json_removes_values(self, am, capfd, return_val, expected): + expected['failed'] = True + with pytest.raises(SystemExit): + am.fail_json(**return_val) + out, err = capfd.readouterr() + + assert json.loads(out) == expected diff --git a/test/units/module_utils/basic/test_filesystem.py b/test/units/module_utils/basic/test_filesystem.py new file mode 100644 index 00000000..37d1c553 --- /dev/null +++ b/test/units/module_utils/basic/test_filesystem.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from units.mock.procenv import ModuleTestCase + +from units.compat.mock import patch, MagicMock +from ansible.module_utils.six.moves import builtins + +realimport = builtins.__import__ + + +class TestOtherFilesystem(ModuleTestCase): + def test_module_utils_basic_ansible_module_user_and_group(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + mock_stat = MagicMock() + mock_stat.st_uid = 0 + mock_stat.st_gid = 0 + + with patch('os.lstat', return_value=mock_stat): + self.assertEqual(am.user_and_group('/path/to/file'), (0, 0)) + + def test_module_utils_basic_ansible_module_find_mount_point(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + def _mock_ismount(path): + if path == b'/': + return True + return False + + with patch('os.path.ismount', side_effect=_mock_ismount): + self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/') + + def _mock_ismount(path): + if path == b'/subdir/mount': + return True + if path == b'/': + return True + return False + + with patch('os.path.ismount', side_effect=_mock_ismount): + self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount') + + def test_module_utils_basic_ansible_module_set_owner_if_different(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True) + self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False) + + am.user_and_group = MagicMock(return_value=(500, 500)) + + with patch('os.lchown', return_value=None) as m: + self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True) + m.assert_called_with(b'/path/to/file', 0, -1) + + def _mock_getpwnam(*args, **kwargs): + mock_pw = MagicMock() + mock_pw.pw_uid = 0 + return mock_pw + + m.reset_mock() + with patch('pwd.getpwnam', side_effect=_mock_getpwnam): + self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True) + m.assert_called_with(b'/path/to/file', 0, -1) + + with patch('pwd.getpwnam', side_effect=KeyError): + self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False) + + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('os.lchown', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root',
False) + + def test_module_utils_basic_ansible_module_set_group_if_different(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True) + self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False) + + am.user_and_group = MagicMock(return_value=(500, 500)) + + with patch('os.lchown', return_value=None) as m: + self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True) + m.assert_called_with(b'/path/to/file', -1, 0) + + def _mock_getgrnam(*args, **kwargs): + mock_gr = MagicMock() + mock_gr.gr_gid = 0 + return mock_gr + + m.reset_mock() + with patch('grp.getgrnam', side_effect=_mock_getgrnam): + self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True) + m.assert_called_with(b'/path/to/file', -1, 0) + + with patch('grp.getgrnam', side_effect=KeyError): + self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False) + + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('os.lchown', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False) diff --git a/test/units/module_utils/basic/test_get_file_attributes.py b/test/units/module_utils/basic/test_get_file_attributes.py new file mode 100644 index 00000000..5130a5fb --- /dev/null +++ b/test/units/module_utils/basic/test_get_file_attributes.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright: +# (c) 2017, Pierre-Louis Bonicoli +# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from itertools import product + +from ansible.module_utils.basic import AnsibleModule + +import pytest + + +DATA = ( + ( + '3353595900 --------------e---- /usr/lib32', + {'attr_flags': 'e', 'version': '3353595900', 'attributes': ['extents']} + ), + # with e2fsprogs < 1.43, output isn't aligned + ( + '78053594 -----------I--e---- /usr/lib', + {'attr_flags': 'Ie', 'version': '78053594', 'attributes': ['indexed', 'extents']} + ), + ( + '15711607 -------A------e---- /tmp/test', + {'attr_flags': 'Ae', 'version': '15711607', 'attributes': ['noatime', 'extents']} + ), + # with e2fsprogs >= 1.43, output is aligned + ( + '78053594 -----------I--e---- /usr/lib', + {'attr_flags': 'Ie', 'version': '78053594', 'attributes': ['indexed', 'extents']} + ), + ( + '15711607 -------A------e---- /tmp/test', + {'attr_flags': 'Ae', 'version': '15711607', 'attributes': ['noatime', 'extents']} + ), +) + + +@pytest.mark.parametrize('stdin, data', product(({},), DATA), indirect=['stdin']) +def test_get_file_attributes(am, stdin, mocker, data): + # Test #18731 + mocker.patch.object(AnsibleModule, 'get_bin_path', return_value=(0, '/usr/bin/lsattr', '')) + mocker.patch.object(AnsibleModule, 'run_command', return_value=(0, data[0], '')) + result = am.get_file_attributes('/path/to/file') + for key, value in data[1].items(): + assert key in result and result[key] == value diff --git a/test/units/module_utils/basic/test_get_module_path.py b/test/units/module_utils/basic/test_get_module_path.py new file mode 100644 index 00000000..6ff4a3bc --- /dev/null +++ 
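The DATA table above pairs raw 'lsattr -vd' output with the parsed result expected from get_file_attributes(). An illustrative parser for that line format (a simplified sketch, not the shipped implementation):

    def parse_lsattr_line(line):
        # Format: '<version> <flag field> <path>', e.g.
        # '15711607 -------A------e---- /tmp/test'
        version, flags, path = line.split(None, 2)
        return {'version': version, 'attr_flags': flags.replace('-', ''), 'path': path}

    parse_lsattr_line('15711607 -------A------e---- /tmp/test')
    # -> {'version': '15711607', 'attr_flags': 'Ae', 'path': '/tmp/test'}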
b/test/units/module_utils/basic/test_get_module_path.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from units.mock.procenv import ModuleTestCase + +from units.compat.mock import patch +from ansible.module_utils.six.moves import builtins + +realimport = builtins.__import__ + + +class TestGetModulePath(ModuleTestCase): + def test_module_utils_basic_get_module_path(self): + from ansible.module_utils.basic import get_module_path + with patch('os.path.realpath', return_value='/path/to/foo/'): + self.assertEqual(get_module_path(), '/path/to/foo') diff --git a/test/units/module_utils/basic/test_heuristic_log_sanitize.py b/test/units/module_utils/basic/test_heuristic_log_sanitize.py new file mode 100644 index 00000000..f8a0929d --- /dev/null +++ b/test/units/module_utils/basic/test_heuristic_log_sanitize.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.module_utils.basic import heuristic_log_sanitize + + +class TestHeuristicLogSanitize(unittest.TestCase): + def setUp(self): + self.URL_SECRET = 'http://username:pas:word@foo.com/data' + self.SSH_SECRET = 'username:pas:word@foo.com/data' + self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here')) + self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET)) + self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET)) + + def _gen_data(self, records, per_rec, top_level, secret_text): + hostvars = {'hostvars': {}} + for i in range(1, records, 1): + host_facts = { + 'host%s' % i: { + 'pstack': { + 'running': '875.1', + 'symlinked': '880.0', + 'tars': [], + 'versions': ['885.0'] + }, + } + } + if per_rec: + host_facts['host%s' % i]['secret'] = secret_text + hostvars['hostvars'].update(host_facts) + if top_level: + hostvars['secret'] = secret_text + return hostvars + + def test_did_not_hide_too_much(self): + self.assertEqual(heuristic_log_sanitize(self.clean_data), self.clean_data) + + def test_hides_url_secrets(self): + url_output = heuristic_log_sanitize(self.url_data) + # Basic functionality: Successfully hid the password + self.assertNotIn('pas:word', url_output) + + # Slightly more advanced, we hid all of the password despite the ":" + self.assertNotIn('pas', url_output) + + # In this implementation we replace the password with 8 "*" which is + # also the length of our password. 
The url fields should be able to + # accurately detect where the password ends so the length should be + # the same: + self.assertEqual(len(url_output), len(self.url_data)) + + def test_hides_ssh_secrets(self): + ssh_output = heuristic_log_sanitize(self.ssh_data) + self.assertNotIn('pas:word', ssh_output) + + # Slightly more advanced, we hid all of the password despite the ":" + self.assertNotIn('pas', ssh_output) + + # ssh checking is harder as the heuristic is overzealous in many + # cases. Since the input will have at least one ":" present before + # the password we can tell some things about the beginning and end of + # the data, though: + self.assertTrue(ssh_output.startswith("{'")) + self.assertTrue(ssh_output.endswith("}")) + self.assertIn(":********@foo.com/data'", ssh_output) + + def test_hides_parameter_secrets(self): + output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret'])) + self.assertNotIn('secret', output) diff --git a/test/units/module_utils/basic/test_imports.py b/test/units/module_utils/basic/test_imports.py new file mode 100644 index 00000000..9d8ae68d --- /dev/null +++ b/test/units/module_utils/basic/test_imports.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import sys + +from units.mock.procenv import ModuleTestCase + +from units.compat import unittest +from units.compat.mock import patch +from ansible.module_utils.six.moves import builtins + +realimport = builtins.__import__ + + +class TestImports(ModuleTestCase): + + def clear_modules(self, mods): + for mod in mods: + if mod in sys.modules: + del sys.modules[mod] + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_syslog(self, mock_import): + def _mock_import(name, *args, **kwargs): + if name == 'syslog': + raise ImportError + return realimport(name, *args, **kwargs) + + self.clear_modules(['syslog', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.HAS_SYSLOG) + + self.clear_modules(['syslog', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.HAS_SYSLOG) + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_selinux(self, mock_import): + def _mock_import(name, *args, **kwargs): + if name == 'selinux': + raise ImportError + return realimport(name, *args, **kwargs) + + try: + self.clear_modules(['selinux', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.HAVE_SELINUX) + except ImportError: + # no selinux on test system, so skip + pass + + self.clear_modules(['selinux', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.HAVE_SELINUX) + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_json(self, mock_import): + def _mock_import(name, *args, **kwargs): + if name == 'ansible.module_utils.common._json_compat': + raise ImportError + return realimport(name, *args, **kwargs) + + self.clear_modules(['json', 
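The sanitizer exercised above can be tried directly: it masks password-like fields in URL and user:pass@host strings, and a frozenset of known no_log values may be passed to force-mask those as well:

    from ansible.module_utils.basic import heuristic_log_sanitize

    print(heuristic_log_sanitize('fetching http://username:pas:word@foo.com/data'))
    # the password portion is replaced by '********'
    print(heuristic_log_sanitize('token="secret", user="person"', frozenset(['secret'])))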
'ansible.module_utils.basic']) + builtins.__import__('ansible.module_utils.basic') + self.clear_modules(['json', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + with self.assertRaises(SystemExit): + builtins.__import__('ansible.module_utils.basic') + + # FIXME: doesn't work yet + # @patch.object(builtins, 'bytes') + # def test_module_utils_basic_bytes(self, mock_bytes): + # mock_bytes.side_effect = NameError() + # from ansible.module_utils import basic + + @patch.object(builtins, '__import__') + @unittest.skipIf(sys.version_info[0] >= 3, "literal_eval is available in every version of Python3") + def test_module_utils_basic_import_literal_eval(self, mock_import): + def _mock_import(name, *args, **kwargs): + try: + fromlist = kwargs.get('fromlist', args[2]) + except IndexError: + fromlist = [] + if name == 'ast' and 'literal_eval' in fromlist: + raise ImportError + return realimport(name, *args, **kwargs) + + mock_import.side_effect = _mock_import + self.clear_modules(['ast', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1") + self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1) + self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1) + self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1, 2, 3)) + self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1]) + self.assertEqual(mod.module_utils.basic.literal_eval("True"), True) + self.assertEqual(mod.module_utils.basic.literal_eval("False"), False) + self.assertEqual(mod.module_utils.basic.literal_eval("None"), None) + # self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1)) + self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf") + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_systemd_journal(self, mock_import): + def _mock_import(name, *args, **kwargs): + try: + fromlist = kwargs.get('fromlist', args[2]) + except IndexError: + fromlist = [] + if name == 'systemd' and 'journal' in fromlist: + raise ImportError + return realimport(name, *args, **kwargs) + + self.clear_modules(['systemd', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.has_journal) + + self.clear_modules(['systemd', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.has_journal) diff --git a/test/units/module_utils/basic/test_log.py b/test/units/module_utils/basic/test_log.py new file mode 100644 index 00000000..f3f764fc --- /dev/null +++ b/test/units/module_utils/basic/test_log.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import syslog +from itertools import product + +import pytest + +import ansible.module_utils.basic +from ansible.module_utils.six import PY3 + + +class TestAnsibleModuleLogSmokeTest: + DATA = [u'Text string', u'Toshio くらとみ non-ascii test'] + DATA = DATA + [d.encode('utf-8') for d in DATA] + DATA += [b'non-utf8 :\xff: test'] + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), 
indirect=['stdin']) # pylint: disable=undefined-variable + def test_smoketest_syslog(self, am, mocker, msg): + # These talk to the live daemons on the system. Need to do this to + # show that what we send doesn't cause an issue once it gets to the + # daemon. These are just smoketests to test that we don't fail. + mocker.patch('ansible.module_utils.basic.has_journal', False) + + am.log(u'Text string') + am.log(u'Toshio くらとみ non-ascii test') + + am.log(b'Byte string') + am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8')) + am.log(b'non-utf8 :\xff: test') + + @pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed') + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin']) # pylint: disable=undefined-variable + def test_smoketest_journal(self, am, mocker, msg): + # These talk to the live daemons on the system. Need to do this to + # show that what we send doesn't cause an issue once it gets to the + # daemon. These are just smoketests to test that we don't fail. + mocker.patch('ansible.module_utils.basic.has_journal', True) + + am.log(u'Text string') + am.log(u'Toshio くらとみ non-ascii test') + + am.log(b'Byte string') + am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8')) + am.log(b'non-utf8 :\xff: test') + + +class TestAnsibleModuleLogSyslog: + """Test the AnsibleModule Log Method""" + + PY2_OUTPUT_DATA = [ + (u'Text string', b'Text string'), + (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'.encode('utf-8')), + (b'Byte string', b'Byte string'), + (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'.encode('utf-8')), + (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace').encode('utf-8')), + ] + + PY3_OUTPUT_DATA = [ + (u'Text string', u'Text string'), + (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'), + (b'Byte string', u'Byte string'), + (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'), + (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')), + ] + + OUTPUT_DATA = PY3_OUTPUT_DATA if PY3 else PY2_OUTPUT_DATA + + @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin']) + def test_no_log(self, am, mocker, no_log): + """Test that when no_log is set, logging does not occur""" + mock_syslog = mocker.patch('syslog.syslog', autospec=True) + mocker.patch('ansible.module_utils.basic.has_journal', False) + am.no_log = no_log + am.log('unittest no_log') + if no_log: + assert not mock_syslog.called + else: + mock_syslog.assert_called_once_with(syslog.LOG_INFO, 'unittest no_log') + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('msg, param, stdin', + ((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable + indirect=['stdin']) + def test_output_matches(self, am, mocker, msg, param): + """Check that log messages are sent correctly""" + mocker.patch('ansible.module_utils.basic.has_journal', False) + mock_syslog = mocker.patch('syslog.syslog', autospec=True) + + am.log(msg) + mock_syslog.assert_called_once_with(syslog.LOG_INFO, param) + + +@pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed') +class TestAnsibleModuleLogJournal: + """Test the AnsibleModule Log Method""" + + OUTPUT_DATA = [ + (u'Text string', u'Text string'), + (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'), 
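The PY2/PY3 OUTPUT_DATA tables capture how log() normalizes messages before they reach syslog: on Python 3, byte strings are decoded with errors='replace'. A minimal reproduction of that normalization (assuming a POSIX system where the syslog module is available):

    import syslog

    msg = b'non-utf8 :\xff: test'.decode('utf-8', 'replace')
    syslog.syslog(syslog.LOG_INFO, msg)  # logged with the invalid byte replaced by U+FFFD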
+ (b'Byte string', u'Byte string'), + (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'), + (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')), + ] + + @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin']) + def test_no_log(self, am, mocker, no_log): + journal_send = mocker.patch('systemd.journal.send') + am.no_log = no_log + am.log('unittest no_log') + if no_log: + assert not journal_send.called + else: + assert journal_send.called == 1 + # Message + # call_args is a 2-tuple of (arg_list, kwarg_dict) + assert journal_send.call_args[1]['MESSAGE'].endswith('unittest no_log'), 'Message was not sent to log' + # log adds this journal field + assert 'MODULE' in journal_send.call_args[1] + assert 'basic.py' in journal_send.call_args[1]['MODULE'] + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + @pytest.mark.parametrize('msg, param, stdin', + ((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable + indirect=['stdin']) + def test_output_matches(self, am, mocker, msg, param): + journal_send = mocker.patch('systemd.journal.send') + am.log(msg) + assert journal_send.call_count == 1, 'journal.send not called exactly once' + assert journal_send.call_args[1]['MESSAGE'].endswith(param) + + @pytest.mark.parametrize('stdin', ({},), indirect=['stdin']) + def test_log_args(self, am, mocker): + journal_send = mocker.patch('systemd.journal.send') + am.log('unittest log_args', log_args=dict(TEST='log unittest')) + assert journal_send.called == 1 + assert journal_send.call_args[1]['MESSAGE'].endswith('unittest log_args'), 'Message was not sent to log' + + # log adds this journal field + assert 'MODULE' in journal_send.call_args[1] + assert 'basic.py' in journal_send.call_args[1]['MODULE'] + + # We added this journal field + assert 'TEST' in journal_send.call_args[1] + assert 'log unittest' in journal_send.call_args[1]['TEST'] diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py new file mode 100644 index 00000000..c4797028 --- /dev/null +++ b/test/units/module_utils/basic/test_no_log.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi +# (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from units.compat import unittest + +from ansible.module_utils.basic import remove_values +from ansible.module_utils.common.parameters import _return_datastructure_name + + +class TestReturnValues(unittest.TestCase): + dataset = ( + ('string', frozenset(['string'])), + ('', frozenset()), + (1, frozenset(['1'])), + (1.0, frozenset(['1.0'])), + (False, frozenset()), + (['1', '2', '3'], frozenset(['1', '2', '3'])), + (('1', '2', '3'), frozenset(['1', '2', '3'])), + ({'one': 1, 'two': 'dos'}, frozenset(['1', 'dos'])), + ( + { + 'one': 1, + 'two': 'dos', + 'three': [ + 'amigos', 'musketeers', None, { + 'ping': 'pong', + 'base': ( + 'balls', 'raquets' + ) + } + ] + }, + frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets']) + ), + (u'Toshio くらとみ', frozenset(['Toshio くらとみ'])), + ('Toshio くらとみ', frozenset(['Toshio くらとみ'])), + ) + + def test_return_datastructure_name(self): + for data, expected in self.dataset: + self.assertEqual(frozenset(_return_datastructure_name(data)), expected) + + def test_unknown_type(self): + self.assertRaises(TypeError, frozenset, 
_return_datastructure_name(object())) + + +class TestRemoveValues(unittest.TestCase): + OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + dataset_no_remove = ( + ('string', frozenset(['nope'])), + (1234, frozenset(['4321'])), + (False, frozenset(['4321'])), + (1.0, frozenset(['4321'])), + (['string', 'strang', 'strung'], frozenset(['nope'])), + ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['nope'])), + ( + { + 'one': 1, + 'two': 'dos', + 'three': [ + 'amigos', 'musketeers', None, { + 'ping': 'pong', 'base': ['balls', 'raquets'] + } + ] + }, + frozenset(['nope']) + ), + (u'Toshio くら'.encode('utf-8'), frozenset([u'とみ'.encode('utf-8')])), + (u'Toshio くら', frozenset([u'とみ'])), + ) + dataset_remove = ( + ('string', frozenset(['string']), OMIT), + (1234, frozenset(['1234']), OMIT), + (1234, frozenset(['23']), OMIT), + (1.0, frozenset(['1.0']), OMIT), + (['string', 'strang', 'strung'], frozenset(['strang']), ['string', OMIT, 'strung']), + (['string', 'strang', 'strung'], frozenset(['strang', 'string', 'strung']), [OMIT, OMIT, OMIT]), + (('string', 'strang', 'strung'), frozenset(['string', 'strung']), [OMIT, 'strang', OMIT]), + ((1234567890, 345678, 987654321), frozenset(['1234567890']), [OMIT, 345678, 987654321]), + ((1234567890, 345678, 987654321), frozenset(['345678']), [OMIT, OMIT, 987654321]), + ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key']), {'one': 1, 'two': 'dos', 'secret': OMIT}), + ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}), + ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}), + ( + { + 'one': 1, + 'two': 'dos', + 'three': [ + 'amigos', 'musketeers', None, { + 'ping': 'pong', 'base': [ + 'balls', 'raquets' + ] + } + ] + }, + frozenset(['balls', 'base', 'pong', 'amigos']), + { + 'one': 1, + 'two': 'dos', + 'three': [ + OMIT, 'musketeers', None, { + 'ping': OMIT, + 'base': [ + OMIT, 'raquets' + ] + } + ] + } + ), + ( + 'This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery', + frozenset(['enigma', 'mystery', 'secret']), + 'This sentence has an ******** wrapped in a ******** inside of a ********. 
- mr ********' + ), + (u'Toshio くらとみ'.encode('utf-8'), frozenset([u'くらとみ'.encode('utf-8')]), u'Toshio ********'.encode('utf-8')), + (u'Toshio くらとみ', frozenset([u'くらとみ']), u'Toshio ********'), + ) + + def test_no_removal(self): + for value, no_log_strings in self.dataset_no_remove: + self.assertEqual(remove_values(value, no_log_strings), value) + + def test_strings_to_remove(self): + for value, no_log_strings, expected in self.dataset_remove: + self.assertEqual(remove_values(value, no_log_strings), expected) + + def test_unknown_type(self): + self.assertRaises(TypeError, remove_values, object(), frozenset()) + + def test_hit_recursion_limit(self): + """ Check that we do not hit a recursion limit""" + data_list = [] + inner_list = data_list + for i in range(0, 10000): + new_list = [] + inner_list.append(new_list) + inner_list = new_list + inner_list.append('secret') + + # Check that this does not hit a recursion limit + actual_data_list = remove_values(data_list, frozenset(('secret',))) + + levels = 0 + inner_list = actual_data_list + while inner_list: + if isinstance(inner_list, list): + self.assertEqual(len(inner_list), 1) + else: + levels -= 1 + break + inner_list = inner_list[0] + levels += 1 + + self.assertEqual(inner_list, self.OMIT) + self.assertEqual(levels, 10000) diff --git a/test/units/module_utils/basic/test_platform_distribution.py b/test/units/module_utils/basic/test_platform_distribution.py new file mode 100644 index 00000000..d7a4510c --- /dev/null +++ b/test/units/module_utils/basic/test_platform_distribution.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017-2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from units.compat.mock import patch + +from ansible.module_utils.six.moves import builtins + +# Functions being tested +from ansible.module_utils.basic import get_platform +from ansible.module_utils.basic import get_all_subclasses +from ansible.module_utils.basic import get_distribution +from ansible.module_utils.basic import get_distribution_version +from ansible.module_utils.basic import load_platform_subclass + + +realimport = builtins.__import__ + + +@pytest.fixture +def platform_linux(mocker): + mocker.patch('platform.system', return_value='Linux') + + +# +# get_platform tests +# + +def test_get_platform(): + with patch('platform.system', return_value='foo'): + assert get_platform() == 'foo' + + +# +# get_distribution tests +# + +def test_get_distribution_not_linux(): + """If it's not Linux, then it has no distribution""" + with patch('platform.system', return_value='Foo'): + assert get_distribution() is None + + +@pytest.mark.usefixtures("platform_linux") +class TestGetDistribution: + """Tests for get_distribution that have to find something""" + def test_distro_known(self): + with patch('ansible.module_utils.distro.id', return_value="alpine"): + assert get_distribution() == "Alpine" + + with patch('ansible.module_utils.distro.id', return_value="arch"): + assert get_distribution() == "Arch" + + with patch('ansible.module_utils.distro.id', return_value="centos"): + assert get_distribution() == "Centos" + + with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"): + assert get_distribution() == "Clear-linux-os" + + with patch('ansible.module_utils.distro.id', return_value="coreos"): + assert get_distribution() 
== "Coreos" + + with patch('ansible.module_utils.distro.id', return_value="debian"): + assert get_distribution() == "Debian" + + with patch('ansible.module_utils.distro.id', return_value="flatcar"): + assert get_distribution() == "Flatcar" + + with patch('ansible.module_utils.distro.id', return_value="linuxmint"): + assert get_distribution() == "Linuxmint" + + with patch('ansible.module_utils.distro.id', return_value="opensuse"): + assert get_distribution() == "Opensuse" + + with patch('ansible.module_utils.distro.id', return_value="oracle"): + assert get_distribution() == "Oracle" + + with patch('ansible.module_utils.distro.id', return_value="raspian"): + assert get_distribution() == "Raspian" + + with patch('ansible.module_utils.distro.id', return_value="rhel"): + assert get_distribution() == "Redhat" + + with patch('ansible.module_utils.distro.id', return_value="ubuntu"): + assert get_distribution() == "Ubuntu" + + with patch('ansible.module_utils.distro.id', return_value="virtuozzo"): + assert get_distribution() == "Virtuozzo" + + with patch('ansible.module_utils.distro.id', return_value="foo"): + assert get_distribution() == "Foo" + + def test_distro_unknown(self): + with patch('ansible.module_utils.distro.id', return_value=""): + assert get_distribution() == "OtherLinux" + + def test_distro_amazon_linux_short(self): + with patch('ansible.module_utils.distro.id', return_value="amzn"): + assert get_distribution() == "Amazon" + + def test_distro_amazon_linux_long(self): + with patch('ansible.module_utils.distro.id', return_value="amazon"): + assert get_distribution() == "Amazon" + + +# +# get_distribution_version tests +# + +def test_get_distribution_version_not_linux(): + """If it's not Linux, then it has no distribution""" + with patch('platform.system', return_value='Foo'): + assert get_distribution_version() is None + + +@pytest.mark.usefixtures("platform_linux") +def test_distro_found(): + with patch('ansible.module_utils.distro.version', return_value="1"): + assert get_distribution_version() == "1" + + +# +# Tests for LoadPlatformSubclass +# + +class TestLoadPlatformSubclass: + class LinuxTest: + pass + + class Foo(LinuxTest): + platform = "Linux" + distribution = None + + class Bar(LinuxTest): + platform = "Linux" + distribution = "Bar" + + def test_not_linux(self): + # if neither match, the fallback should be the top-level class + with patch('platform.system', return_value="Foo"): + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None): + assert isinstance(load_platform_subclass(self.LinuxTest), self.LinuxTest) + + @pytest.mark.usefixtures("platform_linux") + def test_get_distribution_none(self): + # match just the platform class, not a specific distribution + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None): + assert isinstance(load_platform_subclass(self.LinuxTest), self.Foo) + + @pytest.mark.usefixtures("platform_linux") + def test_get_distribution_found(self): + # match both the distribution and platform class + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"): + assert isinstance(load_platform_subclass(self.LinuxTest), self.Bar) + + +# +# Tests for get_all_subclasses +# + +class TestGetAllSubclasses: + class Base: + pass + + class BranchI(Base): + pass + + class BranchII(Base): + pass + + class BranchIA(BranchI): + pass + + class BranchIB(BranchI): + pass + + class BranchIIA(BranchII): + pass + + class BranchIIB(BranchII): + pass + + def test_bottom_level(self): + 
assert get_all_subclasses(self.BranchIIB) == [] + + def test_one_inheritance(self): + assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB]) + + def test_toplevel(self): + assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII, + self.BranchIA, self.BranchIB, + self.BranchIIA, self.BranchIIB]) diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py new file mode 100644 index 00000000..25f1c48e --- /dev/null +++ b/test/units/module_utils/basic/test_run_command.py @@ -0,0 +1,283 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import errno +from itertools import product +from io import BytesIO + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.six import PY2 +from ansible.module_utils.compat import selectors + + +class OpenBytesIO(BytesIO): + """BytesIO with dummy close() method + + So that you can inspect the content after close() was called. + """ + + def close(self): + pass + + +@pytest.fixture +def mock_os(mocker): + def mock_os_chdir(path): + if path == '/inaccessible': + raise OSError(errno.EPERM, "Permission denied: '/inaccessible'") + + def mock_os_abspath(path): + if path.startswith('/'): + return path + else: + return os.getcwd.return_value + '/' + path + + os = mocker.patch('ansible.module_utils.basic.os') + + os.path.expandvars.side_effect = lambda x: x + os.path.expanduser.side_effect = lambda x: x + os.environ = {'PATH': '/bin'} + os.getcwd.return_value = '/home/foo' + os.path.isdir.return_value = True + os.chdir.side_effect = mock_os_chdir + os.path.abspath.side_effect = mock_os_abspath + + yield os + + +class DummyFileObj(): + def __init__(self, fileobj): + self.fileobj = fileobj + + +class SpecialBytesIO(BytesIO): + def __init__(self, *args, **kwargs): + fh = kwargs.pop('fh', None) + super(SpecialBytesIO, self).__init__(*args, **kwargs) + self.fh = fh + + def fileno(self): + return self.fh + + # We need to do this because some of our tests create a new value for stdout and stderr + # The new value is able to affect the string that is returned by the subprocess stdout and + # stderr but by the time the test gets it, it is too late to change the SpecialBytesIO that + # subprocess.Popen returns for stdout and stderr. If we could figure out how to change those as + # well, then we wouldn't need this. 
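The comment above is the crux of these fixtures: because the tests swap in fresh buffers for stdout/stderr after Popen has already been mocked, equality has to be decided by file descriptor rather than object identity, which is what the __eq__ just below implements. A minimal standalone sketch of the same idea (hypothetical FakePipe name, not part of this suite):

    from io import BytesIO

    class FakePipe(BytesIO):
        '''In-memory buffer that impersonates a pipe with a fixed descriptor.'''
        def __init__(self, data=b'', fh=None):
            super(FakePipe, self).__init__(data)
            self._fh = fh

        def fileno(self):
            return self._fh

        def __eq__(self, other):
            # Same object, or any file-like object wrapping the same descriptor
            return id(self) == id(other) or self._fh == other.fileno()

    out = FakePipe(b'hello', fh=7)
    assert out.fileno() == 7
    assert out == FakePipe(b'different contents', fh=7)  # equal by descriptor alone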
+ def __eq__(self, other): + if id(self) == id(other) or self.fh == other.fileno(): + return True + return False + + +class DummyKey: + def __init__(self, fileobj): + self.fileobj = fileobj + + +@pytest.fixture +def mock_subprocess(mocker): + + class MockSelector(selectors.BaseSelector): + def __init__(self): + super(MockSelector, self).__init__() + self._file_objs = [] + + def register(self, fileobj, events, data=None): + self._file_objs.append(fileobj) + + def unregister(self, fileobj): + self._file_objs.remove(fileobj) + + def select(self, timeout=None): + ready = [] + for file_obj in self._file_objs: + ready.append((DummyKey(subprocess._output[file_obj.fileno()]), selectors.EVENT_READ)) + return ready + + def get_map(self): + return self._file_objs + + def close(self): + super(MockSelector, self).close() + self._file_objs = [] + + selectors.DefaultSelector = MockSelector + + subprocess = mocker.patch('ansible.module_utils.basic.subprocess') + subprocess._output = {mocker.sentinel.stdout: SpecialBytesIO(b'', fh=mocker.sentinel.stdout), + mocker.sentinel.stderr: SpecialBytesIO(b'', fh=mocker.sentinel.stderr)} + + cmd = mocker.MagicMock() + cmd.returncode = 0 + cmd.stdin = OpenBytesIO() + cmd.stdout = subprocess._output[mocker.sentinel.stdout] + cmd.stderr = subprocess._output[mocker.sentinel.stderr] + subprocess.Popen.return_value = cmd + + yield subprocess + + +@pytest.fixture() +def rc_am(mocker, am, mock_os, mock_subprocess): + am.fail_json = mocker.MagicMock(side_effect=SystemExit) + am._os = mock_os + am._subprocess = mock_subprocess + yield am + + +class TestRunCommandArgs: + # Format is command as passed to run_command, command to Popen as list, command to Popen as string + ARGS_DATA = ( + (['/bin/ls', 'a', 'b', 'c'], [b'/bin/ls', b'a', b'b', b'c'], b'/bin/ls a b c'), + ('/bin/ls a " b" "c "', [b'/bin/ls', b'a', b' b', b'c '], b'/bin/ls a " b" "c "'), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + # pylint: disable=undefined-variable + @pytest.mark.parametrize('cmd, expected, shell, stdin', + ((arg, cmd_str if sh else cmd_lst, sh, {}) + for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))), + indirect=['stdin']) + def test_args(self, cmd, expected, shell, rc_am): + rc_am.run_command(cmd, use_unsafe_shell=shell) + assert rc_am._subprocess.Popen.called + args, kwargs = rc_am._subprocess.Popen.call_args + assert args == (expected, ) + assert kwargs['shell'] == shell + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_tuple_as_args(self, rc_am): + with pytest.raises(SystemExit): + rc_am.run_command(('ls', '/')) + assert rc_am.fail_json.called + + +class TestRunCommandCwd: + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_cwd(self, mocker, rc_am): + rc_am._os.getcwd.return_value = '/old' + rc_am.run_command('/bin/ls', cwd='/new') + assert rc_am._os.chdir.mock_calls == [mocker.call(b'/new'), mocker.call('/old'), ] + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_cwd_relative_path(self, mocker, rc_am): + rc_am._os.getcwd.return_value = '/old' + rc_am.run_command('/bin/ls', cwd='sub-dir') + assert rc_am._os.chdir.mock_calls == [mocker.call(b'/old/sub-dir'), mocker.call('/old'), ] + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_cwd_not_a_dir(self, mocker, rc_am): + rc_am._os.getcwd.return_value = '/old' + rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir' + rc_am.run_command('/bin/ls', cwd='/not-a-dir') + assert rc_am._os.chdir.mock_calls == 
[mocker.call('/old'), ] + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_cwd_not_a_dir_noignore(self, rc_am): + rc_am._os.getcwd.return_value = '/old' + rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir' + with pytest.raises(SystemExit): + rc_am.run_command('/bin/ls', cwd='/not-a-dir', ignore_invalid_cwd=False) + assert rc_am.fail_json.called + + +class TestRunCommandPrompt: + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_prompt_bad_regex(self, rc_am): + with pytest.raises(SystemExit): + rc_am.run_command('foo', prompt_regex='[pP)assword:') + assert rc_am.fail_json.called + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_prompt_no_match(self, mocker, rc_am): + rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello') + (rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:') + assert rc == 0 + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_prompt_match_wo_data(self, mocker, rc_am): + rc_am._subprocess._output = {mocker.sentinel.stdout: + SpecialBytesIO(b'Authentication required!\nEnter password: ', + fh=mocker.sentinel.stdout), + mocker.sentinel.stderr: + SpecialBytesIO(b'', fh=mocker.sentinel.stderr)} + (rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None) + assert rc == 257 + + +class TestRunCommandRc: + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_check_rc_false(self, rc_am): + rc_am._subprocess.Popen.return_value.returncode = 1 + (rc, _, _) = rc_am.run_command('/bin/false', check_rc=False) + assert rc == 1 + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_check_rc_true(self, rc_am): + rc_am._subprocess.Popen.return_value.returncode = 1 + with pytest.raises(SystemExit): + rc_am.run_command('/bin/false', check_rc=True) + assert rc_am.fail_json.called + args, kwargs = rc_am.fail_json.call_args + assert kwargs['rc'] == 1 + + +class TestRunCommandOutput: + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_text_stdin(self, rc_am): + (rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world') + assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n' + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_ascii_stdout(self, mocker, rc_am): + rc_am._subprocess._output = {mocker.sentinel.stdout: + SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout), + mocker.sentinel.stderr: + SpecialBytesIO(b'', fh=mocker.sentinel.stderr)} + (rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt') + assert rc == 0 + # module_utils function. On py3 it returns text and py2 it returns + # bytes because it's returning native strings + assert stdout == 'hello' + + @pytest.mark.parametrize('stdin', [{}], indirect=['stdin']) + def test_utf8_output(self, mocker, rc_am): + rc_am._subprocess._output = {mocker.sentinel.stdout: + SpecialBytesIO(u'Žarn§'.encode('utf-8'), + fh=mocker.sentinel.stdout), + mocker.sentinel.stderr: + SpecialBytesIO(u'لرئيسية'.encode('utf-8'), + fh=mocker.sentinel.stderr)} + (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly') + assert rc == 0 + # module_utils function. 
On py3 it returns text and py2 it returns
+        # bytes because it's returning native strings
+        assert stdout == to_native(u'Žarn§')
+        assert stderr == to_native(u'لرئيسية')
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_run_command_fds(mocker, rc_am):
+    subprocess_mock = mocker.patch('ansible.module_utils.basic.subprocess')
+    subprocess_mock.Popen.side_effect = AssertionError
+
+    try:
+        rc_am.run_command('synchronize', pass_fds=(101, 42))
+    except SystemExit:
+        pass
+
+    if PY2:
+        assert subprocess_mock.Popen.call_args[1]['close_fds'] is False
+        assert 'pass_fds' not in subprocess_mock.Popen.call_args[1]
+
+    else:
+        assert subprocess_mock.Popen.call_args[1]['pass_fds'] == (101, 42)
+        assert subprocess_mock.Popen.call_args[1]['close_fds'] is True
diff --git a/test/units/module_utils/basic/test_safe_eval.py b/test/units/module_utils/basic/test_safe_eval.py
new file mode 100644
index 00000000..e8538ca9
--- /dev/null
+++ b/test/units/module_utils/basic/test_safe_eval.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# (c) 2015-2017, Toshio Kuratomi
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from itertools import chain
+import pytest
+
+
+# Strings that should be converted into a typed value
+VALID_STRINGS = (
+    ("'a'", 'a'),
+    ("'1'", '1'),
+    ("1", 1),
+    ("True", True),
+    ("False", False),
+    ("{}", {}),
+)
+
+# Passing things that aren't strings should just return the object
+NONSTRINGS = (
+    ({'a': 1}, {'a': 1}),
+)
+
+# These strings are not basic types. For security, these should not be
+# executed. We return the same string and get an exception for some
+INVALID_STRINGS = (
+    ("a=1", "a=1", SyntaxError),
+    ("a.foo()", "a.foo()", None),
+    ("import foo", "import foo", None),
+    ("__import__('foo')", "__import__('foo')", ValueError),
+)
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+                         ((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
+                         indirect=['stdin'])
+def test_simple_types(am, code, expected):
+    # test some basic usage for various types
+    assert am.safe_eval(code) == expected
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+                         ((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
+                         indirect=['stdin'])
+def test_simple_types_with_exceptions(am, code, expected):
+    # Test simple types with exceptions requested
+    assert am.safe_eval(code, include_exceptions=True) == (expected, None)
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+                         ((c, e, {}) for c, e, dummy in INVALID_STRINGS),
+                         indirect=['stdin'])
+def test_invalid_strings(am, code, expected):
+    assert am.safe_eval(code) == expected
+
+
+@pytest.mark.parametrize('code, expected, exception, stdin',
+                         ((c, e, ex, {}) for c, e, ex in INVALID_STRINGS),
+                         indirect=['stdin'])
+def test_invalid_strings_with_exceptions(am, code, expected, exception):
+    res = am.safe_eval(code, include_exceptions=True)
+    assert res[0] == expected
+    if exception is None:
+        assert res[1] == exception
+    else:
+        assert type(res[1]) == exception
diff --git a/test/units/module_utils/basic/test_sanitize_keys.py b/test/units/module_utils/basic/test_sanitize_keys.py
new file mode 100644
index 00000000..180f8662
--- /dev/null
+++ b/test/units/module_utils/basic/test_sanitize_keys.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.basic import sanitize_keys
+
+
+def test_sanitize_keys_non_dict_types():
+    """ Test that non-dict-like objects return the same data. """
+
+    type_exception = 'Unsupported type for key sanitization.'
+    no_log_strings = set()
+
+    assert 'string value' == sanitize_keys('string value', no_log_strings)
+
+    assert sanitize_keys(None, no_log_strings) is None
+
+    assert set(['x', 'y']) == sanitize_keys(set(['x', 'y']), no_log_strings)
+
+    assert not sanitize_keys(False, no_log_strings)
+
+
+def _run_comparison(obj):
+    no_log_strings = set(['secret', 'password'])
+
+    ret = sanitize_keys(obj, no_log_strings)
+
+    expected = [
+        None,
+        True,
+        100,
+        "some string",
+        set([1, 2]),
+        [1, 2],
+
+        {'key1': ['value1a', 'value1b'],
+         'some-********': 'value-for-some-password',
+         'key2': {'key3': set(['value3a', 'value3b']),
+                  'i-have-a-********': {'********-********': 'value-for-secret-password', 'key4': 'value4'}
+                  }
+         },
+
+        {'foo': [{'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER': 1}]}
+    ]
+
+    assert ret == expected
+
+
+def test_sanitize_keys_dict():
+    """ Test that sanitize_keys works with a dict. """
+
+    d = [
+        None,
+        True,
+        100,
+        "some string",
+        set([1, 2]),
+        [1, 2],
+
+        {'key1': ['value1a', 'value1b'],
+         'some-password': 'value-for-some-password',
+         'key2': {'key3': set(['value3a', 'value3b']),
+                  'i-have-a-secret': {'secret-password': 'value-for-secret-password', 'key4': 'value4'}
+                  }
+         },
+
+        {'foo': [{'secret': 1}]}
+    ]
+
+    _run_comparison(d)
+
+
+def test_sanitize_keys_with_ignores():
+    """ Test that we can actually ignore keys. """
+
+    no_log_strings = set(['secret', 'rc'])
+    ignore_keys = set(['changed', 'rc', 'status'])
+
+    value = {'changed': True,
+             'rc': 0,
+             'test-rc': 1,
+             'another-secret': 2,
+             'status': 'okie dokie'}
+
+    # We expect to change 'test-rc' but NOT 'rc'.
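Put differently: ignore_keys exempts exact key names from masking, while a no_log string that merely appears inside some other key is still blotted out in place. A usage sketch mirroring the fixture data above (assumes ansible's module_utils is importable):

    from ansible.module_utils.basic import sanitize_keys

    masked = sanitize_keys({'rc': 0, 'test-rc': 1}, set(['rc']), set(['rc']))
    # 'rc' is in ignore_keys and survives untouched; 'test-rc' is not ignored,
    # so the embedded no_log string 'rc' is masked within the key itself
    assert masked == {'rc': 0, 'test-********': 1}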
+ expected = {'changed': True, + 'rc': 0, + 'test-********': 1, + 'another-********': 2, + 'status': 'okie dokie'} + + ret = sanitize_keys(value, no_log_strings, ignore_keys) + assert ret == expected diff --git a/test/units/module_utils/basic/test_selinux.py b/test/units/module_utils/basic/test_selinux.py new file mode 100644 index 00000000..8562eb88 --- /dev/null +++ b/test/units/module_utils/basic/test_selinux.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016 Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import errno +import json + +from units.mock.procenv import ModuleTestCase, swap_stdin_and_argv + +from units.compat.mock import patch, MagicMock, mock_open, Mock +from ansible.module_utils.six.moves import builtins + +realimport = builtins.__import__ + + +class TestSELinux(ModuleTestCase): + def test_module_utils_basic_ansible_module_selinux_mls_enabled(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_mls_enabled(), False) + + basic.HAVE_SELINUX = True + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with patch('selinux.is_selinux_mls_enabled', return_value=0): + self.assertEqual(am.selinux_mls_enabled(), False) + with patch('selinux.is_selinux_mls_enabled', return_value=1): + self.assertEqual(am.selinux_mls_enabled(), True) + delattr(basic, 'selinux') + + def test_module_utils_basic_ansible_module_selinux_initial_context(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + am.selinux_mls_enabled = MagicMock() + am.selinux_mls_enabled.return_value = False + self.assertEqual(am.selinux_initial_context(), [None, None, None]) + am.selinux_mls_enabled.return_value = True + self.assertEqual(am.selinux_initial_context(), [None, None, None, None]) + + def test_module_utils_basic_ansible_module_selinux_enabled(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + # we first test the cases where the python selinux lib is + # not installed, which has two paths: one in which the system + # does have selinux installed (and the selinuxenabled command + # is present and returns 0 when run), or selinux is not installed + basic.HAVE_SELINUX = False + am.get_bin_path = MagicMock() + am.get_bin_path.return_value = '/path/to/selinuxenabled' + am.run_command = MagicMock() + am.run_command.return_value = (0, '', '') + self.assertRaises(SystemExit, am.selinux_enabled) + am.get_bin_path.return_value = None + self.assertEqual(am.selinux_enabled(), False) + + # finally we test the case where the python selinux lib is installed, + # and both possibilities there (enabled vs. 
disabled) + basic.HAVE_SELINUX = True + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with patch('selinux.is_selinux_enabled', return_value=0): + self.assertEqual(am.selinux_enabled(), False) + with patch('selinux.is_selinux_enabled', return_value=1): + self.assertEqual(am.selinux_enabled(), True) + delattr(basic, 'selinux') + + def test_module_utils_basic_ansible_module_selinux_default_context(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + am.selinux_initial_context = MagicMock(return_value=[None, None, None, None]) + am.selinux_enabled = MagicMock(return_value=True) + + # we first test the cases where the python selinux lib is not installed + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + # all following tests assume the python selinux bindings are installed + basic.HAVE_SELINUX = True + + basic.selinux = Mock() + + with patch.dict('sys.modules', {'selinux': basic.selinux}): + # next, we test with a mocked implementation of selinux.matchpathcon to simulate + # an actual context being found + with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + + # we also test the case where matchpathcon returned a failure + with patch('selinux.matchpathcon', return_value=[-1, '']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + # finally, we test where an OSError occurred during matchpathcon's call + with patch('selinux.matchpathcon', side_effect=OSError): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + delattr(basic, 'selinux') + + def test_module_utils_basic_ansible_module_selinux_context(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + am.selinux_initial_context = MagicMock(return_value=[None, None, None, None]) + am.selinux_enabled = MagicMock(return_value=True) + + # we first test the cases where the python selinux lib is not installed + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) + + # all following tests assume the python selinux bindings are installed + basic.HAVE_SELINUX = True + + basic.selinux = Mock() + + with patch.dict('sys.modules', {'selinux': basic.selinux}): + # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate + # an actual context being found + with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + + # we also test the case where matchpathcon returned a failure + with patch('selinux.lgetfilecon_raw', return_value=[-1, '']): + self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) + + # finally, we test where an OSError occurred during matchpathcon's call + e = OSError() + e.errno = errno.ENOENT + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + + e = OSError() + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + + delattr(basic, 'selinux') 
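Both context tests encode the same invariant: an SELinux file context is a colon-joined user:role:type:level string that the module handles internally as a four-element list. A plain-Python illustration of the round trip the assertions rely on (no selinux bindings required):

    raw = 'unconfined_u:object_r:default_t:s0'
    context = raw.split(':', 3)  # -> [user, role, type, level]
    assert context == ['unconfined_u', 'object_r', 'default_t', 's0']
    # set_context_if_different joins the pieces back before calling lsetfilecon
    assert ':'.join(context) == raw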
+ + def test_module_utils_basic_ansible_module_is_special_selinux_path(self): + from ansible.module_utils import basic + + args = json.dumps(dict(ANSIBLE_MODULE_ARGS={'_ansible_selinux_special_fs': "nfs,nfsd,foos", + '_ansible_remote_tmp': "/tmp", + '_ansible_keep_remote_files': False})) + + with swap_stdin_and_argv(stdin_data=args): + basic._ANSIBLE_ARGS = None + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + def _mock_find_mount_point(path): + if path.startswith('/some/path'): + return '/some/path' + elif path.startswith('/weird/random/fstype'): + return '/weird/random/fstype' + return '/' + + am.find_mount_point = MagicMock(side_effect=_mock_find_mount_point) + am.selinux_context = MagicMock(return_value=['foo_u', 'foo_r', 'foo_t', 's0']) + + m = mock_open() + m.side_effect = OSError + + with patch.object(builtins, 'open', m, create=True): + self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (False, None)) + + mount_data = [ + '/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n', + '1.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n', + 'whatever /weird/random/fstype foos rw 0 0\n', + ] + + # mock_open has a broken readlines() implementation apparently... + # this should work by default but doesn't, so we fix it + m = mock_open(read_data=''.join(mount_data)) + m.return_value.readlines.return_value = mount_data + + with patch.object(builtins, 'open', m, create=True): + self.assertEqual(am.is_special_selinux_path('/some/random/path'), (False, None)) + self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (True, ['foo_u', 'foo_r', 'foo_t', 's0'])) + self.assertEqual(am.is_special_selinux_path('/weird/random/fstype/path'), (True, ['foo_u', 'foo_r', 'foo_t', 's0'])) + + def test_module_utils_basic_ansible_module_set_context_if_different(self): + from ansible.module_utils import basic + basic._ANSIBLE_ARGS = None + + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + basic.HAVE_SELINUX = False + + am.selinux_enabled = MagicMock(return_value=False) + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True), True) + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), False) + + basic.HAVE_SELINUX = True + + am.selinux_enabled = MagicMock(return_value=True) + am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None]) + am.is_special_selinux_path = MagicMock(return_value=(False, None)) + + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with patch('selinux.lsetfilecon', return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0') + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('selinux.lsetfilecon', return_value=1) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + + with patch('selinux.lsetfilecon', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + + am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0'])) + + with patch('selinux.lsetfilecon', 
return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0') + + delattr(basic, 'selinux') diff --git a/test/units/module_utils/basic/test_set_cwd.py b/test/units/module_utils/basic/test_set_cwd.py new file mode 100644 index 00000000..159236b7 --- /dev/null +++ b/test/units/module_utils/basic/test_set_cwd.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import shutil +import tempfile + +import pytest + +from units.compat.mock import patch, MagicMock +from ansible.module_utils._text import to_bytes + +from ansible.module_utils import basic + + +class TestAnsibleModuleSetCwd: + + def test_set_cwd(self, monkeypatch): + + '''make sure /tmp is used''' + + def mock_getcwd(): + return '/tmp' + + def mock_access(path, perm): + return True + + def mock_chdir(path): + pass + + monkeypatch.setattr(os, 'getcwd', mock_getcwd) + monkeypatch.setattr(os, 'access', mock_access) + monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))) + with patch('time.time', return_value=42): + am = basic.AnsibleModule(argument_spec={}) + + result = am._set_cwd() + assert result == '/tmp' + + def test_set_cwd_unreadable_use_self_tmpdir(self, monkeypatch): + + '''pwd is not readable, use instance's tmpdir property''' + + def mock_getcwd(): + return '/tmp' + + def mock_access(path, perm): + if path == '/tmp' and perm == 4: + return False + return True + + def mock_expandvars(var): + if var == '$HOME': + return '/home/foobar' + return var + + def mock_gettempdir(): + return '/tmp/testdir' + + def mock_chdir(path): + if path == '/tmp': + raise Exception() + return + + monkeypatch.setattr(os, 'getcwd', mock_getcwd) + monkeypatch.setattr(os, 'chdir', mock_chdir) + monkeypatch.setattr(os, 'access', mock_access) + monkeypatch.setattr(os.path, 'expandvars', mock_expandvars) + monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))) + with patch('time.time', return_value=42): + am = basic.AnsibleModule(argument_spec={}) + + am._tmpdir = '/tmp2' + result = am._set_cwd() + assert result == am._tmpdir + + def test_set_cwd_unreadable_use_home(self, monkeypatch): + + '''cwd and instance tmpdir are unreadable, use home''' + + def mock_getcwd(): + return '/tmp' + + def mock_access(path, perm): + if path in ['/tmp', '/tmp2'] and perm == 4: + return False + return True + + def mock_expandvars(var): + if var == '$HOME': + return '/home/foobar' + return var + + def mock_gettempdir(): + return '/tmp/testdir' + + def mock_chdir(path): + if path == '/tmp': + raise Exception() + return + + monkeypatch.setattr(os, 'getcwd', mock_getcwd) + monkeypatch.setattr(os, 'chdir', mock_chdir) + monkeypatch.setattr(os, 'access', mock_access) + monkeypatch.setattr(os.path, 'expandvars', mock_expandvars) + monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))) + with patch('time.time', return_value=42): + am = basic.AnsibleModule(argument_spec={}) + + am._tmpdir = '/tmp2' + result = am._set_cwd() + assert result == '/home/foobar' + + def test_set_cwd_unreadable_use_gettempdir(self, monkeypatch): + + '''fallback to tempfile.gettempdir''' 
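Taken together, the _set_cwd tests pin down a fallback chain: the process cwd, then the module's own tmpdir, then $HOME, then tempfile.gettempdir(), and finally None once every candidate is unreadable (the mocks' perm == 4 is os.R_OK). A schematic of the chain being exercised, as a sketch rather than the real implementation:

    import os
    import tempfile

    def pick_usable_cwd(module_tmpdir):
        # First readable candidate wins; None when all are unreadable
        candidates = [os.getcwd(), module_tmpdir,
                      os.path.expandvars('$HOME'), tempfile.gettempdir()]
        for path in candidates:
            if path and os.access(path, os.R_OK):
                return path
        return None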
+
+        thisdir = None
+
+        def mock_getcwd():
+            return '/tmp'
+
+        def mock_access(path, perm):
+            if path in ['/tmp', '/tmp2', '/home/foobar'] and perm == 4:
+                return False
+            return True
+
+        def mock_expandvars(var):
+            if var == '$HOME':
+                return '/home/foobar'
+            return var
+
+        def mock_gettempdir():
+            return '/tmp3'
+
+        def mock_chdir(path):
+            if path == '/tmp':
+                raise Exception()
+            thisdir = path
+
+        monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+        monkeypatch.setattr(os, 'chdir', mock_chdir)
+        monkeypatch.setattr(os, 'access', mock_access)
+        monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+        monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+        with patch('time.time', return_value=42):
+            am = basic.AnsibleModule(argument_spec={})
+
+        am._tmpdir = '/tmp2'
+        monkeypatch.setattr(tempfile, 'gettempdir', mock_gettempdir)
+        result = am._set_cwd()
+        assert result == '/tmp3'
+
+    def test_set_cwd_unreadable_use_None(self, monkeypatch):
+
+        '''all paths are unreadable, should return None and not raise an exception'''
+
+        def mock_getcwd():
+            return '/tmp'
+
+        def mock_access(path, perm):
+            if path in ['/tmp', '/tmp2', '/tmp3', '/home/foobar'] and perm == 4:
+                return False
+            return True
+
+        def mock_expandvars(var):
+            if var == '$HOME':
+                return '/home/foobar'
+            return var
+
+        def mock_gettempdir():
+            return '/tmp3'
+
+        def mock_chdir(path):
+            if path == '/tmp':
+                raise Exception()
+
+        monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+        monkeypatch.setattr(os, 'chdir', mock_chdir)
+        monkeypatch.setattr(os, 'access', mock_access)
+        monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+        monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+        with patch('time.time', return_value=42):
+            am = basic.AnsibleModule(argument_spec={})
+
+        am._tmpdir = '/tmp2'
+        monkeypatch.setattr(tempfile, 'gettempdir', mock_gettempdir)
+        result = am._set_cwd()
+        assert result is None
diff --git a/test/units/module_utils/basic/test_set_mode_if_different.py b/test/units/module_utils/basic/test_set_mode_if_different.py
new file mode 100644
index 00000000..93fe2467
--- /dev/null
+++ b/test/units/module_utils/basic/test_set_mode_if_different.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Toshio Kuratomi
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+
+from itertools import product
+
+try:
+    import builtins
+except ImportError:
+    import __builtin__ as builtins
+
+import pytest
+
+
+SYNONYMS_0660 = (
+    0o660,
+    '0o660',
+    '660',
+    'u+rw-x,g+rw-x,o-rwx',
+    'u=rw,g=rw,o-rwx',
+)
+
+
+@pytest.fixture
+def mock_stats(mocker):
+    mock_stat1 = mocker.MagicMock()
+    mock_stat1.st_mode = 0o444
+    mock_stat2 = mocker.MagicMock()
+    mock_stat2.st_mode = 0o660
+    yield {"before": mock_stat1, "after": mock_stat2}
+
+
+@pytest.fixture
+def am_check_mode(am):
+    am.check_mode = True
+    yield am
+    am.check_mode = False
+
+
+@pytest.fixture
+def mock_lchmod(mocker):
+    m_lchmod = mocker.patch('ansible.module_utils.basic.os.lchmod', return_value=None, create=True)
+    yield m_lchmod
+
+
+@pytest.mark.parametrize('previous_changes, check_mode, exists, stdin',
+                         product((True, False), (True, False), (True, False), ({},)),
+                         indirect=['stdin'])
+def test_no_mode_given_returns_previous_changes(am, mock_stats, mock_lchmod, mocker,
previous_changes, check_mode, exists): + am.check_mode = check_mode + mocker.patch('os.lstat', side_effect=[mock_stats['before']]) + m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True) + m_path_exists = mocker.patch('os.path.exists', return_value=exists) + + assert am.set_mode_if_different('/path/to/file', None, previous_changes) == previous_changes + assert not m_lchmod.called + assert not m_path_exists.called + + +@pytest.mark.parametrize('mode, check_mode, stdin', + product(SYNONYMS_0660, (True, False), ({},)), + indirect=['stdin']) +def test_mode_changed_to_0660(am, mock_stats, mocker, mode, check_mode): + # Note: This is for checking that all the different ways of specifying + # 0660 mode work. It cannot be used to check that setting a mode that is + # not equivalent to 0660 works. + am.check_mode = check_mode + mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after'], mock_stats['after']]) + m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True) + mocker.patch('os.path.exists', return_value=True) + + assert am.set_mode_if_different('/path/to/file', mode, False) + if check_mode: + assert not m_lchmod.called + else: + m_lchmod.assert_called_with(b'/path/to/file', 0o660) + + +@pytest.mark.parametrize('mode, check_mode, stdin', + product(SYNONYMS_0660, (True, False), ({},)), + indirect=['stdin']) +def test_mode_unchanged_when_already_0660(am, mock_stats, mocker, mode, check_mode): + # Note: This is for checking that all the different ways of specifying + # 0660 mode work. It cannot be used to check that setting a mode that is + # not equivalent to 0660 works. + am.check_mode = check_mode + mocker.patch('os.lstat', side_effect=[mock_stats['after'], mock_stats['after'], mock_stats['after']]) + m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True) + mocker.patch('os.path.exists', return_value=True) + + assert not am.set_mode_if_different('/path/to/file', mode, False) + assert not m_lchmod.called + + +@pytest.mark.parametrize('check_mode, stdin', + product((True, False), ({},)), + indirect=['stdin']) +def test_missing_lchmod_is_not_link(am, mock_stats, mocker, monkeypatch, check_mode): + """Some platforms have lchmod (*BSD) others do not (Linux)""" + + am.check_mode = check_mode + original_hasattr = hasattr + + monkeypatch.delattr(os, 'lchmod', raising=False) + + mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']]) + mocker.patch('os.path.islink', return_value=False) + mocker.patch('os.path.exists', return_value=True) + m_chmod = mocker.patch('os.chmod', return_value=None) + + assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False) + if check_mode: + assert not m_chmod.called + else: + m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660) + + +@pytest.mark.parametrize('check_mode, stdin', + product((True, False), ({},)), + indirect=['stdin']) +def test_missing_lchmod_is_link(am, mock_stats, mocker, monkeypatch, check_mode): + """Some platforms have lchmod (*BSD) others do not (Linux)""" + + am.check_mode = check_mode + original_hasattr = hasattr + + monkeypatch.delattr(os, 'lchmod', raising=False) + + mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']]) + mocker.patch('os.path.islink', return_value=True) + mocker.patch('os.path.exists', return_value=True) + m_chmod = mocker.patch('os.chmod', return_value=None) + mocker.patch('os.stat', return_value=mock_stats['after']) + + assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, 
False) + if check_mode: + assert not m_chmod.called + else: + m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660) + + mocker.resetall() + mocker.stopall() + + +@pytest.mark.parametrize('stdin,', + ({},), + indirect=['stdin']) +def test_missing_lchmod_is_link_in_sticky_dir(am, mock_stats, mocker): + """Some platforms have lchmod (*BSD) others do not (Linux)""" + + am.check_mode = False + original_hasattr = hasattr + + def _hasattr(obj, name): + if obj == os and name == 'lchmod': + return False + return original_hasattr(obj, name) + + mock_lstat = mocker.MagicMock() + mock_lstat.st_mode = 0o777 + + mocker.patch('os.lstat', side_effect=[mock_lstat, mock_lstat]) + mocker.patch.object(builtins, 'hasattr', side_effect=_hasattr) + mocker.patch('os.path.islink', return_value=True) + mocker.patch('os.path.exists', return_value=True) + m_stat = mocker.patch('os.stat', side_effect=OSError(errno.EACCES, 'Permission denied')) + m_chmod = mocker.patch('os.chmod', return_value=None) + + # not changed: can't set mode on symbolic links + assert not am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False) + m_stat.assert_called_with(b'/path/to/file/no_lchmod') + m_chmod.assert_not_called() + + mocker.resetall() + mocker.stopall() diff --git a/test/units/module_utils/basic/test_tmpdir.py b/test/units/module_utils/basic/test_tmpdir.py new file mode 100644 index 00000000..818cb9b1 --- /dev/null +++ b/test/units/module_utils/basic/test_tmpdir.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import shutil +import tempfile + +import pytest + +from units.compat.mock import patch, MagicMock +from ansible.module_utils._text import to_bytes + +from ansible.module_utils import basic + + +class TestAnsibleModuleTmpDir: + + DATA = ( + ( + { + "_ansible_tmpdir": "/path/to/dir", + "_ansible_remote_tmp": "/path/tmpdir", + "_ansible_keep_remote_files": False, + }, + True, + "/path/to/dir" + ), + ( + { + "_ansible_tmpdir": None, + "_ansible_remote_tmp": "/path/tmpdir", + "_ansible_keep_remote_files": False + }, + False, + "/path/tmpdir/ansible-moduletmp-42-" + ), + ( + { + "_ansible_tmpdir": None, + "_ansible_remote_tmp": "/path/tmpdir", + "_ansible_keep_remote_files": False + }, + True, + "/path/tmpdir/ansible-moduletmp-42-" + ), + ( + { + "_ansible_tmpdir": None, + "_ansible_remote_tmp": "$HOME/.test", + "_ansible_keep_remote_files": False + }, + False, + os.path.join(os.environ['HOME'], ".test/ansible-moduletmp-42-") + ), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + # pylint: disable=undefined-variable + @pytest.mark.parametrize('args, expected, stat_exists', ((s, e, t) for s, t, e in DATA)) + def test_tmpdir_property(self, monkeypatch, args, expected, stat_exists): + makedirs = {'called': False} + + def mock_mkdtemp(prefix, dir): + return os.path.join(dir, prefix) + + def mock_makedirs(path, mode): + makedirs['called'] = True + makedirs['path'] = path + makedirs['mode'] = mode + return + + monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp) + monkeypatch.setattr(os.path, 'exists', lambda x: stat_exists) + monkeypatch.setattr(os, 'makedirs', mock_makedirs) + monkeypatch.setattr(shutil, 'rmtree', lambda x: None) + monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': 
args}))) + + with patch('time.time', return_value=42): + am = basic.AnsibleModule(argument_spec={}) + actual_tmpdir = am.tmpdir + + assert actual_tmpdir == expected + + # verify subsequent calls always produces the same tmpdir + assert am.tmpdir == actual_tmpdir + + if not stat_exists: + assert makedirs['called'] + expected = os.path.expanduser(os.path.expandvars(am._remote_tmp)) + assert makedirs['path'] == expected + assert makedirs['mode'] == 0o700 + + @pytest.mark.parametrize('stdin', ({"_ansible_tmpdir": None, + "_ansible_remote_tmp": "$HOME/.test", + "_ansible_keep_remote_files": True},), + indirect=['stdin']) + def test_tmpdir_makedirs_failure(self, am, monkeypatch): + + mock_mkdtemp = MagicMock(return_value="/tmp/path") + mock_makedirs = MagicMock(side_effect=OSError("Some OS Error here")) + + monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp) + monkeypatch.setattr(os.path, 'exists', lambda x: False) + monkeypatch.setattr(os, 'makedirs', mock_makedirs) + + actual = am.tmpdir + assert actual == "/tmp/path" + assert mock_makedirs.call_args[0] == (os.path.expanduser(os.path.expandvars("$HOME/.test")),) + assert mock_makedirs.call_args[1] == {"mode": 0o700} + + # because makedirs failed the dir should be None so it uses the System tmp + assert mock_mkdtemp.call_args[1]['dir'] is None + assert mock_mkdtemp.call_args[1]['prefix'].startswith("ansible-moduletmp-") diff --git a/test/units/module_utils/common/__init__.py b/test/units/module_utils/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/common/parameters/test_handle_aliases.py b/test/units/module_utils/common/parameters/test_handle_aliases.py new file mode 100644 index 00000000..bc88437f --- /dev/null +++ b/test/units/module_utils/common/parameters/test_handle_aliases.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import pytest + +from ansible.module_utils.common.parameters import handle_aliases +from ansible.module_utils._text import to_native + +DEFAULT_LEGAL_INPUTS = [ + '_ansible_check_mode', + '_ansible_debug', + '_ansible_diff', + '_ansible_keep_remote_files', + '_ansible_module_name', + '_ansible_no_log', + '_ansible_remote_tmp', + '_ansible_selinux_special_fs', + '_ansible_shell_executable', + '_ansible_socket', + '_ansible_string_conversion_action', + '_ansible_syslog_facility', + '_ansible_tmpdir', + '_ansible_verbosity', + '_ansible_version', +] + + +def test_handle_aliases_no_aliases(): + argument_spec = { + 'name': {'type': 'str'}, + } + + params = { + 'name': 'foo', + 'path': 'bar' + } + + expected = ( + {}, + DEFAULT_LEGAL_INPUTS + ['name'], + ) + expected[1].sort() + + result = handle_aliases(argument_spec, params) + result[1].sort() + assert expected == result + + +def test_handle_aliases_basic(): + argument_spec = { + 'name': {'type': 'str', 'aliases': ['surname', 'nick']}, + } + + params = { + 'name': 'foo', + 'path': 'bar', + 'surname': 'foo', + 'nick': 'foo', + } + + expected = ( + {'surname': 'name', 'nick': 'name'}, + DEFAULT_LEGAL_INPUTS + ['name', 'surname', 'nick'], + ) + expected[1].sort() + + result = handle_aliases(argument_spec, params) + result[1].sort() + assert expected == result + + +def test_handle_aliases_value_error(): + argument_spec = { + 'name': {'type': 'str', 'aliases': ['surname', 'nick'], 'default': 'bob', 
'required': True},
+    }
+
+    params = {
+        'name': 'foo',
+    }
+
+    with pytest.raises(ValueError) as ve:
+        handle_aliases(argument_spec, params)
+    assert 'internal error: required and default are mutually exclusive' in to_native(ve.value)
+
+
+def test_handle_aliases_type_error():
+    argument_spec = {
+        'name': {'type': 'str', 'aliases': 'surname'},
+    }
+
+    params = {
+        'name': 'foo',
+    }
+
+    with pytest.raises(TypeError) as te:
+        handle_aliases(argument_spec, params)
+    assert 'internal error: aliases must be a list or tuple' in to_native(te.value)
diff --git a/test/units/module_utils/common/parameters/test_list_deprecations.py b/test/units/module_utils/common/parameters/test_list_deprecations.py
new file mode 100644
index 00000000..0a17187c
--- /dev/null
+++ b/test/units/module_utils/common/parameters/test_list_deprecations.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.parameters import list_deprecations
+
+
+@pytest.fixture
+def params():
+    return {
+        'name': 'bob',
+        'dest': '/etc/hosts',
+        'state': 'present',
+        'value': 5,
+    }
+
+
+def test_list_deprecations():
+    argument_spec = {
+        'old': {'type': 'str', 'removed_in_version': '2.5'},
+        'foo': {'type': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': 1.0}}},
+        'bar': {'type': 'list', 'elements': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': '2.10'}}},
+    }
+
+    params = {
+        'name': 'rod',
+        'old': 'option',
+        'foo': {'old': 'value'},
+        'bar': [{'old': 'value'}, {}],
+    }
+    result = list_deprecations(argument_spec, params)
+    assert len(result) == 3
+    result.sort(key=lambda entry: entry['msg'])
+    assert result[0]['msg'] == """Param 'bar["old"]' is deprecated. See the module docs for more information"""
+    assert result[0]['version'] == '2.10'
+    assert result[1]['msg'] == """Param 'foo["old"]' is deprecated. See the module docs for more information"""
+    assert result[1]['version'] == 1.0
+    assert result[2]['msg'] == "Param 'old' is deprecated.
See the module docs for more information" + assert result[2]['version'] == '2.5' diff --git a/test/units/module_utils/common/parameters/test_list_no_log_values.py b/test/units/module_utils/common/parameters/test_list_no_log_values.py new file mode 100644 index 00000000..1b740555 --- /dev/null +++ b/test/units/module_utils/common/parameters/test_list_no_log_values.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.parameters import list_no_log_values + + +@pytest.fixture +def argument_spec(): + # Allow extra specs to be passed to the fixture, which will be added to the output + def _argument_spec(extra_opts=None): + spec = { + 'secret': {'type': 'str', 'no_log': True}, + 'other_secret': {'type': 'str', 'no_log': True}, + 'state': {'type': 'str'}, + 'value': {'type': 'int'}, + } + + if extra_opts: + spec.update(extra_opts) + + return spec + + return _argument_spec + + +@pytest.fixture +def module_parameters(): + # Allow extra parameters to be passed to the fixture, which will be added to the output + def _module_parameters(extra_params=None): + params = { + 'secret': 'under', + 'other_secret': 'makeshift', + 'state': 'present', + 'value': 5, + } + + if extra_params: + params.update(extra_params) + + return params + + return _module_parameters + + +def test_list_no_log_values_no_secrets(module_parameters): + argument_spec = { + 'other_secret': {'type': 'str', 'no_log': False}, + 'state': {'type': 'str'}, + 'value': {'type': 'int'}, + } + expected = set() + assert expected == list_no_log_values(argument_spec, module_parameters) + + +def test_list_no_log_values(argument_spec, module_parameters): + expected = set(('under', 'makeshift')) + assert expected == list_no_log_values(argument_spec(), module_parameters()) + + +@pytest.mark.parametrize('extra_params', [ + {'subopt1': 1}, + {'subopt1': 3.14159}, + {'subopt1': ['one', 'two']}, + {'subopt1': ('one', 'two')}, +]) +def test_list_no_log_values_invalid_suboptions(argument_spec, module_parameters, extra_params): + extra_opts = { + 'subopt1': { + 'type': 'dict', + 'options': { + 'sub_1_1': {}, + } + } + } + + with pytest.raises(TypeError, match=r"(Value '.*?' in the sub parameter field '.*?' 
must by a dict, not '.*?')" + r"|(dictionary requested, could not parse JSON or key=value)"): + list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) + + +def test_list_no_log_values_suboptions(argument_spec, module_parameters): + extra_opts = { + 'subopt1': { + 'type': 'dict', + 'options': { + 'sub_1_1': {'no_log': True}, + 'sub_1_2': {'type': 'list'}, + } + } + } + + extra_params = { + 'subopt1': { + 'sub_1_1': 'bagel', + 'sub_1_2': ['pebble'], + } + } + + expected = set(('under', 'makeshift', 'bagel')) + assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) + + +def test_list_no_log_values_sub_suboptions(argument_spec, module_parameters): + extra_opts = { + 'sub_level_1': { + 'type': 'dict', + 'options': { + 'l1_1': {'no_log': True}, + 'l1_2': {}, + 'l1_3': { + 'type': 'dict', + 'options': { + 'l2_1': {'no_log': True}, + 'l2_2': {}, + } + } + } + } + } + + extra_params = { + 'sub_level_1': { + 'l1_1': 'saucy', + 'l1_2': 'napped', + 'l1_3': { + 'l2_1': 'corporate', + 'l2_2': 'tinsmith', + } + } + } + + expected = set(('under', 'makeshift', 'saucy', 'corporate')) + assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) + + +def test_list_no_log_values_suboptions_list(argument_spec, module_parameters): + extra_opts = { + 'subopt1': { + 'type': 'list', + 'elements': 'dict', + 'options': { + 'sub_1_1': {'no_log': True}, + 'sub_1_2': {}, + } + } + } + + extra_params = { + 'subopt1': [ + { + 'sub_1_1': ['playroom', 'luxury'], + 'sub_1_2': 'deuce', + }, + { + 'sub_1_2': ['squishier', 'finished'], + } + ] + } + + expected = set(('under', 'makeshift', 'playroom', 'luxury')) + assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) + + +def test_list_no_log_values_sub_suboptions_list(argument_spec, module_parameters): + extra_opts = { + 'subopt1': { + 'type': 'list', + 'elements': 'dict', + 'options': { + 'sub_1_1': {'no_log': True}, + 'sub_1_2': {}, + 'subopt2': { + 'type': 'list', + 'elements': 'dict', + 'options': { + 'sub_2_1': {'no_log': True, 'type': 'list'}, + 'sub_2_2': {}, + } + } + } + } + } + + extra_params = { + 'subopt1': { + 'sub_1_1': ['playroom', 'luxury'], + 'sub_1_2': 'deuce', + 'subopt2': [ + { + 'sub_2_1': ['basis', 'gave'], + 'sub_2_2': 'liquid', + }, + { + 'sub_2_1': ['composure', 'thumping'] + }, + ] + } + } + + expected = set(('under', 'makeshift', 'playroom', 'luxury', 'basis', 'gave', 'composure', 'thumping')) + assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) + + +@pytest.mark.parametrize('extra_params, expected', ( + ({'subopt_dict': 'dict_subopt1=rekindle-scandal,dict_subopt2=subgroupavenge'}, ('rekindle-scandal',)), + ({'subopt_dict': 'dict_subopt1=aversion-mutable dict_subopt2=subgroupavenge'}, ('aversion-mutable',)), + ({'subopt_dict': ['dict_subopt1=blip-marine,dict_subopt2=subgroupavenge', 'dict_subopt1=tipping,dict_subopt2=hardening']}, ('blip-marine', 'tipping')), +)) +def test_string_suboptions_as_string(argument_spec, module_parameters, extra_params, expected): + extra_opts = { + 'subopt_dict': { + 'type': 'dict', + 'options': { + 'dict_subopt1': {'no_log': True}, + 'dict_subopt2': {}, + }, + }, + } + + result = set(('under', 'makeshift')) + result.update(expected) + assert result == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params)) diff --git a/test/units/module_utils/common/process/test_get_bin_path.py 
b/test/units/module_utils/common/process/test_get_bin_path.py
new file mode 100644
index 00000000..a337e78d
--- /dev/null
+++ b/test/units/module_utils/common/process/test_get_bin_path.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.process import get_bin_path
+
+
+def test_get_bin_path(mocker):
+    path = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
+    mocker.patch.dict('os.environ', {'PATH': path})
+    mocker.patch('os.pathsep', ':')
+
+    mocker.patch('os.path.isdir', return_value=False)
+    mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
+
+    # pytest-mock 2.0.0 will throw when os.path.exists is messed with
+    # and then another method is patched afterwards. Likely
+    # something in the pytest-mock chain uses os.path.exists internally, and
+    # since pytest-mock prohibits context-specific patching, there's not a
+    # good solution. For now, just patch os.path.exists last.
+    mocker.patch('os.path.exists', side_effect=[False, True])
+
+    assert '/usr/local/bin/notacommand' == get_bin_path('notacommand')
+
+
+def test_get_bin_path_raise_valueerror(mocker):
+    mocker.patch.dict('os.environ', {'PATH': ''})
+
+    mocker.patch('os.path.exists', return_value=False)
+    mocker.patch('os.path.isdir', return_value=False)
+    mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
+
+    with pytest.raises(ValueError, match='Failed to find required executable notacommand'):
+        get_bin_path('notacommand')
diff --git a/test/units/module_utils/common/test_collections.py b/test/units/module_utils/common/test_collections.py
new file mode 100644
index 00000000..95b2a402
--- /dev/null
+++ b/test/units/module_utils/common/test_collections.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018–2019, Sviatoslav Sydorenko
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Test low-level utility functions from ``module_utils.common.collections``."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.six import Iterator
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.common.collections import ImmutableDict, is_iterable, is_sequence
+
+
+class SeqStub:
+    """Stub emulating a sequence type. 
+ + >>> from collections.abc import Sequence + >>> assert issubclass(SeqStub, Sequence) + >>> assert isinstance(SeqStub(), Sequence) + """ + + +Sequence.register(SeqStub) + + +class IteratorStub(Iterator): + def __next__(self): + raise StopIteration + + +class IterableStub: + def __iter__(self): + return IteratorStub() + + +class FakeAnsibleVaultEncryptedUnicode(Sequence): + __ENCRYPTED__ = True + + def __init__(self, data): + self.data = data + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return len(self.data) + + +TEST_STRINGS = u'he', u'Україна', u'Česká republika' +TEST_STRINGS = TEST_STRINGS + tuple(s.encode('utf-8') for s in TEST_STRINGS) + (FakeAnsibleVaultEncryptedUnicode(u'foo'),) + +TEST_ITEMS_NON_SEQUENCES = ( + {}, object(), frozenset(), + 4, 0., +) + TEST_STRINGS + +TEST_ITEMS_SEQUENCES = ( + [], (), + SeqStub(), +) +TEST_ITEMS_SEQUENCES = TEST_ITEMS_SEQUENCES + ( + # Iterable effectively containing nested random data: + TEST_ITEMS_NON_SEQUENCES, +) + + +@pytest.mark.parametrize('sequence_input', TEST_ITEMS_SEQUENCES) +def test_sequence_positive(sequence_input): + """Test that non-string item sequences are identified correctly.""" + assert is_sequence(sequence_input) + assert is_sequence(sequence_input, include_strings=False) + + +@pytest.mark.parametrize('non_sequence_input', TEST_ITEMS_NON_SEQUENCES) +def test_sequence_negative(non_sequence_input): + """Test that non-sequences are identified correctly.""" + assert not is_sequence(non_sequence_input) + + +@pytest.mark.parametrize('string_input', TEST_STRINGS) +def test_sequence_string_types_with_strings(string_input): + """Test that ``is_sequence`` can separate string and non-string.""" + assert is_sequence(string_input, include_strings=True) + + +@pytest.mark.parametrize('string_input', TEST_STRINGS) +def test_sequence_string_types_without_strings(string_input): + """Test that ``is_sequence`` can separate string and non-string.""" + assert not is_sequence(string_input, include_strings=False) + + +@pytest.mark.parametrize( + 'seq', + ([], (), {}, set(), frozenset(), IterableStub()), +) +def test_iterable_positive(seq): + assert is_iterable(seq) + + +@pytest.mark.parametrize( + 'seq', (IteratorStub(), object(), 5, 9.) 
+)
+def test_iterable_negative(seq):
+    assert not is_iterable(seq)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_iterable_including_strings(string_input):
+    assert is_iterable(string_input, include_strings=True)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_iterable_excluding_strings(string_input):
+    assert not is_iterable(string_input, include_strings=False)
+
+
+class TestImmutableDict:
+    def test_scalar(self):
+        imdict = ImmutableDict({1: 2})
+        assert imdict[1] == 2
+
+    def test_string(self):
+        imdict = ImmutableDict({u'café': u'くらとみ'})
+        assert imdict[u'café'] == u'くらとみ'
+
+    def test_container(self):
+        imdict = ImmutableDict({(1, 2): ['1', '2']})
+        assert imdict[(1, 2)] == ['1', '2']
+
+    def test_from_tuples(self):
+        imdict = ImmutableDict((('a', 1), ('b', 2)))
+        assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
+
+    def test_from_kwargs(self):
+        imdict = ImmutableDict(a=1, b=2)
+        assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
+
+    def test_immutable(self):
+        imdict = ImmutableDict({1: 2})
+
+        expected_reason = r"^'ImmutableDict' object does not support item assignment$"
+
+        with pytest.raises(TypeError, match=expected_reason):
+            imdict[1] = 3
+
+        with pytest.raises(TypeError, match=expected_reason):
+            imdict[5] = 3
+
+    def test_hashable(self):
+        # ImmutableDict is hashable when all of its values are hashable
+        imdict = ImmutableDict({u'café': u'くらとみ'})
+        assert hash(imdict)
+
+    def test_nonhashable(self):
+        # ImmutableDict is unhashable when one of its values is unhashable
+        imdict = ImmutableDict({u'café': u'くらとみ', 1: [1, 2]})
+
+        expected_reason = r"^unhashable type: 'list'$"
+
+        with pytest.raises(TypeError, match=expected_reason):
+            hash(imdict)
+
+    def test_len(self):
+        imdict = ImmutableDict({1: 2, 'a': 'b'})
+        assert len(imdict) == 2
+
+    def test_repr(self):
+        initial_data = {1: 2, 'a': 'b'}
+        initial_data_repr = repr(initial_data)
+        imdict = ImmutableDict(initial_data)
+        actual_repr = repr(imdict)
+        expected_repr = "ImmutableDict({0})".format(initial_data_repr)
+        assert actual_repr == expected_repr
diff --git a/test/units/module_utils/common/test_dict_transformations.py b/test/units/module_utils/common/test_dict_transformations.py
new file mode 100644
index 00000000..ecb520b2
--- /dev/null
+++ b/test/units/module_utils/common/test_dict_transformations.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Will Thames
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
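+
+# Editor's note, not part of the original test module: a minimal sketch of the
+# conversions exercised below, using values taken from the EXPECTED_SNAKIFICATION
+# and EXPECTED_REVERSIBLE tables that follow:
+#
+#     _camel_to_snake('HTTPEndpoints')                             # -> 'http_endpoints'
+#     _camel_to_snake('HTTPEndpoints', reversible=True)            # -> 'h_t_t_p_endpoints'
+#     _snake_to_camel('h_t_t_p_endpoints', capitalize_first=True)  # -> 'HTTPEndpoints'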
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel, camel_dict_to_snake_dict, dict_merge + +EXPECTED_SNAKIFICATION = { + 'alllower': 'alllower', + 'TwoWords': 'two_words', + 'AllUpperAtEND': 'all_upper_at_end', + 'AllUpperButPLURALs': 'all_upper_but_plurals', + 'TargetGroupARNs': 'target_group_arns', + 'HTTPEndpoints': 'http_endpoints', + 'PLURALs': 'plurals' +} + +EXPECTED_REVERSIBLE = { + 'TwoWords': 'two_words', + 'AllUpperAtEND': 'all_upper_at_e_n_d', + 'AllUpperButPLURALs': 'all_upper_but_p_l_u_r_a_ls', + 'TargetGroupARNs': 'target_group_a_r_ns', + 'HTTPEndpoints': 'h_t_t_p_endpoints', + 'PLURALs': 'p_l_u_r_a_ls' +} + + +class CamelToSnakeTestCase(unittest.TestCase): + + def test_camel_to_snake(self): + for (k, v) in EXPECTED_SNAKIFICATION.items(): + self.assertEqual(_camel_to_snake(k), v) + + def test_reversible_camel_to_snake(self): + for (k, v) in EXPECTED_REVERSIBLE.items(): + self.assertEqual(_camel_to_snake(k, reversible=True), v) + + +class SnakeToCamelTestCase(unittest.TestCase): + + def test_snake_to_camel_reversed(self): + for (k, v) in EXPECTED_REVERSIBLE.items(): + self.assertEqual(_snake_to_camel(v, capitalize_first=True), k) + + +class CamelToSnakeAndBackTestCase(unittest.TestCase): + def test_camel_to_snake_and_back(self): + for (k, v) in EXPECTED_REVERSIBLE.items(): + self.assertEqual(_snake_to_camel(_camel_to_snake(k, reversible=True), capitalize_first=True), k) + + +class CamelDictToSnakeDictTestCase(unittest.TestCase): + def test_ignore_list(self): + camel_dict = dict(Hello=dict(One='one', Two='two'), World=dict(Three='three', Four='four')) + snake_dict = camel_dict_to_snake_dict(camel_dict, ignore_list='World') + self.assertEqual(snake_dict['hello'], dict(one='one', two='two')) + self.assertEqual(snake_dict['world'], dict(Three='three', Four='four')) + + +class DictMergeTestCase(unittest.TestCase): + def test_dict_merge(self): + base = dict(obj2=dict(), b1=True, b2=False, b3=False, + one=1, two=2, three=3, obj1=dict(key1=1, key2=2), + l1=[1, 3], l2=[1, 2, 3], l4=[4], + nested=dict(n1=dict(n2=2))) + + other = dict(b1=True, b2=False, b3=True, b4=True, + one=1, three=4, four=4, obj1=dict(key1=2), + l1=[2, 1], l2=[3, 2, 1], l3=[1], + nested=dict(n1=dict(n2=2, n3=3))) + + result = dict_merge(base, other) + + # string assertions + self.assertTrue('one' in result) + self.assertTrue('two' in result) + self.assertEqual(result['three'], 4) + self.assertEqual(result['four'], 4) + + # dict assertions + self.assertTrue('obj1' in result) + self.assertTrue('key1' in result['obj1']) + self.assertTrue('key2' in result['obj1']) + + # list assertions + # this line differs from the network_utils/common test of the function of the + # same name as this method does not merge lists + self.assertEqual(result['l1'], [2, 1]) + self.assertTrue('l2' in result) + self.assertEqual(result['l3'], [1]) + self.assertTrue('l4' in result) + + # nested assertions + self.assertTrue('obj1' in result) + self.assertEqual(result['obj1']['key1'], 2) + self.assertTrue('key2' in result['obj1']) + + # bool assertions + self.assertTrue('b1' in result) + self.assertTrue('b2' in result) + self.assertTrue(result['b3']) + self.assertTrue(result['b4']) + + +class AzureIncidentalTestCase(unittest.TestCase): + + def test_dict_merge_invalid_dict(self): + ''' if b is not a dict, return b ''' + res = dict_merge({}, None) + self.assertEqual(res, None) 
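+
+    # Editor's illustration, not part of the original suite: dict_merge recurses
+    # into values that are dicts on both sides and lets the second argument win
+    # everywhere else (scalars and lists are replaced wholesale), e.g.
+    #
+    #     dict_merge({'a': {'x': 1}, 'l': [1]}, {'a': {'y': 2}, 'l': [2]})
+    #     # -> {'a': {'x': 1, 'y': 2}, 'l': [2]}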
+ + def test_merge_sub_dicts(self): + '''merge sub dicts ''' + a = {'a': {'a1': 1}} + b = {'a': {'b1': 2}} + c = {'a': {'a1': 1, 'b1': 2}} + res = dict_merge(a, b) + self.assertEqual(res, c) diff --git a/test/units/module_utils/common/test_network.py b/test/units/module_utils/common/test_network.py new file mode 100644 index 00000000..1267d0ce --- /dev/null +++ b/test/units/module_utils/common/test_network.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# (c) 2017 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.network import ( + to_masklen, + to_netmask, + to_subnet, + to_ipv6_network, + is_masklen, + is_netmask +) + + +def test_to_masklen(): + assert 24 == to_masklen('255.255.255.0') + + +def test_to_masklen_invalid(): + with pytest.raises(ValueError): + to_masklen('255') + + +def test_to_netmask(): + assert '255.0.0.0' == to_netmask(8) + assert '255.0.0.0' == to_netmask('8') + + +def test_to_netmask_invalid(): + with pytest.raises(ValueError): + to_netmask(128) + + +def test_to_subnet(): + result = to_subnet('192.168.1.1', 24) + assert '192.168.1.0/24' == result + + result = to_subnet('192.168.1.1', 24, dotted_notation=True) + assert '192.168.1.0 255.255.255.0' == result + + +def test_to_subnet_invalid(): + with pytest.raises(ValueError): + to_subnet('foo', 'bar') + + +def test_is_masklen(): + assert is_masklen(32) + assert not is_masklen(33) + assert not is_masklen('foo') + + +def test_is_netmask(): + assert is_netmask('255.255.255.255') + assert not is_netmask(24) + assert not is_netmask('foo') + + +def test_to_ipv6_network(): + assert '2001:db8::' == to_ipv6_network('2001:db8::') + assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0000:0000:8a2e:0370:7334') + assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0:0:8a2e:0370:7334') diff --git a/test/units/module_utils/common/test_removed.py b/test/units/module_utils/common/test_removed.py new file mode 100644 index 00000000..36c1c1e9 --- /dev/null +++ b/test/units/module_utils/common/test_removed.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.removed import removed_module + + +@pytest.mark.parametrize('input_data', [u'2.8', 2.8, 2, '', ]) +def test_removed_module_sys_exit(input_data): + """Test for removed_module function, sys.exit().""" + + with pytest.raises(SystemExit) as wrapped_e: + removed_module(input_data) + + assert wrapped_e.type == SystemExit + assert wrapped_e.value.code == 1 + + +@pytest.mark.parametrize( + 'input_data, expected_msg, expected_warn', + [ + ( + u'2.8', + u'This module has been removed. ' + 'The module documentation for Ansible-2.7 may contain hints for porting', + u'', + ), + ( + 2.8, + u'This module has been removed. ' + 'The module documentation for Ansible-2.7 may contain hints for porting', + u'', + ), + ( + 2, + u'This module has been removed. 
'
+            'The module documentation for Ansible-1 may contain hints for porting',
+            u'',
+        ),
+        (
+            u'café',
+            u'This module has been removed',
+            u'"warnings": ["removed modules should specify the version they were removed in"]',
+        ),
+        (
+            0.1,
+            u'This module has been removed. '
+            'The module documentation for Ansible-0.0 may contain hints for porting',
+            u'',
+        ),
+    ]
+)
+def test_removed_module_msgs(input_data, expected_msg, expected_warn, capsys):
+    """Test for removed_module function, content of output messages."""
+
+    with pytest.raises(SystemExit):
+        removed_module(input_data)
+
+    captured = capsys.readouterr()
+    assert expected_msg in captured.out
+    assert expected_warn in captured.out
diff --git a/test/units/module_utils/common/test_sys_info.py b/test/units/module_utils/common/test_sys_info.py
new file mode 100644
index 00000000..cd68225d
--- /dev/null
+++ b/test/units/module_utils/common/test_sys_info.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan
+# (c) 2016 Toshio Kuratomi
+# (c) 2017-2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from units.compat.mock import patch
+
+from ansible.module_utils.six.moves import builtins
+
+# Functions being tested
+from ansible.module_utils.common.sys_info import get_distribution
+from ansible.module_utils.common.sys_info import get_distribution_version
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+
+realimport = builtins.__import__
+
+
+@pytest.fixture
+def platform_linux(mocker):
+    mocker.patch('platform.system', return_value='Linux')
+
+
+#
+# get_distribution tests
+#
+
+def test_get_distribution_not_linux():
+    """If it's not Linux, then it has no distribution"""
+    with patch('platform.system', return_value='Foo'):
+        assert get_distribution() is None
+
+
+@pytest.mark.usefixtures("platform_linux")
+class TestGetDistribution:
+    """Tests for get_distribution that have to find something"""
+    def test_distro_known(self):
+        with patch('ansible.module_utils.distro.id', return_value="alpine"):
+            assert get_distribution() == "Alpine"
+
+        with patch('ansible.module_utils.distro.id', return_value="arch"):
+            assert get_distribution() == "Arch"
+
+        with patch('ansible.module_utils.distro.id', return_value="centos"):
+            assert get_distribution() == "Centos"
+
+        with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"):
+            assert get_distribution() == "Clear-linux-os"
+
+        with patch('ansible.module_utils.distro.id', return_value="coreos"):
+            assert get_distribution() == "Coreos"
+
+        with patch('ansible.module_utils.distro.id', return_value="debian"):
+            assert get_distribution() == "Debian"
+
+        with patch('ansible.module_utils.distro.id', return_value="flatcar"):
+            assert get_distribution() == "Flatcar"
+
+        with patch('ansible.module_utils.distro.id', return_value="linuxmint"):
+            assert get_distribution() == "Linuxmint"
+
+        with patch('ansible.module_utils.distro.id', return_value="opensuse"):
+            assert get_distribution() == "Opensuse"
+
+        with patch('ansible.module_utils.distro.id', return_value="oracle"):
+            assert get_distribution() == "Oracle"
+
+        with patch('ansible.module_utils.distro.id', return_value="raspian"):
+            assert get_distribution() == "Raspian"
+
+        with patch('ansible.module_utils.distro.id', return_value="rhel"):
+            assert get_distribution() == "Redhat"
+
+        with patch('ansible.module_utils.distro.id', return_value="ubuntu"):
+            assert get_distribution() == "Ubuntu"
+
+        with 
patch('ansible.module_utils.distro.id', return_value="virtuozzo"): + assert get_distribution() == "Virtuozzo" + + with patch('ansible.module_utils.distro.id', return_value="foo"): + assert get_distribution() == "Foo" + + def test_distro_unknown(self): + with patch('ansible.module_utils.distro.id', return_value=""): + assert get_distribution() == "OtherLinux" + + def test_distro_amazon_linux_short(self): + with patch('ansible.module_utils.distro.id', return_value="amzn"): + assert get_distribution() == "Amazon" + + def test_distro_amazon_linux_long(self): + with patch('ansible.module_utils.distro.id', return_value="amazon"): + assert get_distribution() == "Amazon" + + +# +# get_distribution_version tests +# + +def test_get_distribution_version_not_linux(): + """If it's not Linux, then it has no distribution""" + with patch('platform.system', return_value='Foo'): + assert get_distribution_version() is None + + +@pytest.mark.usefixtures("platform_linux") +def test_distro_found(): + with patch('ansible.module_utils.distro.version', return_value="1"): + assert get_distribution_version() == "1" + + +# +# Tests for get_platform_subclass +# + +class TestGetPlatformSubclass: + class LinuxTest: + pass + + class Foo(LinuxTest): + platform = "Linux" + distribution = None + + class Bar(LinuxTest): + platform = "Linux" + distribution = "Bar" + + def test_not_linux(self): + # if neither match, the fallback should be the top-level class + with patch('platform.system', return_value="Foo"): + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None): + assert get_platform_subclass(self.LinuxTest) is self.LinuxTest + + @pytest.mark.usefixtures("platform_linux") + def test_get_distribution_none(self): + # match just the platform class, not a specific distribution + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None): + assert get_platform_subclass(self.LinuxTest) is self.Foo + + @pytest.mark.usefixtures("platform_linux") + def test_get_distribution_found(self): + # match both the distribution and platform class + with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"): + assert get_platform_subclass(self.LinuxTest) is self.Bar diff --git a/test/units/module_utils/common/test_utils.py b/test/units/module_utils/common/test_utils.py new file mode 100644 index 00000000..ef952393 --- /dev/null +++ b/test/units/module_utils/common/test_utils.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.common.sys_info import get_all_subclasses + + +# +# Tests for get_all_subclasses +# + +class TestGetAllSubclasses: + class Base: + pass + + class BranchI(Base): + pass + + class BranchII(Base): + pass + + class BranchIA(BranchI): + pass + + class BranchIB(BranchI): + pass + + class BranchIIA(BranchII): + pass + + class BranchIIB(BranchII): + pass + + def test_bottom_level(self): + assert get_all_subclasses(self.BranchIIB) == set() + + def test_one_inheritance(self): + assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB]) + + def test_toplevel(self): + assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII, + self.BranchIA, self.BranchIB, + self.BranchIIA, self.BranchIIB]) diff --git 
a/test/units/module_utils/common/text/converters/test_container_to_bytes.py b/test/units/module_utils/common/text/converters/test_container_to_bytes.py new file mode 100644 index 00000000..091545e3 --- /dev/null +++ b/test/units/module_utils/common/text/converters/test_container_to_bytes.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.text.converters import container_to_bytes + + +DEFAULT_ENCODING = 'utf-8' +DEFAULT_ERR_HANDLER = 'surrogate_or_strict' + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + ({1: 1}, {1: 1}), + ([1, 2], [1, 2]), + ((1, 2), (1, 2)), + (1, 1), + (1.1, 1.1), + (b'str', b'str'), + (u'str', b'str'), + ([u'str'], [b'str']), + ((u'str'), (b'str')), + ({u'str': u'str'}, {b'str': b'str'}), + ] +) +@pytest.mark.parametrize('encoding', ['utf-8', 'latin1', 'shift_jis', 'big5', 'koi8_r']) +@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace']) +def test_container_to_bytes(test_input, expected, encoding, errors): + """Test for passing objects to container_to_bytes().""" + assert container_to_bytes(test_input, encoding=encoding, errors=errors) == expected + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + ({1: 1}, {1: 1}), + ([1, 2], [1, 2]), + ((1, 2), (1, 2)), + (1, 1), + (1.1, 1.1), + (True, True), + (None, None), + (u'str', u'str'.encode(DEFAULT_ENCODING)), + (u'くらとみ', u'くらとみ'.encode(DEFAULT_ENCODING)), + (u'café', u'café'.encode(DEFAULT_ENCODING)), + (b'str', u'str'.encode(DEFAULT_ENCODING)), + (u'str', u'str'.encode(DEFAULT_ENCODING)), + ([u'str'], [u'str'.encode(DEFAULT_ENCODING)]), + ((u'str'), (u'str'.encode(DEFAULT_ENCODING))), + ({u'str': u'str'}, {u'str'.encode(DEFAULT_ENCODING): u'str'.encode(DEFAULT_ENCODING)}), + ] +) +def test_container_to_bytes_default_encoding_err(test_input, expected): + """ + Test for passing objects to container_to_bytes(). Default encoding and errors + """ + assert container_to_bytes(test_input, encoding=DEFAULT_ENCODING, + errors=DEFAULT_ERR_HANDLER) == expected + + +@pytest.mark.parametrize( + 'test_input,encoding', + [ + (u'くらとみ', 'latin1'), + (u'café', 'shift_jis'), + ] +) +@pytest.mark.parametrize('errors', ['surrogate_or_strict', 'strict']) +def test_container_to_bytes_incomp_chars_and_encod(test_input, encoding, errors): + """ + Test for passing incompatible characters and encodings container_to_bytes(). + """ + with pytest.raises(UnicodeEncodeError, match="codec can't encode"): + container_to_bytes(test_input, encoding=encoding, errors=errors) + + +@pytest.mark.parametrize( + 'test_input,encoding,expected', + [ + (u'くらとみ', 'latin1', b'????'), + (u'café', 'shift_jis', b'caf?'), + ] +) +def test_container_to_bytes_surrogate_then_replace(test_input, encoding, expected): + """ + Test for container_to_bytes() with surrogate_then_replace err handler. 
+ """ + assert container_to_bytes(test_input, encoding=encoding, + errors='surrogate_then_replace') == expected diff --git a/test/units/module_utils/common/text/converters/test_container_to_text.py b/test/units/module_utils/common/text/converters/test_container_to_text.py new file mode 100644 index 00000000..39038f51 --- /dev/null +++ b/test/units/module_utils/common/text/converters/test_container_to_text.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.text.converters import container_to_text + + +DEFAULT_ENCODING = 'utf-8' +DEFAULT_ERR_HANDLER = 'surrogate_or_strict' + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + ({1: 1}, {1: 1}), + ([1, 2], [1, 2]), + ((1, 2), (1, 2)), + (1, 1), + (1.1, 1.1), + (b'str', u'str'), + (u'str', u'str'), + ([b'str'], [u'str']), + ((b'str'), (u'str')), + ({b'str': b'str'}, {u'str': u'str'}), + ] +) +@pytest.mark.parametrize('encoding', ['utf-8', 'latin1', 'shift-jis', 'big5', 'koi8_r', ]) +@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace', ]) +def test_container_to_text_different_types(test_input, expected, encoding, errors): + """Test for passing objects to container_to_text().""" + assert container_to_text(test_input, encoding=encoding, errors=errors) == expected + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + ({1: 1}, {1: 1}), + ([1, 2], [1, 2]), + ((1, 2), (1, 2)), + (1, 1), + (1.1, 1.1), + (True, True), + (None, None), + (u'str', u'str'), + (u'くらとみ'.encode(DEFAULT_ENCODING), u'くらとみ'), + (u'café'.encode(DEFAULT_ENCODING), u'café'), + (u'str'.encode(DEFAULT_ENCODING), u'str'), + ([u'str'.encode(DEFAULT_ENCODING)], [u'str']), + ((u'str'.encode(DEFAULT_ENCODING)), (u'str')), + ({b'str': b'str'}, {u'str': u'str'}), + ] +) +def test_container_to_text_default_encoding_and_err(test_input, expected): + """ + Test for passing objects to container_to_text(). Default encoding and errors + """ + assert container_to_text(test_input, encoding=DEFAULT_ENCODING, + errors=DEFAULT_ERR_HANDLER) == expected + + +@pytest.mark.parametrize( + 'test_input,encoding,expected', + [ + (u'й'.encode('utf-8'), 'latin1', u'й'), + (u'café'.encode('utf-8'), 'shift_jis', u'cafテゥ'), + ] +) +@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace', ]) +def test_container_to_text_incomp_encod_chars(test_input, encoding, errors, expected): + """ + Test for passing incompatible characters and encodings container_to_text(). 
+ """ + assert container_to_text(test_input, encoding=encoding, errors=errors) == expected diff --git a/test/units/module_utils/common/text/converters/test_json_encode_fallback.py b/test/units/module_utils/common/text/converters/test_json_encode_fallback.py new file mode 100644 index 00000000..8cf33529 --- /dev/null +++ b/test/units/module_utils/common/text/converters/test_json_encode_fallback.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from datetime import datetime + +from pytz import timezone as tz + +from ansible.module_utils.common.text.converters import _json_encode_fallback + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + (set([1]), [1]), + (datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'), + (datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'), + (datetime(2019, 6, 15, 14, 45, tzinfo=tz('UTC')), '2019-06-15T14:45:00+00:00'), + (datetime(2019, 6, 15, 14, 45, tzinfo=tz('Europe/Helsinki')), '2019-06-15T14:45:00+01:40'), + ] +) +def test_json_encode_fallback(test_input, expected): + """ + Test for passing expected objects to _json_encode_fallback(). + """ + assert _json_encode_fallback(test_input) == expected + + +@pytest.mark.parametrize( + 'test_input', + [ + 1, + 1.1, + u'string', + b'string', + [1, 2], + True, + None, + {1: 1}, + (1, 2), + ] +) +def test_json_encode_fallback_default_behavior(test_input): + """ + Test for _json_encode_fallback() default behavior. + + It must fail with TypeError. + """ + with pytest.raises(TypeError, match='Cannot json serialize'): + _json_encode_fallback(test_input) diff --git a/test/units/module_utils/common/text/converters/test_jsonify.py b/test/units/module_utils/common/text/converters/test_jsonify.py new file mode 100644 index 00000000..a3415313 --- /dev/null +++ b/test/units/module_utils/common/text/converters/test_jsonify.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.text.converters import jsonify + + +@pytest.mark.parametrize( + 'test_input,expected', + [ + (1, '1'), + (u'string', u'"string"'), + (u'くらとみ', u'"\\u304f\\u3089\\u3068\\u307f"'), + (u'café', u'"caf\\u00e9"'), + (b'string', u'"string"'), + (False, u'false'), + (u'string'.encode('utf-8'), u'"string"'), + ] +) +def test_jsonify(test_input, expected): + """Test for jsonify().""" + assert jsonify(test_input) == expected diff --git a/test/units/module_utils/common/text/converters/test_to_str.py b/test/units/module_utils/common/text/converters/test_to_str.py new file mode 100644 index 00000000..b645db6d --- /dev/null +++ b/test/units/module_utils/common/text/converters/test_to_str.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# (c) 2016 Toshio Kuratomi +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import itertools + +import pytest + +from ansible.module_utils.six import PY3 + +from 
ansible.module_utils.common.text.converters import to_text, to_bytes, to_native +from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText + + +# Format: byte representation, text representation, encoding of byte representation +VALID_STRINGS = ( + (b'abcde', u'abcde', 'ascii'), + (b'caf\xc3\xa9', u'caf\xe9', 'utf-8'), + (b'caf\xe9', u'caf\xe9', 'latin-1'), + # u'くらとみ' + (b'\xe3\x81\x8f\xe3\x82\x89\xe3\x81\xa8\xe3\x81\xbf', u'\u304f\u3089\u3068\u307f', 'utf-8'), + (b'\x82\xad\x82\xe7\x82\xc6\x82\xdd', u'\u304f\u3089\u3068\u307f', 'shift-jis'), +) + + +@pytest.mark.parametrize('in_string, encoding, expected', + itertools.chain(((d[0], d[2], d[1]) for d in VALID_STRINGS), + ((d[1], d[2], d[1]) for d in VALID_STRINGS))) +def test_to_text(in_string, encoding, expected): + """test happy path of decoding to text""" + assert to_text(in_string, encoding) == expected + + +@pytest.mark.parametrize('in_string, encoding, expected', + itertools.chain(((d[0], d[2], d[0]) for d in VALID_STRINGS), + ((d[1], d[2], d[0]) for d in VALID_STRINGS))) +def test_to_bytes(in_string, encoding, expected): + """test happy path of encoding to bytes""" + assert to_bytes(in_string, encoding) == expected + + +@pytest.mark.parametrize('in_string, encoding, expected', + itertools.chain(((d[0], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS), + ((d[1], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS))) +def test_to_native(in_string, encoding, expected): + """test happy path of encoding to native strings""" + assert to_native(in_string, encoding) == expected + + +def test_to_text_unsafe(): + assert isinstance(to_text(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeText) + assert to_text(AnsibleUnsafeBytes(b'foo')) == AnsibleUnsafeText(u'foo') + + +def test_to_bytes_unsafe(): + assert isinstance(to_bytes(AnsibleUnsafeText(u'foo')), AnsibleUnsafeBytes) + assert to_bytes(AnsibleUnsafeText(u'foo')) == AnsibleUnsafeBytes(b'foo') diff --git a/test/units/module_utils/common/text/formatters/test_bytes_to_human.py b/test/units/module_utils/common/text/formatters/test_bytes_to_human.py new file mode 100644 index 00000000..41475f56 --- /dev/null +++ b/test/units/module_utils/common/text/formatters/test_bytes_to_human.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.text.formatters import bytes_to_human + + +@pytest.mark.parametrize( + 'input_data,expected', + [ + (0, u'0.00 Bytes'), + (0.5, u'0.50 Bytes'), + (0.54, u'0.54 Bytes'), + (1024, u'1.00 KB'), + (1025, u'1.00 KB'), + (1536, u'1.50 KB'), + (1790, u'1.75 KB'), + (1048576, u'1.00 MB'), + (1073741824, u'1.00 GB'), + (1099511627776, u'1.00 TB'), + (1125899906842624, u'1.00 PB'), + (1152921504606846976, u'1.00 EB'), + (1180591620717411303424, u'1.00 ZB'), + (1208925819614629174706176, u'1.00 YB'), + ] +) +def test_bytes_to_human(input_data, expected): + """Test of bytes_to_human function, only proper numbers are passed.""" + assert bytes_to_human(input_data) == expected + + +@pytest.mark.parametrize( + 'input_data,expected', + [ + (0, u'0.00 bits'), + (0.5, u'0.50 bits'), + (0.54, u'0.54 bits'), + (1024, u'1.00 Kb'), + (1025, u'1.00 Kb'), + (1536, u'1.50 Kb'), + (1790, u'1.75 Kb'), + (1048576, u'1.00 Mb'), + (1073741824, u'1.00 Gb'), + (1099511627776, u'1.00 Tb'), + 
(1125899906842624, u'1.00 Pb'), + (1152921504606846976, u'1.00 Eb'), + (1180591620717411303424, u'1.00 Zb'), + (1208925819614629174706176, u'1.00 Yb'), + ] +) +def test_bytes_to_human_isbits(input_data, expected): + """Test of bytes_to_human function with isbits=True proper results.""" + assert bytes_to_human(input_data, isbits=True) == expected + + +@pytest.mark.parametrize( + 'input_data,unit,expected', + [ + (0, u'B', u'0.00 Bytes'), + (0.5, u'B', u'0.50 Bytes'), + (0.54, u'B', u'0.54 Bytes'), + (1024, u'K', u'1.00 KB'), + (1536, u'K', u'1.50 KB'), + (1790, u'K', u'1.75 KB'), + (1048576, u'M', u'1.00 MB'), + (1099511627776, u'T', u'1.00 TB'), + (1152921504606846976, u'E', u'1.00 EB'), + (1180591620717411303424, u'Z', u'1.00 ZB'), + (1208925819614629174706176, u'Y', u'1.00 YB'), + (1025, u'KB', u'1025.00 Bytes'), + (1073741824, u'Gb', u'1073741824.00 Bytes'), + (1125899906842624, u'Pb', u'1125899906842624.00 Bytes'), + ] +) +def test_bytes_to_human_unit(input_data, unit, expected): + """Test unit argument of bytes_to_human function proper results.""" + assert bytes_to_human(input_data, unit=unit) == expected + + +@pytest.mark.parametrize( + 'input_data,unit,expected', + [ + (0, u'B', u'0.00 bits'), + (0.5, u'B', u'0.50 bits'), + (0.54, u'B', u'0.54 bits'), + (1024, u'K', u'1.00 Kb'), + (1536, u'K', u'1.50 Kb'), + (1790, u'K', u'1.75 Kb'), + (1048576, u'M', u'1.00 Mb'), + (1099511627776, u'T', u'1.00 Tb'), + (1152921504606846976, u'E', u'1.00 Eb'), + (1180591620717411303424, u'Z', u'1.00 Zb'), + (1208925819614629174706176, u'Y', u'1.00 Yb'), + (1025, u'KB', u'1025.00 bits'), + (1073741824, u'Gb', u'1073741824.00 bits'), + (1125899906842624, u'Pb', u'1125899906842624.00 bits'), + ] +) +def test_bytes_to_human_unit_isbits(input_data, unit, expected): + """Test unit argument of bytes_to_human function with isbits=True proper results.""" + assert bytes_to_human(input_data, isbits=True, unit=unit) == expected + + +@pytest.mark.parametrize('input_data', [0j, u'1B', [1], {1: 1}, None, b'1B']) +def test_bytes_to_human_illegal_size(input_data): + """Test of bytes_to_human function, illegal objects are passed as a size.""" + e_regexp = (r'(no ordering relation is defined for complex numbers)|' + r'(unsupported operand type\(s\) for /)|(unorderable types)|' + r'(not supported between instances of)') + with pytest.raises(TypeError, match=e_regexp): + bytes_to_human(input_data) diff --git a/test/units/module_utils/common/text/formatters/test_human_to_bytes.py b/test/units/module_utils/common/text/formatters/test_human_to_bytes.py new file mode 100644 index 00000000..d02699a6 --- /dev/null +++ b/test/units/module_utils/common/text/formatters/test_human_to_bytes.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# Copyright 2019, Sviatoslav Sydorenko +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.text.formatters import human_to_bytes + + +NUM_IN_METRIC = { + 'K': 2 ** 10, + 'M': 2 ** 20, + 'G': 2 ** 30, + 'T': 2 ** 40, + 'P': 2 ** 50, + 'E': 2 ** 60, + 'Z': 2 ** 70, + 'Y': 2 ** 80, +} + + +@pytest.mark.parametrize( + 'input_data,expected', + [ + (0, 0), + (u'0B', 0), + (1024, NUM_IN_METRIC['K']), + (u'1024B', NUM_IN_METRIC['K']), + (u'1K', NUM_IN_METRIC['K']), + (u'1KB', NUM_IN_METRIC['K']), + (u'1M', NUM_IN_METRIC['M']), + (u'1MB', NUM_IN_METRIC['M']), + (u'1G', 
NUM_IN_METRIC['G']), + (u'1GB', NUM_IN_METRIC['G']), + (u'1T', NUM_IN_METRIC['T']), + (u'1TB', NUM_IN_METRIC['T']), + (u'1P', NUM_IN_METRIC['P']), + (u'1PB', NUM_IN_METRIC['P']), + (u'1E', NUM_IN_METRIC['E']), + (u'1EB', NUM_IN_METRIC['E']), + (u'1Z', NUM_IN_METRIC['Z']), + (u'1ZB', NUM_IN_METRIC['Z']), + (u'1Y', NUM_IN_METRIC['Y']), + (u'1YB', NUM_IN_METRIC['Y']), + ] +) +def test_human_to_bytes_number(input_data, expected): + """Test of human_to_bytes function, only number arg is passed.""" + assert human_to_bytes(input_data) == expected + + +@pytest.mark.parametrize( + 'input_data,unit', + [ + (u'1024', 'B'), + (1, u'K'), + (1, u'KB'), + (u'1', u'M'), + (u'1', u'MB'), + (1, u'G'), + (1, u'GB'), + (1, u'T'), + (1, u'TB'), + (u'1', u'P'), + (u'1', u'PB'), + (u'1', u'E'), + (u'1', u'EB'), + (u'1', u'Z'), + (u'1', u'ZB'), + (u'1', u'Y'), + (u'1', u'YB'), + ] +) +def test_human_to_bytes_number_unit(input_data, unit): + """Test of human_to_bytes function, number and default_unit args are passed.""" + assert human_to_bytes(input_data, default_unit=unit) == NUM_IN_METRIC.get(unit[0], 1024) + + +@pytest.mark.parametrize('test_input', [u'1024s', u'1024w', ]) +def test_human_to_bytes_wrong_unit(test_input): + """Test of human_to_bytes function, wrong units.""" + with pytest.raises(ValueError, match="The suffix must be one of"): + human_to_bytes(test_input) + + +@pytest.mark.parametrize('test_input', [u'b1bbb', u'm2mmm', u'', u' ', -1]) +def test_human_to_bytes_wrong_number(test_input): + """Test of human_to_bytes function, number param is invalid string / number.""" + with pytest.raises(ValueError, match="can't interpret"): + human_to_bytes(test_input) + + +@pytest.mark.parametrize( + 'input_data,expected', + [ + (0, 0), + (u'0B', 0), + (u'1024b', 1024), + (u'1024B', 1024), + (u'1K', NUM_IN_METRIC['K']), + (u'1Kb', NUM_IN_METRIC['K']), + (u'1M', NUM_IN_METRIC['M']), + (u'1Mb', NUM_IN_METRIC['M']), + (u'1G', NUM_IN_METRIC['G']), + (u'1Gb', NUM_IN_METRIC['G']), + (u'1T', NUM_IN_METRIC['T']), + (u'1Tb', NUM_IN_METRIC['T']), + (u'1P', NUM_IN_METRIC['P']), + (u'1Pb', NUM_IN_METRIC['P']), + (u'1E', NUM_IN_METRIC['E']), + (u'1Eb', NUM_IN_METRIC['E']), + (u'1Z', NUM_IN_METRIC['Z']), + (u'1Zb', NUM_IN_METRIC['Z']), + (u'1Y', NUM_IN_METRIC['Y']), + (u'1Yb', NUM_IN_METRIC['Y']), + ] +) +def test_human_to_bytes_isbits(input_data, expected): + """Test of human_to_bytes function, isbits = True.""" + assert human_to_bytes(input_data, isbits=True) == expected + + +@pytest.mark.parametrize( + 'input_data,unit', + [ + (1024, 'b'), + (1024, 'B'), + (1, u'K'), + (1, u'Kb'), + (u'1', u'M'), + (u'1', u'Mb'), + (1, u'G'), + (1, u'Gb'), + (1, u'T'), + (1, u'Tb'), + (u'1', u'P'), + (u'1', u'Pb'), + (u'1', u'E'), + (u'1', u'Eb'), + (u'1', u'Z'), + (u'1', u'Zb'), + (u'1', u'Y'), + (u'1', u'Yb'), + ] +) +def test_human_to_bytes_isbits_default_unit(input_data, unit): + """Test of human_to_bytes function, isbits = True and default_unit args are passed.""" + assert human_to_bytes(input_data, default_unit=unit, isbits=True) == NUM_IN_METRIC.get(unit[0], 1024) + + +@pytest.mark.parametrize( + 'test_input,isbits', + [ + ('1024Kb', False), + ('10Mb', False), + ('1Gb', False), + ('10MB', True), + ('2KB', True), + ('4GB', True), + ] +) +def test_human_to_bytes_isbits_wrong_unit(test_input, isbits): + """Test of human_to_bytes function, unit identifier is in an invalid format for isbits value.""" + with pytest.raises(ValueError, match="Value is not a valid string"): + human_to_bytes(test_input, isbits=isbits) + + 
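+# Editor's illustration, not part of the original suite: isbits only selects
+# which suffix family is accepted; the multiplier is the same binary 2**10
+# step either way, per the NUM_IN_METRIC table above:
+#
+#     human_to_bytes('1KB')               # -> 1024 (bytes)
+#     human_to_bytes('1Kb', isbits=True)  # -> 1024 (bits)
+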
+@pytest.mark.parametrize( + 'test_input,unit,isbits', + [ + (1024, 'Kb', False), + ('10', 'Mb', False), + ('10', 'MB', True), + (2, 'KB', True), + ('4', 'GB', True), + ] +) +def test_human_to_bytes_isbits_wrong_default_unit(test_input, unit, isbits): + """Test of human_to_bytes function, default_unit is in an invalid format for isbits value.""" + with pytest.raises(ValueError, match="Value is not a valid string"): + human_to_bytes(test_input, default_unit=unit, isbits=isbits) diff --git a/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py b/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py new file mode 100644 index 00000000..1ecc013e --- /dev/null +++ b/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2019, Andrew Klychkov @Andersson007 +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from datetime import datetime + +import pytest + +from ansible.module_utils.common.text.formatters import lenient_lowercase + + +INPUT_LIST = [ + u'HELLO', + u'Ёлка', + u'cafÉ', + u'くらとみ', + b'HELLO', + 1, + {1: 'Dict'}, + True, + [1], + 3.14159, +] + +EXPECTED_LIST = [ + u'hello', + u'ёлка', + u'café', + u'くらとみ', + b'hello', + 1, + {1: 'Dict'}, + True, + [1], + 3.14159, +] + +result_list = lenient_lowercase(INPUT_LIST) + + +@pytest.mark.parametrize( + 'input_value,expected_outcome', + [ + (result_list[0], EXPECTED_LIST[0]), + (result_list[1], EXPECTED_LIST[1]), + (result_list[2], EXPECTED_LIST[2]), + (result_list[3], EXPECTED_LIST[3]), + (result_list[4], EXPECTED_LIST[4]), + (result_list[5], EXPECTED_LIST[5]), + (result_list[6], EXPECTED_LIST[6]), + (result_list[7], EXPECTED_LIST[7]), + (result_list[8], EXPECTED_LIST[8]), + (result_list[9], EXPECTED_LIST[9]), + ] +) +def test_lenient_lowercase(input_value, expected_outcome): + """Test that lenient_lowercase() proper results.""" + assert input_value == expected_outcome + + +@pytest.mark.parametrize('input_data', [1, False, 1.001, 1j, datetime.now(), ]) +def test_lenient_lowercase_illegal_data_type(input_data): + """Test passing objects of illegal types to lenient_lowercase().""" + with pytest.raises(TypeError, match='object is not iterable'): + lenient_lowercase(input_data) diff --git a/test/units/module_utils/common/validation/test_check_mutually_exclusive.py b/test/units/module_utils/common/validation/test_check_mutually_exclusive.py new file mode 100644 index 00000000..7bf90760 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_mutually_exclusive.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_mutually_exclusive + + +@pytest.fixture +def mutually_exclusive_terms(): + return [ + ('string1', 'string2',), + ('box', 'fox', 'socks'), + ] + + +def test_check_mutually_exclusive(mutually_exclusive_terms): + params = { + 'string1': 'cat', + 'fox': 'hat', + } + assert check_mutually_exclusive(mutually_exclusive_terms, params) == [] + + +def test_check_mutually_exclusive_found(mutually_exclusive_terms): + params = { + 'string1': 'cat', + 
'string2': 'hat', + 'fox': 'red', + 'socks': 'blue', + } + expected = "parameters are mutually exclusive: string1|string2, box|fox|socks" + + with pytest.raises(TypeError) as e: + check_mutually_exclusive(mutually_exclusive_terms, params) + + assert to_native(e.value) == expected + + +def test_check_mutually_exclusive_none(): + terms = None + params = { + 'string1': 'cat', + 'fox': 'hat', + } + assert check_mutually_exclusive(terms, params) == [] + + +def test_check_mutually_exclusive_no_params(mutually_exclusive_terms): + with pytest.raises(TypeError) as te: + check_mutually_exclusive(mutually_exclusive_terms, None) + assert "'NoneType' object is not iterable" in to_native(te.value) diff --git a/test/units/module_utils/common/validation/test_check_required_arguments.py b/test/units/module_utils/common/validation/test_check_required_arguments.py new file mode 100644 index 00000000..1dd54584 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_required_arguments.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_required_arguments + + +@pytest.fixture +def arguments_terms(): + return { + 'foo': { + 'required': True, + }, + 'bar': { + 'required': False, + }, + 'tomato': { + 'irrelevant': 72, + }, + } + + +@pytest.fixture +def arguments_terms_multiple(): + return { + 'foo': { + 'required': True, + }, + 'bar': { + 'required': True, + }, + 'tomato': { + 'irrelevant': 72, + }, + } + + +def test_check_required_arguments(arguments_terms): + params = { + 'foo': 'hello', + 'bar': 'haha', + } + assert check_required_arguments(arguments_terms, params) == [] + + +def test_check_required_arguments_missing(arguments_terms): + params = { + 'apples': 'woohoo', + } + expected = "missing required arguments: foo" + + with pytest.raises(TypeError) as e: + check_required_arguments(arguments_terms, params) + + assert to_native(e.value) == expected + + +def test_check_required_arguments_missing_multiple(arguments_terms_multiple): + params = { + 'apples': 'woohoo', + } + expected = "missing required arguments: bar, foo" + + with pytest.raises(TypeError) as e: + check_required_arguments(arguments_terms_multiple, params) + + assert to_native(e.value) == expected + + +def test_check_required_arguments_missing_none(): + terms = None + params = { + 'foo': 'bar', + 'baz': 'buzz', + } + assert check_required_arguments(terms, params) == [] + + +def test_check_required_arguments_no_params(arguments_terms): + with pytest.raises(TypeError) as te: + check_required_arguments(arguments_terms, None) + assert "'NoneType' is not iterable" in to_native(te.value) diff --git a/test/units/module_utils/common/validation/test_check_required_together.py b/test/units/module_utils/common/validation/test_check_required_together.py new file mode 100644 index 00000000..8a2daab1 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_required_together.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import 
to_native +from ansible.module_utils.common.validation import check_required_together + + +@pytest.fixture +def together_terms(): + return [ + ['bananas', 'potatoes'], + ['cats', 'wolves'] + ] + + +def test_check_required_together(together_terms): + params = { + 'bananas': 'hello', + 'potatoes': 'this is here too', + 'dogs': 'haha', + } + assert check_required_together(together_terms, params) == [] + + +def test_check_required_together_missing(together_terms): + params = { + 'bananas': 'woohoo', + 'wolves': 'uh oh', + } + expected = "parameters are required together: bananas, potatoes" + + with pytest.raises(TypeError) as e: + check_required_together(together_terms, params) + + assert to_native(e.value) == expected + + +def test_check_required_together_missing_none(): + terms = None + params = { + 'foo': 'bar', + 'baz': 'buzz', + } + assert check_required_together(terms, params) == [] + + +def test_check_required_together_no_params(together_terms): + with pytest.raises(TypeError) as te: + check_required_together(together_terms, None) + + assert "'NoneType' object is not iterable" in to_native(te.value) diff --git a/test/units/module_utils/common/validation/test_check_type_bits.py b/test/units/module_utils/common/validation/test_check_type_bits.py new file mode 100644 index 00000000..7f6b11d3 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_bits.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_bits + + +def test_check_type_bits(): + test_cases = ( + ('1', 1), + (99, 99), + (1.5, 2), + ('1.5', 2), + ('2b', 2), + ('2k', 2048), + ('2K', 2048), + ('1m', 1048576), + ('1M', 1048576), + ('1g', 1073741824), + ('1G', 1073741824), + (1073741824, 1073741824), + ) + for case in test_cases: + assert case[1] == check_type_bits(case[0]) + + +def test_check_type_bits_fail(): + test_cases = ( + 'foo', + '2KB', + '1MB', + '1GB', + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_bits(case) + assert 'cannot be converted to a Bit value' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_bool.py b/test/units/module_utils/common/validation/test_check_type_bool.py new file mode 100644 index 00000000..bd867dc9 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_bool.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_bool + + +def test_check_type_bool(): + test_cases = ( + (True, True), + (False, False), + ('1', True), + ('on', True), + (1, True), + ('0', False), + (0, False), + ('n', False), + ('f', False), + ('false', False), + ('true', True), + ('y', True), + ('t', True), + ('yes', True), + ('no', False), + ('off', False), + ) + for case in test_cases: + assert case[1] == check_type_bool(case[0]) + + +def test_check_type_bool_fail(): + default_test_msg = 'cannot be converted to a bool' + test_cases = ( 
+ ({'k1': 'v1'}, 'is not a valid bool'), + (3.14159, default_test_msg), + (-1, default_test_msg), + (-90810398401982340981023948192349081, default_test_msg), + (90810398401982340981023948192349081, default_test_msg), + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_bool(case) + assert 'cannot be converted to a bool' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_bytes.py b/test/units/module_utils/common/validation/test_check_type_bytes.py new file mode 100644 index 00000000..6ff62dc2 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_bytes.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_bytes + + +def test_check_type_bytes(): + test_cases = ( + ('1', 1), + (99, 99), + (1.5, 2), + ('1.5', 2), + ('2b', 2), + ('2B', 2), + ('2k', 2048), + ('2K', 2048), + ('2KB', 2048), + ('1m', 1048576), + ('1M', 1048576), + ('1MB', 1048576), + ('1g', 1073741824), + ('1G', 1073741824), + ('1GB', 1073741824), + (1073741824, 1073741824), + ) + for case in test_cases: + assert case[1] == check_type_bytes(case[0]) + + +def test_check_type_bytes_fail(): + test_cases = ( + 'foo', + '2kb', + '2Kb', + '1mb', + '1Mb', + '1gb', + '1Gb', + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_bytes(case) + assert 'cannot be converted to a Byte value' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_dict.py b/test/units/module_utils/common/validation/test_check_type_dict.py new file mode 100644 index 00000000..75638c58 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_dict.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.validation import check_type_dict + + +def test_check_type_dict(): + test_cases = ( + ({'k1': 'v1'}, {'k1': 'v1'}), + ('k1=v1,k2=v2', {'k1': 'v1', 'k2': 'v2'}), + ('k1=v1, k2=v2', {'k1': 'v1', 'k2': 'v2'}), + ('k1=v1, k2=v2, k3=v3', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}), + ('{"key": "value", "list": ["one", "two"]}', {'key': 'value', 'list': ['one', 'two']}) + ) + for case in test_cases: + assert case[1] == check_type_dict(case[0]) + + +def test_check_type_dict_fail(): + test_cases = ( + 1, + 3.14159, + [1, 2], + 'a', + ) + for case in test_cases: + with pytest.raises(TypeError): + check_type_dict(case) diff --git a/test/units/module_utils/common/validation/test_check_type_float.py b/test/units/module_utils/common/validation/test_check_type_float.py new file mode 100644 index 00000000..57837fae --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_float.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import 
to_native +from ansible.module_utils.common.validation import check_type_float + + +def test_check_type_float(): + test_cases = ( + ('1.5', 1.5), + ('''1.5''', 1.5), + (u'1.5', 1.5), + (1002, 1002.0), + (1.0, 1.0), + (3.141592653589793, 3.141592653589793), + ('3.141592653589793', 3.141592653589793), + (b'3.141592653589793', 3.141592653589793), + ) + for case in test_cases: + assert case[1] == check_type_float(case[0]) + + +def test_check_type_float_fail(): + test_cases = ( + {'k1': 'v1'}, + ['a', 'b'], + 'b', + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_float(case) + assert 'cannot be converted to a float' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_int.py b/test/units/module_utils/common/validation/test_check_type_int.py new file mode 100644 index 00000000..22cedf61 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_int.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_int + + +def test_check_type_int(): + test_cases = ( + ('1', 1), + (u'1', 1), + (1002, 1002), + ) + for case in test_cases: + assert case[1] == check_type_int(case[0]) + + +def test_check_type_int_fail(): + test_cases = ( + {'k1': 'v1'}, + (b'1', 1), + (3.14159, 3), + 'b', + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_int(case) + assert 'cannot be converted to an int' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_jsonarg.py b/test/units/module_utils/common/validation/test_check_type_jsonarg.py new file mode 100644 index 00000000..e78e54bb --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_jsonarg.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_jsonarg + + +def test_check_type_jsonarg(): + test_cases = ( + ('a', 'a'), + ('a ', 'a'), + (b'99', b'99'), + (b'99 ', b'99'), + ({'k1': 'v1'}, '{"k1": "v1"}'), + ([1, 'a'], '[1, "a"]'), + ((1, 2, 'three'), '[1, 2, "three"]'), + ) + for case in test_cases: + assert case[1] == check_type_jsonarg(case[0]) + + +def test_check_type_jsonarg_fail(): + test_cases = ( + 1.5, + 910313498012384012341982374109384098, + ) + for case in test_cases: + with pytest.raises(TypeError) as e: + check_type_jsonarg(case) + assert 'cannot be converted to a json string' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_check_type_list.py b/test/units/module_utils/common/validation/test_check_type_list.py new file mode 100644 index 00000000..3f7a9ee6 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_list.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function 
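+# As the cases below exercise, check_type_list passes lists through unchanged, +# splits strings on commas, and wraps other scalar values in a one-element +# list of strings; a mapping is the failure case.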
+__metaclass__ = type + +import pytest + +from ansible.module_utils.common.validation import check_type_list + + +def test_check_type_list(): + test_cases = ( + ([1, 2], [1, 2]), + (1, ['1']), + (['a', 'b'], ['a', 'b']), + ('a', ['a']), + (3.14159, ['3.14159']), + ('a,b,1,2', ['a', 'b', '1', '2']) + ) + for case in test_cases: + assert case[1] == check_type_list(case[0]) + + +def test_check_type_list_failure(): + test_cases = ( + {'k1': 'v1'}, + ) + for case in test_cases: + with pytest.raises(TypeError): + check_type_list(case) diff --git a/test/units/module_utils/common/validation/test_check_type_path.py b/test/units/module_utils/common/validation/test_check_type_path.py new file mode 100644 index 00000000..d6ff433a --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_path.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re + +import os +from ansible.module_utils.common.validation import check_type_path + + +def mock_expand(value): + return re.sub(r'~|\$HOME', '/home/testuser', value) + + +def test_check_type_path(monkeypatch): + monkeypatch.setattr(os.path, 'expandvars', mock_expand) + monkeypatch.setattr(os.path, 'expanduser', mock_expand) + test_cases = ( + ('~/foo', '/home/testuser/foo'), + ('$HOME/foo', '/home/testuser/foo'), + ('/home/jane', '/home/jane'), + (u'/home/jané', u'/home/jané'), + ) + for case in test_cases: + assert case[1] == check_type_path(case[0]) diff --git a/test/units/module_utils/common/validation/test_check_type_raw.py b/test/units/module_utils/common/validation/test_check_type_raw.py new file mode 100644 index 00000000..988e5543 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_raw.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.common.validation import check_type_raw + + +def test_check_type_raw(): + test_cases = ( + (1, 1), + ('1', '1'), + ('a', 'a'), + ({'k1': 'v1'}, {'k1': 'v1'}), + ([1, 2], [1, 2]), + (b'42', b'42'), + (u'42', u'42'), + ) + for case in test_cases: + assert case[1] == check_type_raw(case[0]) diff --git a/test/units/module_utils/common/validation/test_check_type_str.py b/test/units/module_utils/common/validation/test_check_type_str.py new file mode 100644 index 00000000..f10dad28 --- /dev/null +++ b/test/units/module_utils/common/validation/test_check_type_str.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.validation import check_type_str + + +TEST_CASES = ( + ('string', 'string'), + (100, '100'), + (1.5, '1.5'), + ({'k1': 'v1'}, "{'k1': 'v1'}"), + ([1, 2, 'three'], "[1, 2, 'three']"), + ((1, 2,), '(1, 2)'), +) + + +@pytest.mark.parametrize('value, expected', TEST_CASES) +def test_check_type_str(value, expected): + assert expected == check_type_str(value) + + +@pytest.mark.parametrize('value, expected', 
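# TEST_CASES[1:] skips ('string', 'string'): a value that is already a string + # is returned as-is, so allow_conversion=False can never raise for it +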
TEST_CASES[1:]) +def test_check_type_str_no_conversion(value, expected): + with pytest.raises(TypeError) as e: + check_type_str(value, allow_conversion=False) + assert 'is not a string and conversion is not allowed' in to_native(e.value) diff --git a/test/units/module_utils/common/validation/test_count_terms.py b/test/units/module_utils/common/validation/test_count_terms.py new file mode 100644 index 00000000..f41dc40d --- /dev/null +++ b/test/units/module_utils/common/validation/test_count_terms.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.module_utils.common.validation import count_terms + + +@pytest.fixture +def params(): + return { + 'name': 'bob', + 'dest': '/etc/hosts', + 'state': 'present', + 'value': 5, + } + + +def test_count_terms(params): + check = set(('name', 'dest')) + assert count_terms(check, params) == 2 + + +def test_count_terms_str_input(params): + check = 'name' + assert count_terms(check, params) == 1 + + +def test_count_terms_tuple_input(params): + check = ('name', 'dest') + assert count_terms(check, params) == 2 + + +def test_count_terms_list_input(params): + check = ['name', 'dest'] + assert count_terms(check, params) == 2 diff --git a/test/units/module_utils/common/warnings/test_deprecate.py b/test/units/module_utils/common/warnings/test_deprecate.py new file mode 100644 index 00000000..42046bfe --- /dev/null +++ b/test/units/module_utils/common/warnings/test_deprecate.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +import ansible.module_utils.common.warnings as warnings + +from ansible.module_utils.common.warnings import deprecate, get_deprecation_messages +from ansible.module_utils.six import PY3 + + +@pytest.fixture +def deprecation_messages(): + return [ + {'msg': 'First deprecation', 'version': None, 'collection_name': None}, + {'msg': 'Second deprecation', 'version': None, 'collection_name': 'ansible.builtin'}, + {'msg': 'Third deprecation', 'version': '2.14', 'collection_name': None}, + {'msg': 'Fourth deprecation', 'version': '2.9', 'collection_name': None}, + {'msg': 'Fifth deprecation', 'version': '2.9', 'collection_name': 'ansible.builtin'}, + {'msg': 'Sixth deprecation', 'date': '2199-12-31', 'collection_name': None}, + {'msg': 'Seventh deprecation', 'date': '2199-12-31', 'collection_name': 'ansible.builtin'}, + ] + + +def test_deprecate_message_only(): + deprecate('Deprecation message') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'version': None, 'collection_name': None}] + + +def test_deprecate_with_collection(): + deprecate(msg='Deprecation message', collection_name='ansible.builtin') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'version': None, 'collection_name': 'ansible.builtin'}] + + +def test_deprecate_with_version(): + deprecate(msg='Deprecation message', version='2.14') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'version': '2.14', 'collection_name': None}] + + +def test_deprecate_with_version_and_collection(): + deprecate(msg='Deprecation message', version='2.14', 
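# a recorded entry carries 'msg' and 'collection_name' plus exactly one of + # 'version' or 'date' -- compare the date-based tests below +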
collection_name='ansible.builtin') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'version': '2.14', 'collection_name': 'ansible.builtin'}] + + +def test_deprecate_with_date(): + deprecate(msg='Deprecation message', date='2199-12-31') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'date': '2199-12-31', 'collection_name': None}] + + +def test_deprecate_with_date_and_collection(): + deprecate(msg='Deprecation message', date='2199-12-31', collection_name='ansible.builtin') + assert warnings._global_deprecations == [ + {'msg': 'Deprecation message', 'date': '2199-12-31', 'collection_name': 'ansible.builtin'}] + + +def test_multiple_deprecations(deprecation_messages): + for d in deprecation_messages: + deprecate(**d) + + assert deprecation_messages == warnings._global_deprecations + + +def test_get_deprecation_messages(deprecation_messages): + for d in deprecation_messages: + deprecate(**d) + + accessor_deprecations = get_deprecation_messages() + assert isinstance(accessor_deprecations, tuple) + assert len(accessor_deprecations) == 7 + + +@pytest.mark.parametrize( + 'test_case', + ( + 1, + True, + [1], + {'k1': 'v1'}, + (1, 2), + 6.62607004, + b'bytestr' if PY3 else None, + None, + ) +) +def test_deprecate_failure(test_case): + with pytest.raises(TypeError, match='deprecate requires a string not a %s' % type(test_case)): + deprecate(test_case) diff --git a/test/units/module_utils/common/warnings/test_warn.py b/test/units/module_utils/common/warnings/test_warn.py new file mode 100644 index 00000000..020b0625 --- /dev/null +++ b/test/units/module_utils/common/warnings/test_warn.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +import ansible.module_utils.common.warnings as warnings + +from ansible.module_utils.common.warnings import warn, get_warning_messages +from ansible.module_utils.six import PY3 + + +@pytest.fixture +def warning_messages(): + return [ + 'First warning', + 'Second warning', + 'Third warning', + ] + + +def test_warn(): + warn('Warning message') + assert warnings._global_warnings == ['Warning message'] + + +def test_multiple_warnings(warning_messages): + for w in warning_messages: + warn(w) + + assert warning_messages == warnings._global_warnings + + +def test_get_warning_messages(warning_messages): + for w in warning_messages: + warn(w) + + accessor_warnings = get_warning_messages() + assert isinstance(accessor_warnings, tuple) + assert len(accessor_warnings) == 3 + + +@pytest.mark.parametrize( + 'test_case', + ( + 1, + True, + [1], + {'k1': 'v1'}, + (1, 2), + 6.62607004, + b'bytestr' if PY3 else None, + None, + ) +) +def test_warn_failure(test_case): + with pytest.raises(TypeError, match='warn requires a string not a %s' % type(test_case)): + warn(test_case) diff --git a/test/units/module_utils/conftest.py b/test/units/module_utils/conftest.py new file mode 100644 index 00000000..8bc13c4d --- /dev/null +++ b/test/units/module_utils/conftest.py @@ -0,0 +1,72 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import sys +from io import BytesIO + +import pytest + +import ansible.module_utils.basic +from 
ansible.module_utils.six import PY3, string_types +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common._collections_compat import MutableMapping + + +@pytest.fixture +def stdin(mocker, request): + old_args = ansible.module_utils.basic._ANSIBLE_ARGS + ansible.module_utils.basic._ANSIBLE_ARGS = None + old_argv = sys.argv + sys.argv = ['ansible_unittest'] + + if isinstance(request.param, string_types): + args = request.param + elif isinstance(request.param, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in request.param: + request.param = {'ANSIBLE_MODULE_ARGS': request.param} + if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: + request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: + request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + args = json.dumps(request.param) + else: + raise Exception('Malformed data to the stdin pytest fixture') + + fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict')) + if PY3: + mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock()) + mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin) + else: + mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin) + + yield fake_stdin + + ansible.module_utils.basic._ANSIBLE_ARGS = old_args + sys.argv = old_argv + + +@pytest.fixture +def am(stdin, request): + old_args = ansible.module_utils.basic._ANSIBLE_ARGS + ansible.module_utils.basic._ANSIBLE_ARGS = None + old_argv = sys.argv + sys.argv = ['ansible_unittest'] + + argspec = {} + if hasattr(request, 'param'): + if isinstance(request.param, dict): + argspec = request.param + + am = ansible.module_utils.basic.AnsibleModule( + argument_spec=argspec, + ) + am._name = 'ansible_unittest' + + yield am + + ansible.module_utils.basic._ANSIBLE_ARGS = old_args + sys.argv = old_argv diff --git a/test/units/module_utils/facts/__init__.py b/test/units/module_utils/facts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/base.py b/test/units/module_utils/facts/base.py new file mode 100644 index 00000000..33d3087b --- /dev/null +++ b/test/units/module_utils/facts/base.py @@ -0,0 +1,65 @@ +# base unit test classes for ansible/module_utils/facts/ tests +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import Mock, patch + + +class BaseFactsTest(unittest.TestCase): + # just a base class, not an actual test + __test__ = False + + gather_subset = ['all'] + valid_subsets = None + fact_namespace = None + collector_class = None + + # a dict ansible_facts. 
Some fact collectors depend on facts gathered by + # other collectors (like 'ansible_architecture' or 'ansible_system') which + # can be passed via the collected_facts arg to collect() + collected_facts = None + + def _mock_module(self): + mock_module = Mock() + mock_module.params = {'gather_subset': self.gather_subset, + 'gather_timeout': 5, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value=None) + return mock_module + + @patch('platform.system', return_value='Linux') + @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd') + def test_collect(self, mock_gfc, mock_ps): + module = self._mock_module() + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts) + self.assertIsInstance(facts_dict, dict) + return facts_dict + + @patch('platform.system', return_value='Linux') + @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd') + def test_collect_with_namespace(self, mock_gfc, mock_ps): + module = self._mock_module() + fact_collector = self.collector_class() + facts_dict = fact_collector.collect_with_namespace(module=module, + collected_facts=self.collected_facts) + self.assertIsInstance(facts_dict, dict) + return facts_dict diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo new file mode 100644 index 00000000..c3caa01c --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo @@ -0,0 +1,40 @@ +processor : 0 +Processor : AArch64 Processor rev 4 (aarch64) +Hardware : sun50iw1p1 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 1 +Processor : AArch64 Processor rev 4 (aarch64) +Hardware : sun50iw1p1 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 2 +Processor : AArch64 Processor rev 4 (aarch64) +Hardware : sun50iw1p1 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 3 +Processor : AArch64 Processor rev 4 (aarch64) +Hardware : sun50iw1p1 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo new file mode 100644 index 00000000..38fd06e7 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo @@ -0,0 +1,32 @@ +processor : 0 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 1 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 2 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 
+processor : 3 +BogoMIPS : 48.00 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo new file mode 100644 index 00000000..84ee16c9 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo @@ -0,0 +1,12 @@ +processor : 0 +model name : ARMv6-compatible processor rev 7 (v6l) +BogoMIPS : 697.95 +Features : half thumb fastmult vfp edsp java tls +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xb76 +CPU revision : 7 +Hardware : BCM2835 +Revision : 0010 +Serial : 000000004a0abca9 diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo new file mode 100644 index 00000000..d4b4d3b6 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo @@ -0,0 +1,75 @@ +processor : 0 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 12.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xc07 +CPU revision : 3 +processor : 1 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 12.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xc07 +CPU revision : 3 +processor : 2 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 12.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xc07 +CPU revision : 3 +processor : 3 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 12.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xc07 +CPU revision : 3 +processor : 4 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 120.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x2 +CPU part : 0xc0f +CPU revision : 3 +processor : 5 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 120.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x2 +CPU part : 0xc0f +CPU revision : 3 +processor : 6 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 120.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x2 +CPU part : 0xc0f +CPU revision : 3 +processor : 7 +model name : ARMv7 Processor rev 3 (v7l) +BogoMIPS : 120.00 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x2 +CPU part : 0xc0f +CPU revision : 3 +Hardware : ODROID-XU4 +Revision : 0100 +Serial : 0000000000000000 diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo new file mode 100644 index 00000000..f36075c2 --- /dev/null +++ 
b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo @@ -0,0 +1,39 @@ +processor : 0 +model name : ARMv7 Processor rev 4 (v7l) +BogoMIPS : 38.40 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32 +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 1 +model name : ARMv7 Processor rev 4 (v7l) +BogoMIPS : 38.40 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32 +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 2 +model name : ARMv7 Processor rev 4 (v7l) +BogoMIPS : 38.40 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32 +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +processor : 3 +model name : ARMv7 Processor rev 4 (v7l) +BogoMIPS : 38.40 +Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32 +CPU implementer : 0x41 +CPU architecture: 7 +CPU variant : 0x0 +CPU part : 0xd03 +CPU revision : 4 +Hardware : BCM2835 +Revision : a02082 +Serial : 000000007881bb80 diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo new file mode 100644 index 00000000..1309c585 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo @@ -0,0 +1,44 @@ +processor : 0 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 1 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 2 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 3 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 4 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 5 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 6 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +processor : 7 +cpu : POWER7 (architected), altivec supported +clock : 3550.000000MHz +revision : 2.1 (pvr 003f 0201) + +timebase : 512000000 +platform : pSeries +model : IBM,8231-E2B +machine : CHRP IBM,8231-E2B \ No newline at end of file diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo new file mode 100644 index 00000000..4cbd5ac0 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo @@ -0,0 +1,125 @@ +processor : 0 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 1 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 2 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 3 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 4 +cpu : POWER8 (architected), altivec supported +clock : 
3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 5 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 6 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 7 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 8 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 9 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 10 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 11 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 12 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 13 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 14 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 15 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 16 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 17 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 18 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 19 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 20 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 21 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 22 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +processor : 23 +cpu : POWER8 (architected), altivec supported +clock : 3425.000000MHz +revision : 2.1 (pvr 004b 0201) + +timebase : 512000000 +platform : pSeries +model : IBM,8247-21L +machine : CHRP IBM,8247-21L + diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu b/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu new file mode 100644 index 00000000..8c29faa4 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu @@ -0,0 +1,61 @@ +cpu : UltraSparc T5 (Niagara5) +fpu : UltraSparc T5 integrated FPU +pmu : niagara5 +prom : OBP 4.38.12 2018/03/28 14:54 +type : sun4v +ncpus probed : 24 +ncpus active : 24 +D$ parity tl1 : 0 +I$ parity tl1 : 0 +cpucaps : flush,stbar,swap,muldiv,v9,blkinit,n2,mul32,div32,v8plus,popc,vis,vis2,ASIBlkInit,fmaf,vis3,hpc,ima,pause,cbcond,aes,des,kasumi,camellia,md5,sha1,sha256,sha512,mpmul,montmul,montsqr,crc32c +Cpu0ClkTck : 00000000d6924470 +Cpu1ClkTck : 00000000d6924470 +Cpu2ClkTck : 00000000d6924470 +Cpu3ClkTck : 00000000d6924470 +Cpu4ClkTck : 00000000d6924470 +Cpu5ClkTck : 00000000d6924470 +Cpu6ClkTck : 00000000d6924470 +Cpu7ClkTck : 00000000d6924470 +Cpu8ClkTck : 00000000d6924470 +Cpu9ClkTck : 00000000d6924470 +Cpu10ClkTck : 00000000d6924470 +Cpu11ClkTck : 00000000d6924470 
+Cpu12ClkTck : 00000000d6924470 +Cpu13ClkTck : 00000000d6924470 +Cpu14ClkTck : 00000000d6924470 +Cpu15ClkTck : 00000000d6924470 +Cpu16ClkTck : 00000000d6924470 +Cpu17ClkTck : 00000000d6924470 +Cpu18ClkTck : 00000000d6924470 +Cpu19ClkTck : 00000000d6924470 +Cpu20ClkTck : 00000000d6924470 +Cpu21ClkTck : 00000000d6924470 +Cpu22ClkTck : 00000000d6924470 +Cpu23ClkTck : 00000000d6924470 +MMU Type : Hypervisor (sun4v) +MMU PGSZs : 8K,64K,4MB,256MB +State: +CPU0: online +CPU1: online +CPU2: online +CPU3: online +CPU4: online +CPU5: online +CPU6: online +CPU7: online +CPU8: online +CPU9: online +CPU10: online +CPU11: online +CPU12: online +CPU13: online +CPU14: online +CPU15: online +CPU16: online +CPU17: online +CPU18: online +CPU19: online +CPU20: online +CPU21: online +CPU22: online +CPU23: online diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo new file mode 100644 index 00000000..1d233f8d --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo @@ -0,0 +1,56 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2799.998 +cache size : 16384 KB +physical id : 0 +siblings : 1 +core id : 0 +cpu cores : 1 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp l' +m constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadlin' +e_timer aes xsave avx f16c rdrand hypervisor lahf_lm pti fsgsbase tsc_adjust smep erms xsaveopt arat +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf +bogomips : 5602.32 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2799.998 +cache size : 16384 KB +physical id : 1 +siblings : 1 +core id : 0 +cpu cores : 1 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp l' +m constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadlin' +e_timer aes xsave avx f16c rdrand hypervisor lahf_lm pti fsgsbase tsc_adjust smep erms xsaveopt arat +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf +bogomips : 5602.32 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo new file mode 100644 index 00000000..fcc396db --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo @@ -0,0 +1,104 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2216 +stepping : 2 +cpu MHz : 1000.000 +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 2 +apicid : 0 +initial apicid : 0 +fpu : yes 
+fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt ' +rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall +bogomips : 1994.60 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc +processor : 1 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2216 +stepping : 2 +cpu MHz : 1000.000 +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 1 +cpu cores : 2 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt ' +rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall +bogomips : 1994.60 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc +processor : 2 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2216 +stepping : 2 +cpu MHz : 1000.000 +cache size : 1024 KB +physical id : 1 +siblings : 2 +core id : 0 +cpu cores : 2 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt ' +rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall +bogomips : 1994.60 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc +processor : 3 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2216 +stepping : 2 +cpu MHz : 1000.000 +cache size : 1024 KB +physical id : 1 +siblings : 2 +core id : 1 +cpu cores : 2 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt ' +rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall +bogomips : 1994.60 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo new file mode 100644 index 00000000..63abea2c --- /dev/null +++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo @@ -0,0 +1,216 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 2703.625 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat 
pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5388.06 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 3398.565 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5393.53 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 3390.325 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5391.63 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 3262.774 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust 
bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5392.08 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 2905.169 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5391.97 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 1834.826 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5392.11 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 2781.573 +cache size : 6144 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5391.98 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 60 +model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz +stepping : 3 +microcode : 0x20 +cpu MHz : 3593.353 +cache size : 6144 KB +physical 
id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts +bugs : +bogomips : 5392.07 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + diff --git a/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux b/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux new file mode 100644 index 00000000..a5442de4 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux @@ -0,0 +1,10 @@ +NAME="Clear Linux OS" +VERSION=1 +ID=clear-linux-os +ID_LIKE=clear-linux-os +VERSION_ID=28120 +PRETTY_NAME="Clear Linux OS" +ANSI_COLOR="1;35" +HOME_URL="https://clearlinux.org" +SUPPORT_URL="https://clearlinux.org" +BUG_REPORT_URL="mailto:dev@lists.clearlinux.org"', diff --git a/test/units/module_utils/facts/fixtures/distribution_files/CoreOS b/test/units/module_utils/facts/fixtures/distribution_files/CoreOS new file mode 100644 index 00000000..806ce306 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/distribution_files/CoreOS @@ -0,0 +1,10 @@ +NAME="Container Linux by CoreOS" +ID=coreos +VERSION=1911.5.0 +VERSION_ID=1911.5.0 +BUILD_ID=2018-12-15-2317 +PRETTY_NAME="Container Linux by CoreOS 1911.5.0 (Rhyolite)" +ANSI_COLOR="38;5;75" +HOME_URL="https://coreos.com/" +BUG_REPORT_URL="https://issues.coreos.com" +COREOS_BOARD="amd64-usr" diff --git a/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint b/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint new file mode 100644 index 00000000..850f6b78 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint @@ -0,0 +1,12 @@ +NAME="Linux Mint" +VERSION="19.1 (Tessa)" +ID=linuxmint +ID_LIKE=ubuntu +PRETTY_NAME="Linux Mint 19.1" +VERSION_ID="19.1" +HOME_URL="https://www.linuxmint.com/" +SUPPORT_URL="https://forums.ubuntu.com/" +BUG_REPORT_URL="http://linuxmint-troubleshooting-guide.readthedocs.io/en/latest/" +PRIVACY_POLICY_URL="https://www.linuxmint.com/" +VERSION_CODENAME=tessa +UBUNTU_CODENAME=bionic diff --git a/test/units/module_utils/facts/fixtures/distribution_files/Slackware b/test/units/module_utils/facts/fixtures/distribution_files/Slackware new file mode 100644 index 00000000..1147d297 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/distribution_files/Slackware @@ -0,0 +1 @@ +Slackware 14.1 diff --git a/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent b/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent new file mode 100644 index 00000000..62c046c8 --- /dev/null +++ b/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent @@ -0,0 +1 @@ +Slackware 14.2+ diff --git a/test/units/module_utils/facts/fixtures/findmount_output.txt b/test/units/module_utils/facts/fixtures/findmount_output.txt new file mode 100644 index 00000000..299a2627 --- /dev/null +++ 
b/test/units/module_utils/facts/fixtures/findmount_output.txt @@ -0,0 +1,40 @@ +/sys sysfs sysfs rw,nosuid,nodev,noexec,relatime,seclabel +/proc proc proc rw,nosuid,nodev,noexec,relatime +/dev devtmpfs devtmpfs rw,nosuid,seclabel,size=8044400k,nr_inodes=2011100,mode=755 +/sys/kernel/security securityfs securityfs rw,nosuid,nodev,noexec,relatime +/dev/shm tmpfs tmpfs rw,nosuid,nodev,seclabel +/dev/pts devpts devpts rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000 +/run tmpfs tmpfs rw,nosuid,nodev,seclabel,mode=755 +/sys/fs/cgroup tmpfs tmpfs ro,nosuid,nodev,noexec,seclabel,mode=755 +/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,na +/sys/fs/pstore pstore pstore rw,nosuid,nodev,noexec,relatime,seclabel +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids +/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct +/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/kernel/config configfs configfs rw,relatime +/ /dev/mapper/fedora_dhcp129--186-root ext4 rw,relatime,seclabel,data=ordered +/sys/fs/selinux selinuxfs selinuxfs rw,relatime +/proc/sys/fs/binfmt_misc systemd-1 autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct +/sys/kernel/debug debugfs debugfs rw,relatime,seclabel +/dev/hugepages hugetlbfs hugetlbfs rw,relatime,seclabel +/tmp tmpfs tmpfs rw,seclabel +/dev/mqueue mqueue mqueue rw,relatime,seclabel +/var/lib/machines /dev/loop0 btrfs rw,relatime,seclabel,space_cache,subvolid=5,subvol=/ +/boot /dev/sda1 ext4 rw,relatime,seclabel,data=ordered +/home /dev/mapper/fedora_dhcp129--186-home ext4 rw,relatime,seclabel,data=ordered +/run/user/1000 tmpfs tmpfs rw,nosuid,nodev,relatime,seclabel,size=1611044k,mode=700,uid=1000,gid=1000 +/run/user/1000/gvfs gvfsd-fuse fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 +/sys/fs/fuse/connections fusectl fusectl rw,relatime +/not/a/real/bind_mount /dev/sdz4[/some/other/path] ext4 rw,relatime,seclabel,data=ordered +/home/adrian/sshfs-grimlock grimlock.g.a: fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 +/home/adrian/sshfs-grimlock-single-quote grimlock.g.a:test_path/path_with'single_quotes + fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 +/home/adrian/sshfs-grimlock-single-quote-2 grimlock.g.a:path_with'single_quotes fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 +/home/adrian/fotos grimlock.g.a:/mnt/data/foto's fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 diff --git a/test/units/module_utils/facts/hardware/__init__.py b/test/units/module_utils/facts/hardware/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/hardware/linux_data.py b/test/units/module_utils/facts/hardware/linux_data.py new file mode 100644 index 00000000..8e056769 --- /dev/null +++ 
b/test/units/module_utils/facts/hardware/linux_data.py @@ -0,0 +1,585 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +LSBLK_OUTPUT = b""" +/dev/sda +/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 +/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK +/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d +/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce +/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d +/dev/sr0 +/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 +/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a +/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390 +/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a +/dev/mapper/docker-253:1-1050967-pool +/dev/loop2 +/dev/mapper/docker-253:1-1050967-pool +""" + +LSBLK_OUTPUT_2 = b""" +/dev/sda +/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 +/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK +/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d +/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce +/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d +/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373 +/dev/sr0 +/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 +""" + +LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'} + +UDEVADM_UUID = 'N/A' + +UDEVADM_OUTPUT = """ +UDEV_LOG=3 +DEVPATH=/devices/pci0000:00/0000:00:07.0/virtio2/block/vda/vda1 +MAJOR=252 +MINOR=1 +DEVNAME=/dev/vda1 +DEVTYPE=partition +SUBSYSTEM=block +MPATH_SBIN_PATH=/sbin +ID_PATH=pci-0000:00:07.0-virtio-pci-virtio2 +ID_PART_TABLE_TYPE=dos +ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179 +ID_FS_UUID_ENC=57b1a3e7-9019-4747-9809-7ec52bba9179 +ID_FS_VERSION=1.0 +ID_FS_TYPE=ext4 +ID_FS_USAGE=filesystem +LVM_SBIN_PATH=/sbin +DEVLINKS=/dev/block/252:1 /dev/disk/by-path/pci-0000:00:07.0-virtio-pci-virtio2-part1 /dev/disk/by-uuid/57b1a3e7-9019-4747-9809-7ec52bba9179 +""" + +MTAB = """ +sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0 +securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 +tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 +devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 +tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 +cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 +pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 
+cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0 +cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 +configfs /sys/kernel/config configfs rw,relatime 0 0 +/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0 +selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 +systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0 +debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0 +hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 +tmpfs /tmp tmpfs rw,seclabel 0 0 +mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 +/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0 +/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0 +tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0 +gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 +grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +""" + +MTAB_ENTRIES = [ + [ + 'sysfs', + '/sys', + 'sysfs', + 'rw,seclabel,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'], + [ + 'devtmpfs', + '/dev', + 'devtmpfs', + 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755', + '0', + '0' + ], + [ + 'securityfs', + '/sys/kernel/security', + 'securityfs', + 'rw,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'], + [ + 'devpts', + '/dev/pts', + 'devpts', + 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000', + '0', + '0' + ], + ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'], + [ + 'tmpfs', + '/sys/fs/cgroup', + 'tmpfs', + 'ro,seclabel,nosuid,nodev,noexec,mode=755', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/systemd', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd', + '0', + '0' + ], + [ + 'pstore', + '/sys/fs/pstore', + 'pstore', + 'rw,seclabel,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/devices', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,devices', + '0', + '0' + ], + [ + 
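# each sublist is one parsed /etc/mtab line, in getmntent() field order: + # fsname, dir (mount point), type, opts, freq, passno +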
'cgroup', + '/sys/fs/cgroup/freezer', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,freezer', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/memory', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,memory', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/pids', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,pids', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/blkio', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,blkio', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/cpuset', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,cpuset', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/cpu,cpuacct', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/hugetlb', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,hugetlb', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/perf_event', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,perf_event', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/net_cls,net_prio', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio', + '0', + '0' + ], + ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'], + [ + '/dev/mapper/fedora_dhcp129--186-root', + '/', + 'ext4', + 'rw,seclabel,relatime,data=ordered', + '0', + '0' + ], + ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'], + [ + 'systemd-1', + '/proc/sys/fs/binfmt_misc', + 'autofs', + 'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct', + '0', + '0' + ], + ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'], + [ + 'hugetlbfs', + '/dev/hugepages', + 'hugetlbfs', + 'rw,seclabel,relatime', + '0', + '0' + ], + ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'], + ['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'], + [ + '/dev/loop0', + '/var/lib/machines', + 'btrfs', + 'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/', + '0', + '0' + ], + ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], + # A 'none' fstype + ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'], + # lets assume this is a bindmount + ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], + [ + '/dev/mapper/fedora_dhcp129--186-home', + '/home', + 'ext4', + 'rw,seclabel,relatime,data=ordered', + '0', + '0' + ], + [ + 'tmpfs', + '/run/user/1000', + 'tmpfs', + 'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000', + '0', + '0' + ], + [ + 'gvfsd-fuse', + '/run/user/1000/gvfs', + 'fuse.gvfsd-fuse', + 'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000', + '0', + '0' + ], + ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']] + +STATVFS_INFO = {'/': {'block_available': 10192323, + 'block_size': 4096, + 'block_total': 12868728, + 'block_used': 2676405, + 'inode_available': 3061699, + 'inode_total': 3276800, + 'inode_used': 215101, + 'size_available': 41747755008, + 'size_total': 52710309888}, + '/not/a/real/bind_mount': {}, + '/home': {'block_available': 1001578731, + 'block_size': 4096, + 'block_total': 105871006, + 'block_used': 5713133, + 'inode_available': 26860880, + 'inode_total': 26902528, + 'inode_used': 41648, + 'size_available': 410246647808, + 'size_total': 433647640576}, + '/var/lib/machines': {'block_available': 10192316, + 'block_size': 4096, + 'block_total': 12868728, + 'block_used': 2676412, + 'inode_available': 3061699, + 'inode_total': 3276800, + 'inode_used': 215101, + 'size_available': 
41747726336, + 'size_total': 52710309888}, + '/boot': {'block_available': 187585, + 'block_size': 4096, + 'block_total': 249830, + 'block_used': 62245, + 'inode_available': 65096, + 'inode_total': 65536, + 'inode_used': 440, + 'size_available': 768348160, + 'size_total': 1023303680} + } + +# ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], + +BIND_MOUNTS = ['/not/a/real/bind_mount'] + +CPU_INFO_TEST_SCENARIOS = [ + { + 'architecture': 'armv61', + 'nproc_out': 1, + 'sched_getaffinity': set([0]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': ['0', 'ARMv6-compatible processor rev 7 (v6l)'], + 'processor_cores': 1, + 'processor_count': 1, + 'processor_nproc': 1, + 'processor_threads_per_core': 1, + 'processor_vcpus': 1}, + }, + { + 'architecture': 'armv71', + 'nproc_out': 4, + 'sched_getaffinity': set([0, 1, 2, 3]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'ARMv7 Processor rev 4 (v7l)', + '1', 'ARMv7 Processor rev 4 (v7l)', + '2', 'ARMv7 Processor rev 4 (v7l)', + '3', 'ARMv7 Processor rev 4 (v7l)', + ], + 'processor_cores': 1, + 'processor_count': 4, + 'processor_nproc': 4, + 'processor_threads_per_core': 1, + 'processor_vcpus': 4}, + }, + { + 'architecture': 'aarch64', + 'nproc_out': 4, + 'sched_getaffinity': set([0, 1, 2, 3]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/aarch64-4cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'AArch64 Processor rev 4 (aarch64)', + '1', 'AArch64 Processor rev 4 (aarch64)', + '2', 'AArch64 Processor rev 4 (aarch64)', + '3', 'AArch64 Processor rev 4 (aarch64)', + ], + 'processor_cores': 1, + 'processor_count': 4, + 'processor_nproc': 4, + 'processor_threads_per_core': 1, + 'processor_vcpus': 4}, + }, + { + 'architecture': 'x86_64', + 'nproc_out': 4, + 'sched_getaffinity': set([0, 1, 2, 3]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-4cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216', + '1', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216', + '2', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216', + '3', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216', + ], + 'processor_cores': 2, + 'processor_count': 2, + 'processor_nproc': 4, + 'processor_threads_per_core': 1, + 'processor_vcpus': 4}, + }, + { + 'architecture': 'x86_64', + 'nproc_out': 4, + 'sched_getaffinity': set([0, 1, 2, 3]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-8cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '1', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '2', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '3', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '4', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '5', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '6', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + '7', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz', + ], + 'processor_cores': 4, + 'processor_count': 1, + 'processor_nproc': 4, + 
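# in these fixtures processor_vcpus == processor_count * processor_cores + # * processor_threads_per_core (here 1 socket * 4 cores * 2 threads = 8), + # while processor_nproc matches len(sched_getaffinity) (here 4) +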
'processor_threads_per_core': 2, + 'processor_vcpus': 8}, + }, + { + 'architecture': 'arm64', + 'nproc_out': 4, + 'sched_getaffinity': set([0, 1, 2, 3]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/arm64-4cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': ['0', '1', '2', '3'], + 'processor_cores': 1, + 'processor_count': 4, + 'processor_nproc': 4, + 'processor_threads_per_core': 1, + 'processor_vcpus': 4}, + }, + { + 'architecture': 'armv71', + 'nproc_out': 8, + 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'ARMv7 Processor rev 3 (v7l)', + '1', 'ARMv7 Processor rev 3 (v7l)', + '2', 'ARMv7 Processor rev 3 (v7l)', + '3', 'ARMv7 Processor rev 3 (v7l)', + '4', 'ARMv7 Processor rev 3 (v7l)', + '5', 'ARMv7 Processor rev 3 (v7l)', + '6', 'ARMv7 Processor rev 3 (v7l)', + '7', 'ARMv7 Processor rev 3 (v7l)', + ], + 'processor_cores': 1, + 'processor_count': 8, + 'processor_nproc': 8, + 'processor_threads_per_core': 1, + 'processor_vcpus': 8}, + }, + { + 'architecture': 'x86_64', + 'nproc_out': 2, + 'sched_getaffinity': set([0, 1]), + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-2cpu-cpuinfo')).readlines(), + 'expected_result': { + 'processor': [ + '0', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz', + '1', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz', + ], + 'processor_cores': 1, + 'processor_count': 2, + 'processor_nproc': 2, + 'processor_threads_per_core': 1, + 'processor_vcpus': 2}, + }, + { + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo')).readlines(), + 'architecture': 'ppc64', + 'nproc_out': 8, + 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]), + 'expected_result': { + 'processor': [ + '0', 'POWER7 (architected), altivec supported', + '1', 'POWER7 (architected), altivec supported', + '2', 'POWER7 (architected), altivec supported', + '3', 'POWER7 (architected), altivec supported', + '4', 'POWER7 (architected), altivec supported', + '5', 'POWER7 (architected), altivec supported', + '6', 'POWER7 (architected), altivec supported', + '7', 'POWER7 (architected), altivec supported' + ], + 'processor_cores': 1, + 'processor_count': 8, + 'processor_nproc': 8, + 'processor_threads_per_core': 1, + 'processor_vcpus': 8 + }, + }, + { + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo')).readlines(), + 'architecture': 'ppc64le', + 'nproc_out': 24, + 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), + 'expected_result': { + 'processor': [ + '0', 'POWER8 (architected), altivec supported', + '1', 'POWER8 (architected), altivec supported', + '2', 'POWER8 (architected), altivec supported', + '3', 'POWER8 (architected), altivec supported', + '4', 'POWER8 (architected), altivec supported', + '5', 'POWER8 (architected), altivec supported', + '6', 'POWER8 (architected), altivec supported', + '7', 'POWER8 (architected), altivec supported', + '8', 'POWER8 (architected), altivec supported', + '9', 'POWER8 (architected), altivec supported', + '10', 'POWER8 (architected), altivec supported', + '11', 'POWER8 (architected), altivec supported', + '12', 'POWER8 (architected), altivec supported', + '13', 'POWER8 (architected), altivec supported', + '14', 
'POWER8 (architected), altivec supported', + '15', 'POWER8 (architected), altivec supported', + '16', 'POWER8 (architected), altivec supported', + '17', 'POWER8 (architected), altivec supported', + '18', 'POWER8 (architected), altivec supported', + '19', 'POWER8 (architected), altivec supported', + '20', 'POWER8 (architected), altivec supported', + '21', 'POWER8 (architected), altivec supported', + '22', 'POWER8 (architected), altivec supported', + '23', 'POWER8 (architected), altivec supported', + ], + 'processor_cores': 1, + 'processor_count': 24, + 'processor_nproc': 24, + 'processor_threads_per_core': 1, + 'processor_vcpus': 24 + }, + }, + { + 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu')).readlines(), + 'architecture': 'sparc64', + 'nproc_out': 24, + 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), + 'expected_result': { + 'processor': [ + 'UltraSparc T5 (Niagara5)', + ], + 'processor_cores': 1, + 'processor_count': 24, + 'processor_nproc': 24, + 'processor_threads_per_core': 1, + 'processor_vcpus': 24 + }, + }, +] diff --git a/test/units/module_utils/facts/hardware/test_linux.py b/test/units/module_utils/facts/hardware/test_linux.py new file mode 100644 index 00000000..6e77683a --- /dev/null +++ b/test/units/module_utils/facts/hardware/test_linux.py @@ -0,0 +1,175 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from units.compat import unittest +from units.compat.mock import Mock, patch + +from ansible.module_utils.facts import timeout + +from ansible.module_utils.facts.hardware import linux + +from . 
linux_data import LSBLK_OUTPUT, LSBLK_OUTPUT_2, LSBLK_UUIDS, MTAB, MTAB_ENTRIES, BIND_MOUNTS, STATVFS_INFO, UDEVADM_UUID, UDEVADM_OUTPUT + +with open(os.path.join(os.path.dirname(__file__), '../fixtures/findmount_output.txt')) as f: + FINDMNT_OUTPUT = f.read() + +GET_MOUNT_SIZE = {} + + +def mock_get_mount_size(mountpoint): + return STATVFS_INFO.get(mountpoint, {}) + + +class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase): + + # FIXME: mock.patch instead + def setUp(self): + timeout.GATHER_TIMEOUT = 10 + + def tearDown(self): + timeout.GATHER_TIMEOUT = None + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES) + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS) + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS) + @patch('ansible.module_utils.facts.hardware.linux.get_mount_size', side_effect=mock_get_mount_size) + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID) + def test_get_mount_facts(self, + mock_get_mount_size, + mock_lsblk_uuid, + mock_find_bind_mounts, + mock_mtab_entries, + mock_udevadm_uuid): + module = Mock() + # Returns a LinuxHardware-ish + lh = linux.LinuxHardware(module=module, load_on_init=False) + + # Nothing returned, just self.facts modified as a side effect + mount_facts = lh.get_mount_facts() + self.assertIsInstance(mount_facts, dict) + self.assertIn('mounts', mount_facts) + self.assertIsInstance(mount_facts['mounts'], list) + self.assertIsInstance(mount_facts['mounts'][0], dict) + + home_expected = {'block_available': 1001578731, + 'block_size': 4096, + 'block_total': 105871006, + 'block_used': 5713133, + 'device': '/dev/mapper/fedora_dhcp129--186-home', + 'fstype': 'ext4', + 'inode_available': 26860880, + 'inode_total': 26902528, + 'inode_used': 41648, + 'mount': '/home', + 'options': 'rw,seclabel,relatime,data=ordered', + 'size_available': 410246647808, + 'size_total': 433647640576, + 'uuid': 'N/A'} + home_info = [x for x in mount_facts['mounts'] if x['mount'] == '/home'][0] + + self.maxDiff = 4096 + self.assertDictEqual(home_info, home_expected) + + @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB) + def test_get_mtab_entries(self, mock_get_file_content): + + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + mtab_entries = lh._mtab_entries() + self.assertIsInstance(mtab_entries, list) + self.assertIsInstance(mtab_entries[0], list) + self.assertEqual(len(mtab_entries), 38) + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, '')) + def test_find_bind_mounts(self, mock_run_findmnt): + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + bind_mounts = lh._find_bind_mounts() + + # If bind_mounts becomes another seq type, feel free to change + self.assertIsInstance(bind_mounts, set) + self.assertEqual(len(bind_mounts), 1) + self.assertIn('/not/a/real/bind_mount', bind_mounts) + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', '')) + def test_find_bind_mounts_non_zero(self, mock_run_findmnt): + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + bind_mounts = lh._find_bind_mounts() + + self.assertIsInstance(bind_mounts, set) + self.assertEqual(len(bind_mounts), 0) + + def 
test_find_bind_mounts_no_findmnts(self): + module = Mock() + module.get_bin_path = Mock(return_value=None) + lh = linux.LinuxHardware(module=module, load_on_init=False) + bind_mounts = lh._find_bind_mounts() + + self.assertIsInstance(bind_mounts, set) + self.assertEqual(len(bind_mounts), 0) + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, '')) + def test_lsblk_uuid(self, mock_run_lsblk): + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + + self.assertIsInstance(lsblk_uuids, dict) + self.assertIn(b'/dev/loop9', lsblk_uuids) + self.assertIn(b'/dev/sda1', lsblk_uuids) + self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, '')) + def test_lsblk_uuid_non_zero(self, mock_run_lsblk): + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + + self.assertIsInstance(lsblk_uuids, dict) + self.assertEqual(len(lsblk_uuids), 0) + + def test_lsblk_uuid_no_lsblk(self): + module = Mock() + module.get_bin_path = Mock(return_value=None) + lh = linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + + self.assertIsInstance(lsblk_uuids, dict) + self.assertEqual(len(lsblk_uuids), 0) + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, '')) + def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk): + module = Mock() + lh = linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + self.assertIsInstance(lsblk_uuids, dict) + self.assertIn(b'/dev/loop0', lsblk_uuids) + self.assertIn(b'/dev/sda1', lsblk_uuids) + self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373') + self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') + + def test_udevadm_uuid(self): + module = Mock() + module.run_command = Mock(return_value=(0, UDEVADM_OUTPUT, '')) # (rc, out, err) + lh = linux.LinuxHardware(module=module, load_on_init=False) + udevadm_uuid = lh._udevadm_uuid('mock_device') + + self.assertEqual(udevadm_uuid, '57b1a3e7-9019-4747-9809-7ec52bba9179') diff --git a/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py b/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py new file mode 100644 index 00000000..aea8694e --- /dev/null +++ b/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.facts.hardware import linux + +from . 
linux_data import CPU_INFO_TEST_SCENARIOS + + +def test_get_cpu_info(mocker): + module = mocker.Mock() + inst = linux.LinuxHardware(module) + + mocker.patch('os.path.exists', return_value=False) + mocker.patch('os.access', return_value=True) + for test in CPU_INFO_TEST_SCENARIOS: + mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']]) + mocker.patch('os.sched_getaffinity', create=True, return_value=test['sched_getaffinity']) + module.run_command.return_value = (0, test['nproc_out'], '') + collected_facts = {'ansible_architecture': test['architecture']} + + assert test['expected_result'] == inst.get_cpu_facts(collected_facts=collected_facts) + + +def test_get_cpu_info_nproc(mocker): + module = mocker.Mock() + inst = linux.LinuxHardware(module) + + mocker.patch('os.path.exists', return_value=False) + mocker.patch('os.access', return_value=True) + for test in CPU_INFO_TEST_SCENARIOS: + mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']]) + mocker.patch('os.sched_getaffinity', create=True, side_effect=AttributeError) + mocker.patch('ansible.module_utils.facts.hardware.linux.get_bin_path', return_value='/usr/bin/nproc') + module.run_command.return_value = (0, test['nproc_out'], '') + collected_facts = {'ansible_architecture': test['architecture']} + + assert test['expected_result'] == inst.get_cpu_facts(collected_facts=collected_facts) + + +def test_get_cpu_info_missing_arch(mocker): + module = mocker.Mock() + inst = linux.LinuxHardware(module) + + # ARM and Power will report incorrect processor count if architecture is not available + mocker.patch('os.path.exists', return_value=False) + mocker.patch('os.access', return_value=True) + for test in CPU_INFO_TEST_SCENARIOS: + mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']]) + mocker.patch('os.sched_getaffinity', create=True, return_value=test['sched_getaffinity']) + + module.run_command.return_value = (0, test['nproc_out'], '') + + test_result = inst.get_cpu_facts() + + if test['architecture'].startswith(('armv', 'aarch', 'ppc')): + assert test['expected_result'] != test_result + else: + assert test['expected_result'] == test_result diff --git a/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py b/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py new file mode 100644 index 00000000..e14a2da8 --- /dev/null +++ b/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py @@ -0,0 +1,20 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import time +from ansible.module_utils.facts.hardware import sunos + + +def test_sunos_get_uptime_facts(mocker): + kstat_output = '\nunix:0:system_misc:boot_time\t1548249689\n' + + module_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule') + module = module_mock() + module.run_command.return_value = (0, kstat_output, '') + + inst = sunos.SunOSHardware(module) + + mocker.patch('time.time', return_value=1567052602.5089788) + expected = int(time.time()) - 1548249689 + result = inst.get_uptime_facts() + assert expected == result['uptime_seconds'] diff --git a/test/units/module_utils/facts/network/__init__.py b/test/units/module_utils/facts/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/network/test_fc_wwn.py b/test/units/module_utils/facts/network/test_fc_wwn.py new file mode 100644 index 
00000000..b98ae378 --- /dev/null +++ b/test/units/module_utils/facts/network/test_fc_wwn.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.facts.network import fc_wwn +from units.compat.mock import Mock + + +# AIX lsdev +LSDEV_OUTPUT = """ +fcs0 Defined 00-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03) +fcs1 Available 04-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03) +""" + +# a slightly truncated output of lscfg (from Z0 to ZC) +LSCFG_OUTPUT = """ + fcs1 U78CB.001.WZS00ZS-P1-C9-T1 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03) + + Part Number.................00E0806 + Serial Number...............1C4090830F + Manufacturer................001C + EC Level.................... D77161 + Customer Card ID Number.....577D + FRU Number..................00E0806 + Device Specific.(ZM)........3 + Network Address.............10000090FA551508 + ROS Level and ID............027820B7 + Device Specific.(Z0)........31004549 + Device Specific.(ZC)........00000000 + Hardware Location Code......U78CB.001.WZS00ZS-P1-C9-T1 +""" + +# Solaris +FCINFO_OUTPUT = """ +HBA Port WWN: 10000090fa1658de + Port Mode: Initiator + Port ID: 30100 + OS Device Name: /dev/cfg/c13 + Manufacturer: Emulex + Model: LPe12002-S + Firmware Version: LPe12002-S 2.01a12 + FCode/BIOS Version: Boot:5.03a0 Fcode:3.01a1 + Serial Number: 4925381+13090001ER + Driver Name: emlxs + Driver Version: 3.3.00.1 (2018.01.05.16.30) + Type: N-port + State: online + Supported Speeds: 2Gb 4Gb 8Gb + Current Speed: 8Gb + Node WWN: 20000090fa1658de + NPIV Not Supported +""" + + +def mock_get_bin_path(cmd, required=False): + result = None + if cmd == 'lsdev': + result = '/usr/sbin/lsdev' + elif cmd == 'lscfg': + result = '/usr/sbin/lscfg' + elif cmd == 'fcinfo': + result = '/usr/sbin/fcinfo' + return result + + +def mock_run_command(cmd): + rc = 0 + if 'lsdev' in cmd: + result = LSDEV_OUTPUT + elif 'lscfg' in cmd: + result = LSCFG_OUTPUT + elif 'fcinfo' in cmd: + result = FCINFO_OUTPUT + else: + rc = 1 + result = 'Error' + return (rc, result, '') + + +def test_get_fc_wwn_info(mocker): + module = Mock() + inst = fc_wwn.FcWwnInitiatorFactCollector() + + mocker.patch.object(module, 'get_bin_path', side_effect=mock_get_bin_path) + mocker.patch.object(module, 'run_command', side_effect=mock_run_command) + + d = {'aix6': ['10000090FA551508'], 'sunos5': ['10000090fa1658de']} + for key, value in d.items(): + mocker.patch('sys.platform', key) + wwn_expected = {"fibre_channel_wwn": value} + assert wwn_expected == inst.collect(module=module) diff --git a/test/units/module_utils/facts/network/test_generic_bsd.py b/test/units/module_utils/facts/network/test_generic_bsd.py new file mode 100644 index 00000000..afb698c5 --- /dev/null +++ b/test/units/module_utils/facts/network/test_generic_bsd.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import Mock +from units.compat import unittest + +from ansible.module_utils.facts.network import generic_bsd + + +def get_bin_path(command): + if command == 'ifconfig': + return 'fake/ifconfig' + elif command == 'route': + return 'fake/route' + return None + + +netbsd_ifconfig_a_out_7_1 = r''' +lo0: flags=8049 mtu 33624 + inet 127.0.0.1 netmask 0xff000000 + inet6 ::1 prefixlen 128 + inet6 fe80::1%lo0 prefixlen 64 scopeid 0x1 +re0: flags=8843 mtu 1500 + capabilities=3f80 + capabilities=3f80 + enabled=0 + ec_capabilities=3 + ec_enabled=0 + address: 52:54:00:63:55:af + media: Ethernet autoselect (100baseTX full-duplex) + status: active + inet 192.168.122.205 netmask 0xffffff00 broadcast 192.168.122.255 + inet6 fe80::5054:ff:fe63:55af%re0 prefixlen 64 scopeid 0x2 +''' + +netbsd_ifconfig_a_out_post_7_1 = r''' +lo0: flags=0x8049 mtu 33624 + inet 127.0.0.1/8 flags 0x0 + inet6 ::1/128 flags 0x20 + inet6 fe80::1%lo0/64 flags 0x0 scopeid 0x1 +re0: flags=0x8843 mtu 1500 + capabilities=3f80 + capabilities=3f80 + enabled=0 + ec_capabilities=3 + ec_enabled=0 + address: 52:54:00:63:55:af + media: Ethernet autoselect (100baseTX full-duplex) + status: active + inet 192.168.122.205/24 broadcast 192.168.122.255 flags 0x0 + inet6 fe80::5054:ff:fe63:55af%re0/64 flags 0x0 scopeid 0x2 +''' + +NETBSD_EXPECTED = {'all_ipv4_addresses': ['192.168.122.205'], + 'all_ipv6_addresses': ['fe80::5054:ff:fe63:55af%re0'], + 'default_ipv4': {}, + 'default_ipv6': {}, + 'interfaces': ['lo0', 're0'], + 'lo0': {'device': 'lo0', + 'flags': ['UP', 'LOOPBACK', 'RUNNING', 'MULTICAST'], + 'ipv4': [{'address': '127.0.0.1', + 'broadcast': '127.255.255.255', + 'netmask': '255.0.0.0', + 'network': '127.0.0.0'}], + 'ipv6': [{'address': '::1', 'prefix': '128'}, + {'address': 'fe80::1%lo0', 'prefix': '64', 'scope': '0x1'}], + 'macaddress': 'unknown', + 'mtu': '33624', + 'type': 'loopback'}, + 're0': {'device': 're0', + 'flags': ['UP', 'BROADCAST', 'RUNNING', 'SIMPLEX', 'MULTICAST'], + 'ipv4': [{'address': '192.168.122.205', + 'broadcast': '192.168.122.255', + 'netmask': '255.255.255.0', + 'network': '192.168.122.0'}], + 'ipv6': [{'address': 'fe80::5054:ff:fe63:55af%re0', + 'prefix': '64', + 'scope': '0x2'}], + 'macaddress': 'unknown', + 'media': 'Ethernet', + 'media_options': [], + 'media_select': 'autoselect', + 'media_type': '100baseTX', + 'mtu': '1500', + 'status': 'active', + 'type': 'ether'}} + + +def run_command_old_ifconfig(command): + if command == 'fake/route': + return 0, 'Foo', '' + if command == ['fake/ifconfig', '-a']: + return 0, netbsd_ifconfig_a_out_7_1, '' + return 1, '', '' + + +def run_command_post_7_1_ifconfig(command): + if command == 'fake/route': + return 0, 'Foo', '' + if command == ['fake/ifconfig', '-a']: + return 0, netbsd_ifconfig_a_out_post_7_1, '' + return 1, '', '' + + +class TestGenericBsdNetworkNetBSD(unittest.TestCase): + gather_subset = ['all'] + + def setUp(self): + self.maxDiff = None + self.longMessage = True + + # TODO: extract module run_command/get_bin_path usage to methods I can mock without mocking all of run_command + def test(self): + module = self._mock_module() + module.get_bin_path.side_effect = get_bin_path + module.run_command.side_effect = run_command_old_ifconfig + + bsd_net = 
generic_bsd.GenericBsdIfconfigNetwork(module) + + res = bsd_net.populate() + self.assertDictEqual(res, NETBSD_EXPECTED) + + def test_ifconfig_post_7_1(self): + module = self._mock_module() + module.get_bin_path.side_effect = get_bin_path + module.run_command.side_effect = run_command_post_7_1_ifconfig + + bsd_net = generic_bsd.GenericBsdIfconfigNetwork(module) + + res = bsd_net.populate() + self.assertDictEqual(res, NETBSD_EXPECTED) + + def test_netbsd_ifconfig_old_and_new(self): + module_new = self._mock_module() + module_new.get_bin_path.side_effect = get_bin_path + module_new.run_command.side_effect = run_command_post_7_1_ifconfig + + bsd_net_new = generic_bsd.GenericBsdIfconfigNetwork(module_new) + res_new = bsd_net_new.populate() + + module_old = self._mock_module() + module_old.get_bin_path.side_effect = get_bin_path + module_old.run_command.side_effect = run_command_old_ifconfig + + bsd_net_old = generic_bsd.GenericBsdIfconfigNetwork(module_old) + res_old = bsd_net_old.populate() + + self.assertDictEqual(res_old, res_new) + self.assertDictEqual(res_old, NETBSD_EXPECTED) + self.assertDictEqual(res_new, NETBSD_EXPECTED) + + def _mock_module(self): + mock_module = Mock() + mock_module.params = {'gather_subset': self.gather_subset, + 'gather_timeout': 5, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value=None) + return mock_module diff --git a/test/units/module_utils/facts/network/test_iscsi_get_initiator.py b/test/units/module_utils/facts/network/test_iscsi_get_initiator.py new file mode 100644 index 00000000..2048ba2a --- /dev/null +++ b/test/units/module_utils/facts/network/test_iscsi_get_initiator.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.facts.network import iscsi +from units.compat.mock import Mock + + +# AIX # lsattr -E -l iscsi0 +LSATTR_OUTPUT = """ +disc_filename /etc/iscsi/targets Configuration file False +disc_policy file Discovery Policy True +initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True +isns_srvnames auto iSNS Servers IP Addresses True +isns_srvports iSNS Servers Port Numbers True +max_targets 16 Maximum Targets Allowed True +num_cmd_elems 200 Maximum number of commands to queue to driver True +""" + +# HP-UX # iscsiutil -l +ISCSIUTIL_OUTPUT = """ +Initiator Name : iqn.2001-04.com.hp.stor:svcio +Initiator Alias : +Authentication Method : None +CHAP Method : CHAP_UNI +Initiator CHAP Name : +CHAP Secret : +NAS Hostname : +NAS Secret : +Radius Server Hostname : +Header Digest : None,CRC32C (default) +Data Digest : None,CRC32C (default) +SLP Scope list for iSLPD : +""" + + +def test_get_iscsi_info(mocker): + module = Mock() + inst = iscsi.IscsiInitiatorNetworkCollector() + + mocker.patch('sys.platform', 'aix6') + mocker.patch('ansible.module_utils.facts.network.iscsi.get_bin_path', return_value='/usr/sbin/lsattr') + mocker.patch.object(module, 'run_command', return_value=(0, LSATTR_OUTPUT, '')) + aix_iscsi_expected = {"iscsi_iqn": "iqn.localhost.hostid.7f000002"} + assert aix_iscsi_expected == inst.collect(module=module) + + mocker.patch('sys.platform', 'hp-ux') + mocker.patch('ansible.module_utils.facts.network.iscsi.get_bin_path', return_value='/opt/iscsi/bin/iscsiutil') + mocker.patch.object(module, 'run_command', return_value=(0, ISCSIUTIL_OUTPUT, '')) + hpux_iscsi_expected = 
{"iscsi_iqn": " iqn.2001-04.com.hp.stor:svcio"} + assert hpux_iscsi_expected == inst.collect(module=module) diff --git a/test/units/module_utils/facts/other/__init__.py b/test/units/module_utils/facts/other/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/other/test_facter.py b/test/units/module_utils/facts/other/test_facter.py new file mode 100644 index 00000000..7466338e --- /dev/null +++ b/test/units/module_utils/facts/other/test_facter.py @@ -0,0 +1,228 @@ +# unit tests for ansible other facter fact collector +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import Mock, patch + +from .. base import BaseFactsTest + +from ansible.module_utils.facts.other.facter import FacterFactCollector + +facter_json_output = ''' +{ + "operatingsystemmajrelease": "25", + "hardwareisa": "x86_64", + "kernel": "Linux", + "path": "/home/testuser/src/ansible/bin:/home/testuser/perl5/bin:/home/testuser/perl5/bin:/home/testuser/bin:/home/testuser/.local/bin:/home/testuser/pythons/bin:/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/testuser/.cabal/bin:/home/testuser/gopath/bin:/home/testuser/.rvm/bin", + "memorysize": "15.36 GB", + "memoryfree": "4.88 GB", + "swapsize": "7.70 GB", + "swapfree": "6.75 GB", + "swapsize_mb": "7880.00", + "swapfree_mb": "6911.41", + "memorysize_mb": "15732.95", + "memoryfree_mb": "4997.68", + "lsbmajdistrelease": "25", + "macaddress": "02:42:ea:15:d8:84", + "id": "testuser", + "domain": "example.com", + "augeasversion": "1.7.0", + "os": { + "name": "Fedora", + "family": "RedHat", + "release": { + "major": "25", + "full": "25" + }, + "lsb": { + "distcodename": "TwentyFive", + "distid": "Fedora", + "distdescription": "Fedora release 25 (Twenty Five)", + "release": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch", + "distrelease": "25", + "majdistrelease": "25" + } + }, + "processors": { + "models": [ + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz" + ], + "count": 8, + "physicalcount": 1 + }, + "architecture": "x86_64", + "hardwaremodel": "x86_64", + "operatingsystem": "Fedora", + "processor0": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor1": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor2": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor3": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor4": 
"Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor5": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor6": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processor7": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "processorcount": 8, + "uptime_seconds": 1558090, + "fqdn": "myhostname.example.com", + "rubyversion": "2.3.3", + "gid": "testuser", + "physicalprocessorcount": 1, + "netmask": "255.255.0.0", + "uniqueid": "a8c01301", + "uptime_days": 18, + "interfaces": "docker0,em1,lo,vethf20ff12,virbr0,virbr1,virbr0_nic,virbr1_nic,wlp4s0", + "ipaddress_docker0": "172.17.0.1", + "macaddress_docker0": "02:42:ea:15:d8:84", + "netmask_docker0": "255.255.0.0", + "mtu_docker0": 1500, + "macaddress_em1": "3c:97:0e:e9:28:8e", + "mtu_em1": 1500, + "ipaddress_lo": "127.0.0.1", + "netmask_lo": "255.0.0.0", + "mtu_lo": 65536, + "macaddress_vethf20ff12": "ae:6e:2b:1e:a1:31", + "mtu_vethf20ff12": 1500, + "ipaddress_virbr0": "192.168.137.1", + "macaddress_virbr0": "52:54:00:ce:82:5e", + "netmask_virbr0": "255.255.255.0", + "mtu_virbr0": 1500, + "ipaddress_virbr1": "192.168.121.1", + "macaddress_virbr1": "52:54:00:b4:68:a9", + "netmask_virbr1": "255.255.255.0", + "mtu_virbr1": 1500, + "macaddress_virbr0_nic": "52:54:00:ce:82:5e", + "mtu_virbr0_nic": 1500, + "macaddress_virbr1_nic": "52:54:00:b4:68:a9", + "mtu_virbr1_nic": 1500, + "ipaddress_wlp4s0": "192.168.1.19", + "macaddress_wlp4s0": "5c:51:4f:e6:a8:e3", + "netmask_wlp4s0": "255.255.255.0", + "mtu_wlp4s0": 1500, + "virtual": "physical", + "is_virtual": false, + "partitions": { + "sda2": { + "size": "499091456" + }, + "sda1": { + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0", + "size": "1024000", + "mount": "/boot" + } + }, + "lsbdistcodename": "TwentyFive", + "lsbrelease": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch", # noqa + "filesystems": "btrfs,ext2,ext3,ext4,xfs", + "system_uptime": { + "seconds": 1558090, + "hours": 432, + "days": 18, + "uptime": "18 days" + }, + "ipaddress": "172.17.0.1", + "timezone": "EDT", + "ps": "ps -ef", + "rubyplatform": "x86_64-linux", + "rubysitedir": "/usr/local/share/ruby/site_ruby", + "uptime": "18 days", + "lsbdistrelease": "25", + "operatingsystemrelease": "25", + "facterversion": "2.4.3", + "kernelrelease": "4.9.14-200.fc25.x86_64", + "lsbdistdescription": "Fedora release 25 (Twenty Five)", + "network_docker0": "172.17.0.0", + "network_lo": "127.0.0.0", + "network_virbr0": "192.168.137.0", + "network_virbr1": "192.168.121.0", + "network_wlp4s0": "192.168.1.0", + "lsbdistid": "Fedora", + "selinux": true, + "selinux_enforced": false, + "selinux_policyversion": "30", + "selinux_current_mode": "permissive", + "selinux_config_mode": "permissive", + "selinux_config_policy": "targeted", + "hostname": "myhostname", + "osfamily": "RedHat", + "kernelmajversion": "4.9", + "blockdevice_sr0_size": 1073741312, + "blockdevice_sr0_vendor": "MATSHITA", + "blockdevice_sr0_model": "DVD-RAM UJ8E2", + "blockdevice_sda_size": 256060514304, + "blockdevice_sda_vendor": "ATA", + "blockdevice_sda_model": "SAMSUNG MZ7TD256", + "blockdevices": "sda,sr0", + "uptime_hours": 432, + "kernelversion": "4.9.14" +} +''' + + +class TestFacterCollector(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'facter'] + valid_subsets = ['facter'] + fact_namespace = 'ansible_facter' + collector_class = FacterFactCollector + + def _mock_module(self): + mock_module = Mock() + mock_module.params = 
{'gather_subset': self.gather_subset, + 'gather_timeout': 10, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value='/not/actually/facter') + mock_module.run_command = Mock(return_value=(0, facter_json_output, '')) + return mock_module + + @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.get_facter_output') + def test_bogus_json(self, mock_get_facter_output): + module = self._mock_module() + + # bogus json + mock_get_facter_output.return_value = '{' + + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + self.assertEqual(facts_dict, {}) + + @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.run_facter') + def test_facter_non_zero_return_code(self, mock_run_facter): + module = self._mock_module() + + # non-zero return code with valid (but empty) json on stdout + mock_run_facter.return_value = (1, '{}', '') + + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + + # This assumes no 'facter' entry at all is correct + self.assertNotIn('facter', facts_dict) + self.assertEqual(facts_dict, {}) diff --git a/test/units/module_utils/facts/other/test_ohai.py b/test/units/module_utils/facts/other/test_ohai.py new file mode 100644 index 00000000..42a72d97 --- /dev/null +++ b/test/units/module_utils/facts/other/test_ohai.py @@ -0,0 +1,6768 @@ +# unit tests for ansible ohai fact collector +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import Mock, patch + +from .. 
base import BaseFactsTest + +from ansible.module_utils.facts.other.ohai import OhaiFactCollector + +ohai_json_output = r''' +{ + "kernel": { + "name": "Linux", + "release": "4.9.14-200.fc25.x86_64", + "version": "#1 SMP Mon Mar 13 19:26:40 UTC 2017", + "machine": "x86_64", + "processor": "x86_64", + "os": "GNU/Linux", + "modules": { + "binfmt_misc": { + "size": "20480", + "refcount": "1" + }, + "veth": { + "size": "16384", + "refcount": "0" + }, + "xfs": { + "size": "1200128", + "refcount": "1" + }, + "xt_addrtype": { + "size": "16384", + "refcount": "2" + }, + "br_netfilter": { + "size": "24576", + "refcount": "0" + }, + "dm_thin_pool": { + "size": "65536", + "refcount": "2" + }, + "dm_persistent_data": { + "size": "69632", + "refcount": "1" + }, + "dm_bio_prison": { + "size": "16384", + "refcount": "1" + }, + "libcrc32c": { + "size": "16384", + "refcount": "2" + }, + "rfcomm": { + "size": "77824", + "refcount": "14", + "version": "1.11" + }, + "fuse": { + "size": "102400", + "refcount": "3" + }, + "ccm": { + "size": "20480", + "refcount": "2" + }, + "xt_CHECKSUM": { + "size": "16384", + "refcount": "2" + }, + "iptable_mangle": { + "size": "16384", + "refcount": "1" + }, + "ipt_MASQUERADE": { + "size": "16384", + "refcount": "7" + }, + "nf_nat_masquerade_ipv4": { + "size": "16384", + "refcount": "1" + }, + "iptable_nat": { + "size": "16384", + "refcount": "1" + }, + "nf_nat_ipv4": { + "size": "16384", + "refcount": "1" + }, + "nf_nat": { + "size": "28672", + "refcount": "2" + }, + "nf_conntrack_ipv4": { + "size": "16384", + "refcount": "4" + }, + "nf_defrag_ipv4": { + "size": "16384", + "refcount": "1" + }, + "xt_conntrack": { + "size": "16384", + "refcount": "3" + }, + "nf_conntrack": { + "size": "106496", + "refcount": "5" + }, + "ip6t_REJECT": { + "size": "16384", + "refcount": "2" + }, + "nf_reject_ipv6": { + "size": "16384", + "refcount": "1" + }, + "tun": { + "size": "28672", + "refcount": "4" + }, + "bridge": { + "size": "135168", + "refcount": "1", + "version": "2.3" + }, + "stp": { + "size": "16384", + "refcount": "1" + }, + "llc": { + "size": "16384", + "refcount": "2" + }, + "ebtable_filter": { + "size": "16384", + "refcount": "0" + }, + "ebtables": { + "size": "36864", + "refcount": "1" + }, + "ip6table_filter": { + "size": "16384", + "refcount": "1" + }, + "ip6_tables": { + "size": "28672", + "refcount": "1" + }, + "cmac": { + "size": "16384", + "refcount": "3" + }, + "uhid": { + "size": "20480", + "refcount": "2" + }, + "bnep": { + "size": "20480", + "refcount": "2", + "version": "1.3" + }, + "btrfs": { + "size": "1056768", + "refcount": "1" + }, + "xor": { + "size": "24576", + "refcount": "1" + }, + "raid6_pq": { + "size": "106496", + "refcount": "1" + }, + "loop": { + "size": "28672", + "refcount": "6" + }, + "arc4": { + "size": "16384", + "refcount": "2" + }, + "snd_hda_codec_hdmi": { + "size": "45056", + "refcount": "1" + }, + "intel_rapl": { + "size": "20480", + "refcount": "0" + }, + "x86_pkg_temp_thermal": { + "size": "16384", + "refcount": "0" + }, + "intel_powerclamp": { + "size": "16384", + "refcount": "0" + }, + "coretemp": { + "size": "16384", + "refcount": "0" + }, + "kvm_intel": { + "size": "192512", + "refcount": "0" + }, + "kvm": { + "size": "585728", + "refcount": "1" + }, + "irqbypass": { + "size": "16384", + "refcount": "1" + }, + "crct10dif_pclmul": { + "size": "16384", + "refcount": "0" + }, + "crc32_pclmul": { + "size": "16384", + "refcount": "0" + }, + "iTCO_wdt": { + "size": "16384", + "refcount": "0", + "version": "1.11" + }, + "ghash_clmulni_intel": 
{ + "size": "16384", + "refcount": "0" + }, + "mei_wdt": { + "size": "16384", + "refcount": "0" + }, + "iTCO_vendor_support": { + "size": "16384", + "refcount": "1", + "version": "1.04" + }, + "iwlmvm": { + "size": "364544", + "refcount": "0" + }, + "intel_cstate": { + "size": "16384", + "refcount": "0" + }, + "uvcvideo": { + "size": "90112", + "refcount": "0", + "version": "1.1.1" + }, + "videobuf2_vmalloc": { + "size": "16384", + "refcount": "1" + }, + "intel_uncore": { + "size": "118784", + "refcount": "0" + }, + "videobuf2_memops": { + "size": "16384", + "refcount": "1" + }, + "videobuf2_v4l2": { + "size": "24576", + "refcount": "1" + }, + "videobuf2_core": { + "size": "40960", + "refcount": "2" + }, + "intel_rapl_perf": { + "size": "16384", + "refcount": "0" + }, + "mac80211": { + "size": "749568", + "refcount": "1" + }, + "videodev": { + "size": "172032", + "refcount": "3" + }, + "snd_usb_audio": { + "size": "180224", + "refcount": "3" + }, + "e1000e": { + "size": "249856", + "refcount": "0", + "version": "3.2.6-k" + } + } + }, + "os": "linux", + "os_version": "4.9.14-200.fc25.x86_64", + "lsb": { + "id": "Fedora", + "description": "Fedora release 25 (Twenty Five)", + "release": "25", + "codename": "TwentyFive" + }, + "platform": "fedora", + "platform_version": "25", + "platform_family": "fedora", + "packages": { + "ansible": { + "epoch": "0", + "version": "2.2.1.0", + "release": "1.fc25", + "installdate": "1486050042", + "arch": "noarch" + }, + "python3": { + "epoch": "0", + "version": "3.5.3", + "release": "3.fc25", + "installdate": "1490025957", + "arch": "x86_64" + }, + "kernel": { + "epoch": "0", + "version": "4.9.6", + "release": "200.fc25", + "installdate": "1486047522", + "arch": "x86_64" + }, + "glibc": { + "epoch": "0", + "version": "2.24", + "release": "4.fc25", + "installdate": "1483402427", + "arch": "x86_64" + } + }, + "chef_packages": { + "ohai": { + "version": "13.0.0", + "ohai_root": "/home/some_user/.gem/ruby/gems/ohai-13.0.0/lib/ohai" + } + }, + "dmi": { + "dmidecode_version": "3.0" + }, + "uptime_seconds": 2509008, + "uptime": "29 days 00 hours 56 minutes 48 seconds", + "idletime_seconds": 19455087, + "idletime": "225 days 04 hours 11 minutes 27 seconds", + "memory": { + "swap": { + "cached": "262436kB", + "total": "8069116kB", + "free": "5154396kB" + }, + "hugepages": { + "total": "0", + "free": "0", + "reserved": "0", + "surplus": "0" + }, + "total": "16110540kB", + "free": "3825844kB", + "buffers": "377240kB", + "cached": "3710084kB", + "active": "8104320kB", + "inactive": "3192920kB", + "dirty": "812kB", + "writeback": "0kB", + "anon_pages": "7124992kB", + "mapped": "580700kB", + "slab": "622848kB", + "slab_reclaimable": "307300kB", + "slab_unreclaim": "315548kB", + "page_tables": "157572kB", + "nfs_unstable": "0kB", + "bounce": "0kB", + "commit_limit": "16124384kB", + "committed_as": "31345068kB", + "vmalloc_total": "34359738367kB", + "vmalloc_used": "0kB", + "vmalloc_chunk": "0kB", + "hugepage_size": "2048kB" + }, + "filesystem": { + "by_device": { + "devtmpfs": { + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ], + "mounts": [ + "/dev" + ] + }, + "tmpfs": { + "kb_size": "1611052", + "kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + 
"total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": "2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ], + "mounts": [ + "/dev/shm", + "/run", + "/sys/fs/cgroup", + "/tmp", + "/run/user/0", + "/run/user/1000" + ] + }, + "/dev/mapper/fedora_host--186-root": { + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "12312331-3449-4a6c-8179-a1feb2bca6ce", + "mounts": [ + "/", + "/var/lib/docker/devicemapper" + ] + }, + "/dev/sda1": { + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "12312311-ef40-4691-a3b6-438c3f9bc1c0", + "mounts": [ + "/boot" + ] + }, + "/dev/mapper/fedora_host--186-home": { + "kb_size": "185948124", + "kb_used": "105904724", + "kb_available": "70574680", + "percent_used": "61%", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d", + "mounts": [ + "/home" + ] + }, + "/dev/loop0": { + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390", + "mounts": [ + "/var/lib/machines" + ] + }, + "sysfs": { + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys" + ] + }, + "proc": { + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "mounts": [ + "/proc" + ] + }, + "securityfs": { + "fs_type": "securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "mounts": [ + "/sys/kernel/security" + ] + }, + "devpts": { + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ], + "mounts": [ + "/dev/pts" + ] + }, + "cgroup": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ], + "mounts": [ + "/sys/fs/cgroup/systemd", + "/sys/fs/cgroup/devices", + "/sys/fs/cgroup/cpuset", + "/sys/fs/cgroup/perf_event", + "/sys/fs/cgroup/hugetlb", + "/sys/fs/cgroup/cpu,cpuacct", + "/sys/fs/cgroup/blkio", + "/sys/fs/cgroup/freezer", + "/sys/fs/cgroup/memory", + "/sys/fs/cgroup/pids", + "/sys/fs/cgroup/net_cls,net_prio" + ] + }, + "pstore": { + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys/fs/pstore" + ] + }, + "configfs": { + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/kernel/config" + ] + }, + "selinuxfs": { + "fs_type": "selinuxfs", + "mount_options": [ + "rw", + "relatime" + ], + 
"mounts": [ + "/sys/fs/selinux" + ] + }, + "debugfs": { + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys/kernel/debug" + ] + }, + "hugetlbfs": { + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/dev/hugepages" + ] + }, + "mqueue": { + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/dev/mqueue" + ] + }, + "systemd-1": { + "fs_type": "autofs", + "mount_options": [ + "rw", + "relatime", + "fd=40", + "pgrp=1", + "timeout=0", + "minproto=5", + "maxproto=5", + "direct", + "pipe_ino=17610" + ], + "mounts": [ + "/proc/sys/fs/binfmt_misc" + ] + }, + "/var/lib/machines.raw": { + "fs_type": "btrfs", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ], + "mounts": [ + "/var/lib/machines" + ] + }, + "fusectl": { + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/fs/fuse/connections" + ] + }, + "gvfsd-fuse": { + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ], + "mounts": [ + "/run/user/1000/gvfs" + ] + }, + "binfmt_misc": { + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/proc/sys/fs/binfmt_misc" + ] + }, + "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": { + "fs_type": "xfs", + "mount_options": [ + "rw", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "nouuid", + "attr2", + "inode64", + "logbsize=64k", + "sunit=128", + "swidth=128", + "noquota" + ], + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123", + "mounts": [ + "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8" + ] + }, + "shm": { + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "size=65536k" + ], + "mounts": [ + "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm" + ] + }, + "nsfs": { + "fs_type": "nsfs", + "mount_options": [ + "rw" + ], + "mounts": [ + "/run/docker/netns/1ce89fd79f3d" + ] + }, + "tracefs": { + "fs_type": "tracefs", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/kernel/debug/tracing" + ] + }, + "/dev/loop1": { + "fs_type": "xfs", + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123", + "mounts": [ + + ] + }, + "/dev/mapper/docker-253:1-1180487-pool": { + "mounts": [ + + ] + }, + "/dev/sr0": { + "mounts": [ + + ] + }, + "/dev/loop2": { + "mounts": [ + + ] + }, + "/dev/sda": { + "mounts": [ + + ] + }, + "/dev/sda2": { + "fs_type": "LVM2_member", + "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK", + "mounts": [ + + ] + }, + "/dev/mapper/fedora_host--186-swap": { + "fs_type": "swap", + "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d", + "mounts": [ + + ] + } + }, + "by_mountpoint": { + "/dev": { + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ], + "devices": [ + "devtmpfs" + ] + }, + "/dev/shm": { + "kb_size": "8055268", + "kb_used": 
"96036", + "kb_available": "7959232", + "percent_used": "2%", + "total_inodes": "2013817", + "inodes_used": "217", + "inodes_available": "2013600", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ], + "devices": [ + "tmpfs" + ] + }, + "/run": { + "kb_size": "8055268", + "kb_used": "2280", + "kb_available": "8052988", + "percent_used": "1%", + "total_inodes": "2013817", + "inodes_used": "1070", + "inodes_available": "2012747", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel", + "mode=755" + ], + "devices": [ + "tmpfs" + ] + }, + "/sys/fs/cgroup": { + "kb_size": "8055268", + "kb_used": "0", + "kb_available": "8055268", + "percent_used": "0%", + "total_inodes": "2013817", + "inodes_used": "16", + "inodes_available": "2013801", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "ro", + "nosuid", + "nodev", + "noexec", + "seclabel", + "mode=755" + ], + "devices": [ + "tmpfs" + ] + }, + "/": { + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce", + "devices": [ + "/dev/mapper/fedora_host--186-root" + ] + }, + "/tmp": { + "kb_size": "8055268", + "kb_used": "848396", + "kb_available": "7206872", + "percent_used": "11%", + "total_inodes": "2013817", + "inodes_used": "1353", + "inodes_available": "2012464", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ], + "devices": [ + "tmpfs" + ] + }, + "/boot": { + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0", + "devices": [ + "/dev/sda1" + ] + }, + "/home": { + "kb_size": "185948124", + "kb_used": "105904724", + "kb_available": "70574680", + "percent_used": "61%", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d", + "devices": [ + "/dev/mapper/fedora_host--186-home" + ] + }, + "/var/lib/machines": { + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390", + "devices": [ + "/dev/loop0", + "/var/lib/machines.raw" + ], + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ] + }, + "/run/user/0": { + "kb_size": "1611052", + "kb_used": "0", + "kb_available": "1611052", + "percent_used": "0%", + "total_inodes": "2013817", + "inodes_used": "7", + "inodes_available": "2013810", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700" + ], + "devices": [ + "tmpfs" + ] + }, + "/run/user/1000": { + "kb_size": "1611052", + 
"kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + "total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": "2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ], + "devices": [ + "tmpfs" + ] + }, + "/sys": { + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "devices": [ + "sysfs" + ] + }, + "/proc": { + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "devices": [ + "proc" + ] + }, + "/sys/kernel/security": { + "fs_type": "securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "devices": [ + "securityfs" + ] + }, + "/dev/pts": { + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ], + "devices": [ + "devpts" + ] + }, + "/sys/fs/cgroup/systemd": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "xattr", + "release_agent=/usr/lib/systemd/systemd-cgroups-agent", + "name=systemd" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/pstore": { + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "devices": [ + "pstore" + ] + }, + "/sys/fs/cgroup/devices": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "devices" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/cpuset": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpuset" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/perf_event": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "perf_event" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/hugetlb": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "hugetlb" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/cpu,cpuacct": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpu", + "cpuacct" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/blkio": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "blkio" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/freezer": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "freezer" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/memory": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "memory" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/pids": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "pids" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/net_cls,net_prio": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/kernel/config": { + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "configfs" + ] + }, + "/sys/fs/selinux": { + "fs_type": "selinuxfs", + 
"mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "selinuxfs" + ] + }, + "/sys/kernel/debug": { + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "debugfs" + ] + }, + "/dev/hugepages": { + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "hugetlbfs" + ] + }, + "/dev/mqueue": { + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "mqueue" + ] + }, + "/proc/sys/fs/binfmt_misc": { + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "systemd-1", + "binfmt_misc" + ] + }, + "/sys/fs/fuse/connections": { + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "fusectl" + ] + }, + "/run/user/1000/gvfs": { + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ], + "devices": [ + "gvfsd-fuse" + ] + }, + "/var/lib/docker/devicemapper": { + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce", + "devices": [ + "/dev/mapper/fedora_host--186-root" + ] + }, + "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": { + "fs_type": "xfs", + "mount_options": [ + "rw", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "nouuid", + "attr2", + "inode64", + "logbsize=64k", + "sunit=128", + "swidth=128", + "noquota" + ], + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123", + "devices": [ + "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8" + ] + }, + "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": { + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "size=65536k" + ], + "devices": [ + "shm" + ] + }, + "/run/docker/netns/1ce89fd79f3d": { + "fs_type": "nsfs", + "mount_options": [ + "rw" + ], + "devices": [ + "nsfs" + ] + }, + "/sys/kernel/debug/tracing": { + "fs_type": "tracefs", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "tracefs" + ] + } + }, + "by_pair": { + "devtmpfs,/dev": { + "device": "devtmpfs", + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "mount": "/dev", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ] + }, + "tmpfs,/dev/shm": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "96036", + "kb_available": "7959232", + "percent_used": "2%", + "mount": "/dev/shm", + "total_inodes": "2013817", + "inodes_used": "217", + "inodes_available": "2013600", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ] + }, + "tmpfs,/run": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "2280", + "kb_available": "8052988", + "percent_used": "1%", + "mount": "/run", + "total_inodes": "2013817", + "inodes_used": "1070", + "inodes_available": "2012747", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel", 
+ "mode=755" + ] + }, + "tmpfs,/sys/fs/cgroup": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "0", + "kb_available": "8055268", + "percent_used": "0%", + "mount": "/sys/fs/cgroup", + "total_inodes": "2013817", + "inodes_used": "16", + "inodes_available": "2013801", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "ro", + "nosuid", + "nodev", + "noexec", + "seclabel", + "mode=755" + ] + }, + "/dev/mapper/fedora_host--186-root,/": { + "device": "/dev/mapper/fedora_host--186-root", + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "mount": "/", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce" + }, + "tmpfs,/tmp": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "848396", + "kb_available": "7206872", + "percent_used": "11%", + "mount": "/tmp", + "total_inodes": "2013817", + "inodes_used": "1353", + "inodes_available": "2012464", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ] + }, + "/dev/sda1,/boot": { + "device": "/dev/sda1", + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "mount": "/boot", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0" + }, + "/dev/mapper/fedora_host--186-home,/home": { + "device": "/dev/mapper/fedora_host--186-home", + "kb_size": "185948124", + "kb_used": "105904724", + "kb_available": "70574680", + "percent_used": "61%", + "mount": "/home", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d" + }, + "/dev/loop0,/var/lib/machines": { + "device": "/dev/loop0", + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "mount": "/var/lib/machines", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390" + }, + "tmpfs,/run/user/0": { + "device": "tmpfs", + "kb_size": "1611052", + "kb_used": "0", + "kb_available": "1611052", + "percent_used": "0%", + "mount": "/run/user/0", + "total_inodes": "2013817", + "inodes_used": "7", + "inodes_available": "2013810", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700" + ] + }, + "tmpfs,/run/user/1000": { + "device": "tmpfs", + "kb_size": "1611052", + "kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + "mount": "/run/user/1000", + "total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": "2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ] + }, + "sysfs,/sys": { + "device": "sysfs", + "mount": "/sys", + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" 
+ ] + }, + "proc,/proc": { + "device": "proc", + "mount": "/proc", + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ] + }, + "securityfs,/sys/kernel/security": { + "device": "securityfs", + "mount": "/sys/kernel/security", + "fs_type": "securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ] + }, + "devpts,/dev/pts": { + "device": "devpts", + "mount": "/dev/pts", + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ] + }, + "cgroup,/sys/fs/cgroup/systemd": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/systemd", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "xattr", + "release_agent=/usr/lib/systemd/systemd-cgroups-agent", + "name=systemd" + ] + }, + "pstore,/sys/fs/pstore": { + "device": "pstore", + "mount": "/sys/fs/pstore", + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ] + }, + "cgroup,/sys/fs/cgroup/devices": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/devices", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "devices" + ] + }, + "cgroup,/sys/fs/cgroup/cpuset": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/cpuset", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpuset" + ] + }, + "cgroup,/sys/fs/cgroup/perf_event": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/perf_event", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "perf_event" + ] + }, + "cgroup,/sys/fs/cgroup/hugetlb": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/hugetlb", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "hugetlb" + ] + }, + "cgroup,/sys/fs/cgroup/cpu,cpuacct": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/cpu,cpuacct", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpu", + "cpuacct" + ] + }, + "cgroup,/sys/fs/cgroup/blkio": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/blkio", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "blkio" + ] + }, + "cgroup,/sys/fs/cgroup/freezer": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/freezer", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "freezer" + ] + }, + "cgroup,/sys/fs/cgroup/memory": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/memory", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "memory" + ] + }, + "cgroup,/sys/fs/cgroup/pids": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/pids", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "pids" + ] + }, + "cgroup,/sys/fs/cgroup/net_cls,net_prio": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/net_cls,net_prio", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ] + }, + "configfs,/sys/kernel/config": { + "device": "configfs", + "mount": "/sys/kernel/config", + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ] + }, + "selinuxfs,/sys/fs/selinux": { + "device": "selinuxfs", + 
"mount": "/sys/fs/selinux", + "fs_type": "selinuxfs", + "mount_options": [ + "rw", + "relatime" + ] + }, + "debugfs,/sys/kernel/debug": { + "device": "debugfs", + "mount": "/sys/kernel/debug", + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "hugetlbfs,/dev/hugepages": { + "device": "hugetlbfs", + "mount": "/dev/hugepages", + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "mqueue,/dev/mqueue": { + "device": "mqueue", + "mount": "/dev/mqueue", + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "systemd-1,/proc/sys/fs/binfmt_misc": { + "device": "systemd-1", + "mount": "/proc/sys/fs/binfmt_misc", + "fs_type": "autofs", + "mount_options": [ + "rw", + "relatime", + "fd=40", + "pgrp=1", + "timeout=0", + "minproto=5", + "maxproto=5", + "direct", + "pipe_ino=17610" + ] + }, + "/var/lib/machines.raw,/var/lib/machines": { + "device": "/var/lib/machines.raw", + "mount": "/var/lib/machines", + "fs_type": "btrfs", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ] + }, + "fusectl,/sys/fs/fuse/connections": { + "device": "fusectl", + "mount": "/sys/fs/fuse/connections", + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ] + }, + "gvfsd-fuse,/run/user/1000/gvfs": { + "device": "gvfsd-fuse", + "mount": "/run/user/1000/gvfs", + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ] + }, + "/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": { + "device": "/dev/mapper/fedora_host--186-root", + "mount": "/var/lib/docker/devicemapper", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce" + }, + "binfmt_misc,/proc/sys/fs/binfmt_misc": { + "device": "binfmt_misc", + "mount": "/proc/sys/fs/binfmt_misc", + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ] + }, + "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": { + "device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8", + "mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8", + "fs_type": "xfs", + "mount_options": [ + "rw", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "nouuid", + "attr2", + "inode64", + "logbsize=64k", + "sunit=128", + "swidth=128", + "noquota" + ], + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123" + }, + "shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": { + "device": "shm", + "mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "size=65536k" + ] + }, + "nsfs,/run/docker/netns/1ce89fd79f3d": { + "device": "nsfs", + "mount": "/run/docker/netns/1ce89fd79f3d", + "fs_type": "nsfs", + "mount_options": [ + "rw" + ] + }, + "tracefs,/sys/kernel/debug/tracing": { + "device": "tracefs", + "mount": "/sys/kernel/debug/tracing", + "fs_type": "tracefs", + "mount_options": [ + "rw", + 
"relatime" + ] + }, + "/dev/loop1,": { + "device": "/dev/loop1", + "fs_type": "xfs", + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123" + }, + "/dev/mapper/docker-253:1-1180487-pool,": { + "device": "/dev/mapper/docker-253:1-1180487-pool" + }, + "/dev/sr0,": { + "device": "/dev/sr0" + }, + "/dev/loop2,": { + "device": "/dev/loop2" + }, + "/dev/sda,": { + "device": "/dev/sda" + }, + "/dev/sda2,": { + "device": "/dev/sda2", + "fs_type": "LVM2_member", + "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK" + }, + "/dev/mapper/fedora_host--186-swap,": { + "device": "/dev/mapper/fedora_host--186-swap", + "fs_type": "swap", + "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d" + } + } + }, + "filesystem2": { + "by_device": { + "devtmpfs": { + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ], + "mounts": [ + "/dev" + ] + }, + "tmpfs": { + "kb_size": "1611052", + "kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + "total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": "2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ], + "mounts": [ + "/dev/shm", + "/run", + "/sys/fs/cgroup", + "/tmp", + "/run/user/0", + "/run/user/1000" + ] + }, + "/dev/mapper/fedora_host--186-root": { + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce", + "mounts": [ + "/", + "/var/lib/docker/devicemapper" + ] + }, + "/dev/sda1": { + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0", + "mounts": [ + "/boot" + ] + }, + "/dev/mapper/fedora_host--186-home": { + "kb_size": "185948124", + "kb_used": "105904724", + "kb_available": "70574680", + "percent_used": "61%", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d", + "mounts": [ + "/home" + ] + }, + "/dev/loop0": { + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390", + "mounts": [ + "/var/lib/machines" + ] + }, + "sysfs": { + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys" + ] + }, + "proc": { + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "mounts": [ + "/proc" + ] + }, + "securityfs": { + "fs_type": 
"securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "mounts": [ + "/sys/kernel/security" + ] + }, + "devpts": { + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ], + "mounts": [ + "/dev/pts" + ] + }, + "cgroup": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ], + "mounts": [ + "/sys/fs/cgroup/systemd", + "/sys/fs/cgroup/devices", + "/sys/fs/cgroup/cpuset", + "/sys/fs/cgroup/perf_event", + "/sys/fs/cgroup/hugetlb", + "/sys/fs/cgroup/cpu,cpuacct", + "/sys/fs/cgroup/blkio", + "/sys/fs/cgroup/freezer", + "/sys/fs/cgroup/memory", + "/sys/fs/cgroup/pids", + "/sys/fs/cgroup/net_cls,net_prio" + ] + }, + "pstore": { + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys/fs/pstore" + ] + }, + "configfs": { + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/kernel/config" + ] + }, + "selinuxfs": { + "fs_type": "selinuxfs", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/fs/selinux" + ] + }, + "debugfs": { + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/sys/kernel/debug" + ] + }, + "hugetlbfs": { + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/dev/hugepages" + ] + }, + "mqueue": { + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "mounts": [ + "/dev/mqueue" + ] + }, + "systemd-1": { + "fs_type": "autofs", + "mount_options": [ + "rw", + "relatime", + "fd=40", + "pgrp=1", + "timeout=0", + "minproto=5", + "maxproto=5", + "direct", + "pipe_ino=17610" + ], + "mounts": [ + "/proc/sys/fs/binfmt_misc" + ] + }, + "/var/lib/machines.raw": { + "fs_type": "btrfs", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ], + "mounts": [ + "/var/lib/machines" + ] + }, + "fusectl": { + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/fs/fuse/connections" + ] + }, + "gvfsd-fuse": { + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ], + "mounts": [ + "/run/user/1000/gvfs" + ] + }, + "binfmt_misc": { + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/proc/sys/fs/binfmt_misc" + ] + }, + "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": { + "fs_type": "xfs", + "mount_options": [ + "rw", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "nouuid", + "attr2", + "inode64", + "logbsize=64k", + "sunit=128", + "swidth=128", + "noquota" + ], + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123", + "mounts": [ + "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8" + ] + }, + "shm": { + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "size=65536k" + ], + "mounts": [ + "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm" + ] + }, + "nsfs": { + "fs_type": "nsfs", + "mount_options": [ + "rw" + ], + "mounts": [ + 
"/run/docker/netns/1ce89fd79f3d" + ] + }, + "tracefs": { + "fs_type": "tracefs", + "mount_options": [ + "rw", + "relatime" + ], + "mounts": [ + "/sys/kernel/debug/tracing" + ] + }, + "/dev/loop1": { + "fs_type": "xfs", + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123", + "mounts": [ + + ] + }, + "/dev/mapper/docker-253:1-1180487-pool": { + "mounts": [ + + ] + }, + "/dev/sr0": { + "mounts": [ + + ] + }, + "/dev/loop2": { + "mounts": [ + + ] + }, + "/dev/sda": { + "mounts": [ + + ] + }, + "/dev/sda2": { + "fs_type": "LVM2_member", + "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK", + "mounts": [ + + ] + }, + "/dev/mapper/fedora_host--186-swap": { + "fs_type": "swap", + "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d", + "mounts": [ + + ] + } + }, + "by_mountpoint": { + "/dev": { + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ], + "devices": [ + "devtmpfs" + ] + }, + "/dev/shm": { + "kb_size": "8055268", + "kb_used": "96036", + "kb_available": "7959232", + "percent_used": "2%", + "total_inodes": "2013817", + "inodes_used": "217", + "inodes_available": "2013600", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ], + "devices": [ + "tmpfs" + ] + }, + "/run": { + "kb_size": "8055268", + "kb_used": "2280", + "kb_available": "8052988", + "percent_used": "1%", + "total_inodes": "2013817", + "inodes_used": "1070", + "inodes_available": "2012747", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel", + "mode=755" + ], + "devices": [ + "tmpfs" + ] + }, + "/sys/fs/cgroup": { + "kb_size": "8055268", + "kb_used": "0", + "kb_available": "8055268", + "percent_used": "0%", + "total_inodes": "2013817", + "inodes_used": "16", + "inodes_available": "2013801", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "ro", + "nosuid", + "nodev", + "noexec", + "seclabel", + "mode=755" + ], + "devices": [ + "tmpfs" + ] + }, + "/": { + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce", + "devices": [ + "/dev/mapper/fedora_host--186-root" + ] + }, + "/tmp": { + "kb_size": "8055268", + "kb_used": "848396", + "kb_available": "7206872", + "percent_used": "11%", + "total_inodes": "2013817", + "inodes_used": "1353", + "inodes_available": "2012464", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ], + "devices": [ + "tmpfs" + ] + }, + "/boot": { + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0", + "devices": [ + "/dev/sda1" + ] + }, + "/home": { + "kb_size": "185948124", + "kb_used": 
"105904724", + "kb_available": "70574680", + "percent_used": "61%", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d", + "devices": [ + "/dev/mapper/fedora_host--186-home" + ] + }, + "/var/lib/machines": { + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390", + "devices": [ + "/dev/loop0", + "/var/lib/machines.raw" + ], + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ] + }, + "/run/user/0": { + "kb_size": "1611052", + "kb_used": "0", + "kb_available": "1611052", + "percent_used": "0%", + "total_inodes": "2013817", + "inodes_used": "7", + "inodes_available": "2013810", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700" + ], + "devices": [ + "tmpfs" + ] + }, + "/run/user/1000": { + "kb_size": "1611052", + "kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + "total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": "2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ], + "devices": [ + "tmpfs" + ] + }, + "/sys": { + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "devices": [ + "sysfs" + ] + }, + "/proc": { + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "devices": [ + "proc" + ] + }, + "/sys/kernel/security": { + "fs_type": "securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ], + "devices": [ + "securityfs" + ] + }, + "/dev/pts": { + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ], + "devices": [ + "devpts" + ] + }, + "/sys/fs/cgroup/systemd": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "xattr", + "release_agent=/usr/lib/systemd/systemd-cgroups-agent", + "name=systemd" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/pstore": { + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ], + "devices": [ + "pstore" + ] + }, + "/sys/fs/cgroup/devices": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "devices" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/cpuset": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpuset" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/perf_event": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "perf_event" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/hugetlb": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "hugetlb" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/cpu,cpuacct": { + "fs_type": "cgroup", + "mount_options": [ + "rw", 
+ "nosuid", + "nodev", + "noexec", + "relatime", + "cpu", + "cpuacct" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/blkio": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "blkio" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/freezer": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "freezer" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/memory": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "memory" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/pids": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "pids" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/fs/cgroup/net_cls,net_prio": { + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ], + "devices": [ + "cgroup" + ] + }, + "/sys/kernel/config": { + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "configfs" + ] + }, + "/sys/fs/selinux": { + "fs_type": "selinuxfs", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "selinuxfs" + ] + }, + "/sys/kernel/debug": { + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "debugfs" + ] + }, + "/dev/hugepages": { + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "hugetlbfs" + ] + }, + "/dev/mqueue": { + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ], + "devices": [ + "mqueue" + ] + }, + "/proc/sys/fs/binfmt_misc": { + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "systemd-1", + "binfmt_misc" + ] + }, + "/sys/fs/fuse/connections": { + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "fusectl" + ] + }, + "/run/user/1000/gvfs": { + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ], + "devices": [ + "gvfsd-fuse" + ] + }, + "/var/lib/docker/devicemapper": { + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce", + "devices": [ + "/dev/mapper/fedora_host--186-root" + ] + }, + { + "/run/docker/netns/1ce89fd79f3d": { + "fs_type": "nsfs", + "mount_options": [ + "rw" + ], + "devices": [ + "nsfs" + ] + }, + "/sys/kernel/debug/tracing": { + "fs_type": "tracefs", + "mount_options": [ + "rw", + "relatime" + ], + "devices": [ + "tracefs" + ] + } + }, + "by_pair": { + "devtmpfs,/dev": { + "device": "devtmpfs", + "kb_size": "8044124", + "kb_used": "0", + "kb_available": "8044124", + "percent_used": "0%", + "mount": "/dev", + "total_inodes": "2011031", + "inodes_used": "629", + "inodes_available": "2010402", + "inodes_percent_used": "1%", + "fs_type": "devtmpfs", + "mount_options": [ + "rw", + "nosuid", + "seclabel", + "size=8044124k", + "nr_inodes=2011031", + "mode=755" + ] + }, + "tmpfs,/dev/shm": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "96036", + "kb_available": "7959232", + "percent_used": "2%", + "mount": "/dev/shm", + "total_inodes": "2013817", + "inodes_used": "217", + "inodes_available": "2013600", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", 
+ "seclabel" + ] + }, + "tmpfs,/run": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "2280", + "kb_available": "8052988", + "percent_used": "1%", + "mount": "/run", + "total_inodes": "2013817", + "inodes_used": "1070", + "inodes_available": "2012747", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel", + "mode=755" + ] + }, + "tmpfs,/sys/fs/cgroup": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "0", + "kb_available": "8055268", + "percent_used": "0%", + "mount": "/sys/fs/cgroup", + "total_inodes": "2013817", + "inodes_used": "16", + "inodes_available": "2013801", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "ro", + "nosuid", + "nodev", + "noexec", + "seclabel", + "mode=755" + ] + }, + "/dev/mapper/fedora_host--186-root,/": { + "device": "/dev/mapper/fedora_host--186-root", + "kb_size": "51475068", + "kb_used": "42551284", + "kb_available": "6285960", + "percent_used": "88%", + "mount": "/", + "total_inodes": "3276800", + "inodes_used": "532908", + "inodes_available": "2743892", + "inodes_percent_used": "17%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce" + }, + "tmpfs,/tmp": { + "device": "tmpfs", + "kb_size": "8055268", + "kb_used": "848396", + "kb_available": "7206872", + "percent_used": "11%", + "mount": "/tmp", + "total_inodes": "2013817", + "inodes_used": "1353", + "inodes_available": "2012464", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "seclabel" + ] + }, + "/dev/sda1,/boot": { + "device": "/dev/sda1", + "kb_size": "487652", + "kb_used": "126628", + "kb_available": "331328", + "percent_used": "28%", + "mount": "/boot", + "total_inodes": "128016", + "inodes_used": "405", + "inodes_available": "127611", + "inodes_percent_used": "1%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0" + }, + "/dev/mapper/fedora_host--186-home,/home": { + "device": "/dev/mapper/fedora_host--186-home", + "kb_size": "185948124", + "kb_used": "105904724", + "kb_available": "70574680", + "percent_used": "61%", + "mount": "/home", + "total_inodes": "11821056", + "inodes_used": "1266687", + "inodes_available": "10554369", + "inodes_percent_used": "11%", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d" + }, + "/dev/loop0,/var/lib/machines": { + "device": "/dev/loop0", + "kb_size": "512000", + "kb_used": "16672", + "kb_available": "429056", + "percent_used": "4%", + "mount": "/var/lib/machines", + "fs_type": "btrfs", + "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390" + }, + "tmpfs,/run/user/0": { + "device": "tmpfs", + "kb_size": "1611052", + "kb_used": "0", + "kb_available": "1611052", + "percent_used": "0%", + "mount": "/run/user/0", + "total_inodes": "2013817", + "inodes_used": "7", + "inodes_available": "2013810", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700" + ] + }, + "tmpfs,/run/user/1000": { + "device": "tmpfs", + "kb_size": "1611052", + "kb_used": "72", + "kb_available": "1610980", + "percent_used": "1%", + "mount": "/run/user/1000", + "total_inodes": "2013817", + "inodes_used": "36", + "inodes_available": 
"2013781", + "inodes_percent_used": "1%", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "seclabel", + "size=1611052k", + "mode=700", + "uid=1000", + "gid=1000" + ] + }, + "sysfs,/sys": { + "device": "sysfs", + "mount": "/sys", + "fs_type": "sysfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ] + }, + "proc,/proc": { + "device": "proc", + "mount": "/proc", + "fs_type": "proc", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ] + }, + "securityfs,/sys/kernel/security": { + "device": "securityfs", + "mount": "/sys/kernel/security", + "fs_type": "securityfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime" + ] + }, + "devpts,/dev/pts": { + "device": "devpts", + "mount": "/dev/pts", + "fs_type": "devpts", + "mount_options": [ + "rw", + "nosuid", + "noexec", + "relatime", + "seclabel", + "gid=5", + "mode=620", + "ptmxmode=000" + ] + }, + "cgroup,/sys/fs/cgroup/systemd": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/systemd", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "xattr", + "release_agent=/usr/lib/systemd/systemd-cgroups-agent", + "name=systemd" + ] + }, + "pstore,/sys/fs/pstore": { + "device": "pstore", + "mount": "/sys/fs/pstore", + "fs_type": "pstore", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "seclabel" + ] + }, + "cgroup,/sys/fs/cgroup/devices": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/devices", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "devices" + ] + }, + "cgroup,/sys/fs/cgroup/cpuset": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/cpuset", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpuset" + ] + }, + "cgroup,/sys/fs/cgroup/perf_event": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/perf_event", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "perf_event" + ] + }, + "cgroup,/sys/fs/cgroup/hugetlb": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/hugetlb", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "hugetlb" + ] + }, + "cgroup,/sys/fs/cgroup/cpu,cpuacct": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/cpu,cpuacct", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "cpu", + "cpuacct" + ] + }, + "cgroup,/sys/fs/cgroup/blkio": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/blkio", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "blkio" + ] + }, + "cgroup,/sys/fs/cgroup/freezer": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/freezer", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "freezer" + ] + }, + "cgroup,/sys/fs/cgroup/memory": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/memory", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "memory" + ] + }, + "cgroup,/sys/fs/cgroup/pids": { + "device": "cgroup", + "mount": "/sys/fs/cgroup/pids", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "pids" + ] + }, + "cgroup,/sys/fs/cgroup/net_cls,net_prio": { + "device": "cgroup", + "mount": 
"/sys/fs/cgroup/net_cls,net_prio", + "fs_type": "cgroup", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + "net_cls", + "net_prio" + ] + }, + "configfs,/sys/kernel/config": { + "device": "configfs", + "mount": "/sys/kernel/config", + "fs_type": "configfs", + "mount_options": [ + "rw", + "relatime" + ] + }, + "selinuxfs,/sys/fs/selinux": { + "device": "selinuxfs", + "mount": "/sys/fs/selinux", + "fs_type": "selinuxfs", + "mount_options": [ + "rw", + "relatime" + ] + }, + "debugfs,/sys/kernel/debug": { + "device": "debugfs", + "mount": "/sys/kernel/debug", + "fs_type": "debugfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "hugetlbfs,/dev/hugepages": { + "device": "hugetlbfs", + "mount": "/dev/hugepages", + "fs_type": "hugetlbfs", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "mqueue,/dev/mqueue": { + "device": "mqueue", + "mount": "/dev/mqueue", + "fs_type": "mqueue", + "mount_options": [ + "rw", + "relatime", + "seclabel" + ] + }, + "systemd-1,/proc/sys/fs/binfmt_misc": { + "device": "systemd-1", + "mount": "/proc/sys/fs/binfmt_misc", + "fs_type": "autofs", + "mount_options": [ + "rw", + "relatime", + "fd=40", + "pgrp=1", + "timeout=0", + "minproto=5", + "maxproto=5", + "direct", + "pipe_ino=17610" + ] + }, + "/var/lib/machines.raw,/var/lib/machines": { + "device": "/var/lib/machines.raw", + "mount": "/var/lib/machines", + "fs_type": "btrfs", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "space_cache", + "subvolid=5", + "subvol=/" + ] + }, + "fusectl,/sys/fs/fuse/connections": { + "device": "fusectl", + "mount": "/sys/fs/fuse/connections", + "fs_type": "fusectl", + "mount_options": [ + "rw", + "relatime" + ] + }, + "gvfsd-fuse,/run/user/1000/gvfs": { + "device": "gvfsd-fuse", + "mount": "/run/user/1000/gvfs", + "fs_type": "fuse.gvfsd-fuse", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "relatime", + "user_id=1000", + "group_id=1000" + ] + }, + "/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": { + "device": "/dev/mapper/fedora_host--186-root", + "mount": "/var/lib/docker/devicemapper", + "fs_type": "ext4", + "mount_options": [ + "rw", + "relatime", + "seclabel", + "data=ordered" + ], + "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce" + }, + "binfmt_misc,/proc/sys/fs/binfmt_misc": { + "device": "binfmt_misc", + "mount": "/proc/sys/fs/binfmt_misc", + "fs_type": "binfmt_misc", + "mount_options": [ + "rw", + "relatime" + ] + }, + "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": { + "device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8", + "mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8", + "fs_type": "xfs", + "mount_options": [ + "rw", + "relatime", + "context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "nouuid", + "attr2", + "inode64", + "logbsize=64k", + "sunit=128", + "swidth=128", + "noquota" + ], + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123" + }, + "shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": { + "device": "shm", + "mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm", + "fs_type": "tmpfs", + "mount_options": [ + "rw", + "nosuid", + "nodev", + "noexec", + "relatime", + 
"context=\"system_u:object_r:container_file_t:s0:c523", + "c681\"", + "size=65536k" + ] + }, + "nsfs,/run/docker/netns/1ce89fd79f3d": { + "device": "nsfs", + "mount": "/run/docker/netns/1ce89fd79f3d", + "fs_type": "nsfs", + "mount_options": [ + "rw" + ] + }, + "tracefs,/sys/kernel/debug/tracing": { + "device": "tracefs", + "mount": "/sys/kernel/debug/tracing", + "fs_type": "tracefs", + "mount_options": [ + "rw", + "relatime" + ] + }, + "/dev/loop1,": { + "device": "/dev/loop1", + "fs_type": "xfs", + "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123" + }, + "/dev/mapper/docker-253:1-1180487-pool,": { + "device": "/dev/mapper/docker-253:1-1180487-pool" + }, + "/dev/sr0,": { + "device": "/dev/sr0" + }, + "/dev/loop2,": { + "device": "/dev/loop2" + }, + "/dev/sda,": { + "device": "/dev/sda" + }, + "/dev/sda2,": { + "device": "/dev/sda2", + "fs_type": "LVM2_member", + "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK" + }, + "/dev/mapper/fedora_host--186-swap,": { + "device": "/dev/mapper/fedora_host--186-swap", + "fs_type": "swap", + "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d" + } + } + }, + "virtualization": { + "systems": { + "kvm": "host" + }, + "system": "kvm", + "role": "host", + "libvirt_version": "2.2.0", + "uri": "qemu:///system", + "capabilities": { + + }, + "nodeinfo": { + "cores": 4, + "cpus": 8, + "memory": 16110540, + "mhz": 2832, + "model": "x86_64", + "nodes": 1, + "sockets": 1, + "threads": 2 + }, + "domains": { + + }, + "networks": { + "vagrant-libvirt": { + "bridge_name": "virbr1", + "uuid": "877ddb27-b39c-427e-a7bf-1aa829389eeb" + }, + "default": { + "bridge_name": "virbr0", + "uuid": "750d2567-23a8-470d-8a2b-71cd651e30d1" + } + }, + "storage": { + "virt-images": { + "autostart": true, + "uuid": "d8a189fa-f98c-462f-9ea4-204eb77a96a1", + "allocation": 106412863488, + "available": 83998015488, + "capacity": 190410878976, + "state": 2, + "volumes": { + "rhel-atomic-host-standard-2014-7-1.qcow2": { + "key": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2", + "name": "rhel-atomic-host-standard-2014-7-1.qcow2", + "path": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2", + "allocation": 1087115264, + "capacity": 8589934592, + "type": 0 + }, + "atomic-beta-instance-7.qcow2": { + "key": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2", + "name": "atomic-beta-instance-7.qcow2", + "path": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2", + "allocation": 200704, + "capacity": 8589934592, + "type": 0 + }, + "os1-atomic-meta-data": { + "key": "/home/some_user/virt-images/os1-atomic-meta-data", + "name": "os1-atomic-meta-data", + "path": "/home/some_user/virt-images/os1-atomic-meta-data", + "allocation": 4096, + "capacity": 49, + "type": 0 + }, + "atomic-user-data": { + "key": "/home/some_user/virt-images/atomic-user-data", + "name": "atomic-user-data", + "path": "/home/some_user/virt-images/atomic-user-data", + "allocation": 4096, + "capacity": 512, + "type": 0 + }, + "qemu-snap.txt": { + "key": "/home/some_user/virt-images/qemu-snap.txt", + "name": "qemu-snap.txt", + "path": "/home/some_user/virt-images/qemu-snap.txt", + "allocation": 4096, + "capacity": 111, + "type": 0 + }, + "atomic-beta-instance-5.qcow2": { + "key": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2", + "name": "atomic-beta-instance-5.qcow2", + "path": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2", + "allocation": 339091456, + "capacity": 8589934592, + "type": 0 + }, + "meta-data": { + "key": "/home/some_user/virt-images/meta-data", + 
"name": "meta-data", + "path": "/home/some_user/virt-images/meta-data", + "allocation": 4096, + "capacity": 49, + "type": 0 + }, + "atomic-beta-instance-8.qcow2": { + "key": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2", + "name": "atomic-beta-instance-8.qcow2", + "path": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2", + "allocation": 322576384, + "capacity": 8589934592, + "type": 0 + }, + "user-data": { + "key": "/home/some_user/virt-images/user-data", + "name": "user-data", + "path": "/home/some_user/virt-images/user-data", + "allocation": 4096, + "capacity": 512, + "type": 0 + }, + "rhel-6-2015-10-16.qcow2": { + "key": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2", + "name": "rhel-6-2015-10-16.qcow2", + "path": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2", + "allocation": 7209422848, + "capacity": 17179869184, + "type": 0 + }, + "atomic_demo_notes.txt": { + "key": "/home/some_user/virt-images/atomic_demo_notes.txt", + "name": "atomic_demo_notes.txt", + "path": "/home/some_user/virt-images/atomic_demo_notes.txt", + "allocation": 4096, + "capacity": 354, + "type": 0 + }, + "packer-windows-2012-R2-standard": { + "key": "/home/some_user/virt-images/packer-windows-2012-R2-standard", + "name": "packer-windows-2012-R2-standard", + "path": "/home/some_user/virt-images/packer-windows-2012-R2-standard", + "allocation": 16761495552, + "capacity": 64424509440, + "type": 0 + }, + "atomic3-cidata.iso": { + "key": "/home/some_user/virt-images/atomic3-cidata.iso", + "name": "atomic3-cidata.iso", + "path": "/home/some_user/virt-images/atomic3-cidata.iso", + "allocation": 376832, + "capacity": 374784, + "type": 0 + }, + ".atomic_demo_notes.txt.swp": { + "key": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp", + "name": ".atomic_demo_notes.txt.swp", + "path": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp", + "allocation": 12288, + "capacity": 12288, + "type": 0 + }, + "rhel7-2015-10-13.qcow2": { + "key": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2", + "name": "rhel7-2015-10-13.qcow2", + "path": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2", + "allocation": 4679413760, + "capacity": 12884901888, + "type": 0 + } + } + }, + "default": { + "autostart": true, + "uuid": "c8d9d160-efc0-4207-81c2-e79d6628f7e1", + "allocation": 43745488896, + "available": 8964980736, + "capacity": 52710469632, + "state": 2, + "volumes": { + "s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img": { + "key": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img", + "name": "s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img", + "path": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img", + "allocation": 1258622976, + "capacity": 42949672960, + "type": 0 + }, + "centos-7.0_vagrant_box_image.img": { + "key": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img", + "name": "centos-7.0_vagrant_box_image.img", + "path": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img", + "allocation": 1649414144, + "capacity": 42949672960, + "type": 0 + }, + "baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img": { + "key": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img", + "name": "baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img", + "path": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img", + "allocation": 810422272, + "capacity": 42949672960, + "type": 0 + }, + 
"centos-6_vagrant_box_image.img": { + "key": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img", + "name": "centos-6_vagrant_box_image.img", + "path": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img", + "allocation": 1423642624, + "capacity": 42949672960, + "type": 0 + }, + "centos5-ansible_default.img": { + "key": "/var/lib/libvirt/images/centos5-ansible_default.img", + "name": "centos5-ansible_default.img", + "path": "/var/lib/libvirt/images/centos5-ansible_default.img", + "allocation": 8986624, + "capacity": 42949672960, + "type": 0 + }, + "ubuntu_default.img": { + "key": "/var/lib/libvirt/images/ubuntu_default.img", + "name": "ubuntu_default.img", + "path": "/var/lib/libvirt/images/ubuntu_default.img", + "allocation": 3446833152, + "capacity": 42949672960, + "type": 0 + } + } + }, + "boot-scratch": { + "autostart": true, + "uuid": "e5ef4360-b889-4843-84fb-366e8fb30f20", + "allocation": 43745488896, + "available": 8964980736, + "capacity": 52710469632, + "state": 2, + "volumes": { + + } + } + } + }, + "network": { + "interfaces": { + "lo": { + "mtu": "65536", + "flags": [ + "LOOPBACK", + "UP", + "LOWER_UP" + ], + "encapsulation": "Loopback", + "addresses": { + "127.0.0.1": { + "family": "inet", + "prefixlen": "8", + "netmask": "255.0.0.0", + "scope": "Node", + "ip_scope": "LOOPBACK" + }, + "::1": { + "family": "inet6", + "prefixlen": "128", + "scope": "Node", + "tags": [ + + ], + "ip_scope": "LINK LOCAL LOOPBACK" + } + }, + "state": "unknown" + }, + "em1": { + "type": "em", + "number": "1", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP" + ], + "encapsulation": "Ethernet", + "addresses": { + "3C:97:0E:E9:28:8E": { + "family": "lladdr" + } + }, + "state": "down", + "link_speed": 0, + "duplex": "Unknown! (255)", + "port": "Twisted Pair", + "transceiver": "internal", + "auto_negotiation": "on", + "mdi_x": "Unknown (auto)", + "ring_params": { + "max_rx": 4096, + "max_rx_mini": 0, + "max_rx_jumbo": 0, + "max_tx": 4096, + "current_rx": 256, + "current_rx_mini": 0, + "current_rx_jumbo": 0, + "current_tx": 256 + } + }, + "wlp4s0": { + "type": "wlp4s", + "number": "0", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + "encapsulation": "Ethernet", + "addresses": { + "5C:51:4F:E6:A8:E3": { + "family": "lladdr" + }, + "192.168.1.19": { + "family": "inet", + "prefixlen": "24", + "netmask": "255.255.255.0", + "broadcast": "192.168.1.255", + "scope": "Global", + "ip_scope": "RFC1918 PRIVATE" + }, + "fe80::5e51:4fff:fee6:a8e3": { + "family": "inet6", + "prefixlen": "64", + "scope": "Link", + "tags": [ + + ], + "ip_scope": "LINK LOCAL UNICAST" + } + }, + "state": "up", + "arp": { + "192.168.1.33": "00:11:d9:39:3e:e0", + "192.168.1.20": "ac:3a:7a:a7:49:e8", + "192.168.1.17": "00:09:b0:d0:64:19", + "192.168.1.22": "ac:bc:32:82:30:bb", + "192.168.1.15": "00:11:32:2e:10:d5", + "192.168.1.1": "84:1b:5e:03:50:b2", + "192.168.1.34": "00:11:d9:5f:e8:e6", + "192.168.1.16": "dc:a5:f4:ac:22:3a", + "192.168.1.21": "74:c2:46:73:28:d8", + "192.168.1.27": "00:17:88:09:3c:bb", + "192.168.1.24": "08:62:66:90:a2:b8" + }, + "routes": [ + { + "destination": "default", + "family": "inet", + "via": "192.168.1.1", + "metric": "600", + "proto": "static" + }, + { + "destination": "66.187.232.64", + "family": "inet", + "via": "192.168.1.1", + "metric": "600", + "proto": "static" + }, + { + "destination": "192.168.1.0/24", + "family": "inet", + "scope": "link", + "metric": "600", + "proto": "kernel", + "src": "192.168.1.19" + }, + { + "destination": 
"192.168.1.1", + "family": "inet", + "scope": "link", + "metric": "600", + "proto": "static" + }, + { + "destination": "fe80::/64", + "family": "inet6", + "metric": "256", + "proto": "kernel" + } + ], + "ring_params": { + "max_rx": 0, + "max_rx_mini": 0, + "max_rx_jumbo": 0, + "max_tx": 0, + "current_rx": 0, + "current_rx_mini": 0, + "current_rx_jumbo": 0, + "current_tx": 0 + } + }, + "virbr1": { + "type": "virbr", + "number": "1", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP" + ], + "encapsulation": "Ethernet", + "addresses": { + "52:54:00:B4:68:A9": { + "family": "lladdr" + }, + "192.168.121.1": { + "family": "inet", + "prefixlen": "24", + "netmask": "255.255.255.0", + "broadcast": "192.168.121.255", + "scope": "Global", + "ip_scope": "RFC1918 PRIVATE" + } + }, + "state": "1", + "routes": [ + { + "destination": "192.168.121.0/24", + "family": "inet", + "scope": "link", + "proto": "kernel", + "src": "192.168.121.1" + } + ], + "ring_params": { + + } + }, + "virbr1-nic": { + "type": "virbr", + "number": "1-nic", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST" + ], + "encapsulation": "Ethernet", + "addresses": { + "52:54:00:B4:68:A9": { + "family": "lladdr" + } + }, + "state": "disabled", + "link_speed": 10, + "duplex": "Full", + "port": "Twisted Pair", + "transceiver": "internal", + "auto_negotiation": "off", + "mdi_x": "Unknown", + "ring_params": { + + } + }, + "virbr0": { + "type": "virbr", + "number": "0", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP" + ], + "encapsulation": "Ethernet", + "addresses": { + "52:54:00:CE:82:5E": { + "family": "lladdr" + }, + "192.168.137.1": { + "family": "inet", + "prefixlen": "24", + "netmask": "255.255.255.0", + "broadcast": "192.168.137.255", + "scope": "Global", + "ip_scope": "RFC1918 PRIVATE" + } + }, + "state": "1", + "routes": [ + { + "destination": "192.168.137.0/24", + "family": "inet", + "scope": "link", + "proto": "kernel", + "src": "192.168.137.1" + } + ], + "ring_params": { + + } + }, + "virbr0-nic": { + "type": "virbr", + "number": "0-nic", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST" + ], + "encapsulation": "Ethernet", + "addresses": { + "52:54:00:CE:82:5E": { + "family": "lladdr" + } + }, + "state": "disabled", + "link_speed": 10, + "duplex": "Full", + "port": "Twisted Pair", + "transceiver": "internal", + "auto_negotiation": "off", + "mdi_x": "Unknown", + "ring_params": { + + } + }, + "docker0": { + "type": "docker", + "number": "0", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + "encapsulation": "Ethernet", + "addresses": { + "02:42:EA:15:D8:84": { + "family": "lladdr" + }, + "172.17.0.1": { + "family": "inet", + "prefixlen": "16", + "netmask": "255.255.0.0", + "scope": "Global", + "ip_scope": "RFC1918 PRIVATE" + }, + "fe80::42:eaff:fe15:d884": { + "family": "inet6", + "prefixlen": "64", + "scope": "Link", + "tags": [ + + ], + "ip_scope": "LINK LOCAL UNICAST" + } + }, + "state": "0", + "arp": { + "172.17.0.2": "02:42:ac:11:00:02", + "172.17.0.4": "02:42:ac:11:00:04", + "172.17.0.3": "02:42:ac:11:00:03" + }, + "routes": [ + { + "destination": "172.17.0.0/16", + "family": "inet", + "scope": "link", + "proto": "kernel", + "src": "172.17.0.1" + }, + { + "destination": "fe80::/64", + "family": "inet6", + "metric": "256", + "proto": "kernel" + } + ], + "ring_params": { + + } + }, + "vethf20ff12": { + "type": "vethf20ff1", + "number": "2", + "mtu": "1500", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + 
"encapsulation": "Ethernet", + "addresses": { + "AE:6E:2B:1E:A1:31": { + "family": "lladdr" + }, + "fe80::ac6e:2bff:fe1e:a131": { + "family": "inet6", + "prefixlen": "64", + "scope": "Link", + "tags": [ + + ], + "ip_scope": "LINK LOCAL UNICAST" + } + }, + "state": "forwarding", + "routes": [ + { + "destination": "fe80::/64", + "family": "inet6", + "metric": "256", + "proto": "kernel" + } + ], + "link_speed": 10000, + "duplex": "Full", + "port": "Twisted Pair", + "transceiver": "internal", + "auto_negotiation": "off", + "mdi_x": "Unknown", + "ring_params": { + + } + }, + "tun0": { + "type": "tun", + "number": "0", + "mtu": "1360", + "flags": [ + "MULTICAST", + "NOARP", + "UP", + "LOWER_UP" + ], + "addresses": { + "10.10.120.68": { + "family": "inet", + "prefixlen": "21", + "netmask": "255.255.248.0", + "broadcast": "10.10.127.255", + "scope": "Global", + "ip_scope": "RFC1918 PRIVATE" + }, + "fe80::365e:885c:31ca:7670": { + "family": "inet6", + "prefixlen": "64", + "scope": "Link", + "tags": [ + "flags", + "800" + ], + "ip_scope": "LINK LOCAL UNICAST" + } + }, + "state": "unknown", + "routes": [ + { + "destination": "10.0.0.0/8", + "family": "inet", + "via": "10.10.120.1", + "metric": "50", + "proto": "static" + }, + { + "destination": "10.10.120.0/21", + "family": "inet", + "scope": "link", + "metric": "50", + "proto": "kernel", + "src": "10.10.120.68" + }, + { + "destination": "fe80::/64", + "family": "inet6", + "metric": "256", + "proto": "kernel" + } + ] + } + }, + "default_interface": "wlp4s0", + "default_gateway": "192.168.1.1" + }, + "counters": { + "network": { + "interfaces": { + "lo": { + "tx": { + "queuelen": "1", + "bytes": "202568405", + "packets": "1845473", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "202568405", + "packets": "1845473", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "em1": { + "tx": { + "queuelen": "1000", + "bytes": "673898037", + "packets": "1631282", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "1536186718", + "packets": "1994394", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "wlp4s0": { + "tx": { + "queuelen": "1000", + "bytes": "3927670539", + "packets": "15146886", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "12367173401", + "packets": "23981258", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "virbr1": { + "tx": { + "queuelen": "1000", + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "virbr1-nic": { + "tx": { + "queuelen": "1000", + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "virbr0": { + "tx": { + "queuelen": "1000", + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "virbr0-nic": { + "tx": { + "queuelen": "1000", + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "0", + "packets": "0", + "errors": "0", + "drop": "0", + "overrun": "0" + } + }, + "docker0": { + "rx": { + "bytes": "2471313", + 
"packets": "36915", + "errors": "0", + "drop": "0", + "overrun": "0" + }, + "tx": { + "bytes": "413371670", + "packets": "127713", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + } + }, + "vethf20ff12": { + "rx": { + "bytes": "34391", + "packets": "450", + "errors": "0", + "drop": "0", + "overrun": "0" + }, + "tx": { + "bytes": "17919115", + "packets": "108069", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + } + }, + "tun0": { + "tx": { + "queuelen": "100", + "bytes": "22343462", + "packets": "253442", + "errors": "0", + "drop": "0", + "carrier": "0", + "collisions": "0" + }, + "rx": { + "bytes": "115160002", + "packets": "197529", + "errors": "0", + "drop": "0", + "overrun": "0" + } + } + } + } + }, + "ipaddress": "192.168.1.19", + "macaddress": "5C:51:4F:E6:A8:E3", + "ip6address": "fe80::42:eaff:fe15:d884", + "cpu": { + "0": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "3238.714", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "0", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "1": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "3137.200", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "0", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "2": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) 
Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "3077.050", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "1", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "3": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "2759.655", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "1", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "4": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "3419.000", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "2", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", 
+ "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "5": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "2752.569", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "2", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "6": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "2953.619", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "3", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "7": { + "vendor_id": "GenuineIntel", + "family": "6", + "model": "60", + "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", + "stepping": "3", + "mhz": "2927.087", + "cache_size": "6144 KB", + "physical_id": "0", + "core_id": "3", + "cores": "4", + "flags": [ + "fpu", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "pbe", + "syscall", + "nx", + "pdpe1gb", + "rdtscp", + "lm", + "constant_tsc", + "arch_perfmon", + "pebs", + "bts", + "rep_good", + "nopl", + "xtopology", + "nonstop_tsc", + "aperfmperf", + "eagerfpu", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + 
"ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "sdbg", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "aes", + "xsave", + "avx", + "f16c", + "rdrand", + "lahf_lm", + "abm", + "epb", + "tpr_shadow", + "vnmi", + "flexpriority", + "ept", + "vpid", + "fsgsbase", + "tsc_adjust", + "bmi1", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "xsaveopt", + "dtherm", + "ida", + "arat", + "pln", + "pts" + ] + }, + "total": 8, + "real": 1, + "cores": 4 + }, + "etc": { + "passwd": { + "root": { + "dir": "/root", + "gid": 0, + "uid": 0, + "shell": "/bin/bash", + "gecos": "root" + }, + "bin": { + "dir": "/bin", + "gid": 1, + "uid": 1, + "shell": "/sbin/nologin", + "gecos": "bin" + }, + "daemon": { + "dir": "/sbin", + "gid": 2, + "uid": 2, + "shell": "/sbin/nologin", + "gecos": "daemon" + }, + "adm": { + "dir": "/var/adm", + "gid": 4, + "uid": 3, + "shell": "/sbin/nologin", + "gecos": "adm" + }, + "lp": { + "dir": "/var/spool/lpd", + "gid": 7, + "uid": 4, + "shell": "/sbin/nologin", + "gecos": "lp" + }, + "sync": { + "dir": "/sbin", + "gid": 0, + "uid": 5, + "shell": "/bin/sync", + "gecos": "sync" + }, + "shutdown": { + "dir": "/sbin", + "gid": 0, + "uid": 6, + "shell": "/sbin/shutdown", + "gecos": "shutdown" + }, + "halt": { + "dir": "/sbin", + "gid": 0, + "uid": 7, + "shell": "/sbin/halt", + "gecos": "halt" + }, + "mail": { + "dir": "/var/spool/mail", + "gid": 12, + "uid": 8, + "shell": "/sbin/nologin", + "gecos": "mail" + }, + "operator": { + "dir": "/root", + "gid": 0, + "uid": 11, + "shell": "/sbin/nologin", + "gecos": "operator" + }, + "games": { + "dir": "/usr/games", + "gid": 100, + "uid": 12, + "shell": "/sbin/nologin", + "gecos": "games" + }, + "ftp": { + "dir": "/var/ftp", + "gid": 50, + "uid": 14, + "shell": "/sbin/nologin", + "gecos": "FTP User" + }, + "nobody": { + "dir": "/", + "gid": 99, + "uid": 99, + "shell": "/sbin/nologin", + "gecos": "Nobody" + }, + "avahi-autoipd": { + "dir": "/var/lib/avahi-autoipd", + "gid": 170, + "uid": 170, + "shell": "/sbin/nologin", + "gecos": "Avahi IPv4LL Stack" + }, + "dbus": { + "dir": "/", + "gid": 81, + "uid": 81, + "shell": "/sbin/nologin", + "gecos": "System message bus" + }, + "polkitd": { + "dir": "/", + "gid": 999, + "uid": 999, + "shell": "/sbin/nologin", + "gecos": "User for polkitd" + }, + "abrt": { + "dir": "/etc/abrt", + "gid": 173, + "uid": 173, + "shell": "/sbin/nologin", + "gecos": "" + }, + "usbmuxd": { + "dir": "/", + "gid": 113, + "uid": 113, + "shell": "/sbin/nologin", + "gecos": "usbmuxd user" + }, + "colord": { + "dir": "/var/lib/colord", + "gid": 998, + "uid": 998, + "shell": "/sbin/nologin", + "gecos": "User for colord" + }, + "geoclue": { + "dir": "/var/lib/geoclue", + "gid": 997, + "uid": 997, + "shell": "/sbin/nologin", + "gecos": "User for geoclue" + }, + "rpc": { + "dir": "/var/lib/rpcbind", + "gid": 32, + "uid": 32, + "shell": "/sbin/nologin", + "gecos": "Rpcbind Daemon" + }, + "rpcuser": { + "dir": "/var/lib/nfs", + "gid": 29, + "uid": 29, + "shell": "/sbin/nologin", + "gecos": "RPC Service User" + }, + "nfsnobody": { + "dir": "/var/lib/nfs", + "gid": 65534, + "uid": 65534, + "shell": "/sbin/nologin", + "gecos": "Anonymous NFS User" + }, + "qemu": { + "dir": "/", + "gid": 107, + "uid": 107, + "shell": "/sbin/nologin", + "gecos": "qemu user" + }, + "rtkit": { + "dir": "/proc", + "gid": 172, + "uid": 172, + "shell": "/sbin/nologin", + "gecos": "RealtimeKit" + }, + "radvd": { + "dir": "/", + "gid": 75, + "uid": 75, + "shell": 
"/sbin/nologin", + "gecos": "radvd user" + }, + "tss": { + "dir": "/dev/null", + "gid": 59, + "uid": 59, + "shell": "/sbin/nologin", + "gecos": "Account used by the trousers package to sandbox the tcsd daemon" + }, + "unbound": { + "dir": "/etc/unbound", + "gid": 995, + "uid": 996, + "shell": "/sbin/nologin", + "gecos": "Unbound DNS resolver" + }, + "openvpn": { + "dir": "/etc/openvpn", + "gid": 994, + "uid": 995, + "shell": "/sbin/nologin", + "gecos": "OpenVPN" + }, + "saslauth": { + "dir": "/run/saslauthd", + "gid": 76, + "uid": 994, + "shell": "/sbin/nologin", + "gecos": "\"Saslauthd user\"" + }, + "avahi": { + "dir": "/var/run/avahi-daemon", + "gid": 70, + "uid": 70, + "shell": "/sbin/nologin", + "gecos": "Avahi mDNS/DNS-SD Stack" + }, + "pulse": { + "dir": "/var/run/pulse", + "gid": 992, + "uid": 993, + "shell": "/sbin/nologin", + "gecos": "PulseAudio System Daemon" + }, + "gdm": { + "dir": "/var/lib/gdm", + "gid": 42, + "uid": 42, + "shell": "/sbin/nologin", + "gecos": "" + }, + "gnome-initial-setup": { + "dir": "/run/gnome-initial-setup/", + "gid": 990, + "uid": 992, + "shell": "/sbin/nologin", + "gecos": "" + }, + "nm-openconnect": { + "dir": "/", + "gid": 989, + "uid": 991, + "shell": "/sbin/nologin", + "gecos": "NetworkManager user for OpenConnect" + }, + "sshd": { + "dir": "/var/empty/sshd", + "gid": 74, + "uid": 74, + "shell": "/sbin/nologin", + "gecos": "Privilege-separated SSH" + }, + "chrony": { + "dir": "/var/lib/chrony", + "gid": 988, + "uid": 990, + "shell": "/sbin/nologin", + "gecos": "" + }, + "tcpdump": { + "dir": "/", + "gid": 72, + "uid": 72, + "shell": "/sbin/nologin", + "gecos": "" + }, + "some_user": { + "dir": "/home/some_user", + "gid": 1000, + "uid": 1000, + "shell": "/bin/bash", + "gecos": "some_user" + }, + "systemd-journal-gateway": { + "dir": "/var/log/journal", + "gid": 191, + "uid": 191, + "shell": "/sbin/nologin", + "gecos": "Journal Gateway" + }, + "postgres": { + "dir": "/var/lib/pgsql", + "gid": 26, + "uid": 26, + "shell": "/bin/bash", + "gecos": "PostgreSQL Server" + }, + "dockerroot": { + "dir": "/var/lib/docker", + "gid": 977, + "uid": 984, + "shell": "/sbin/nologin", + "gecos": "Docker User" + }, + "apache": { + "dir": "/usr/share/httpd", + "gid": 48, + "uid": 48, + "shell": "/sbin/nologin", + "gecos": "Apache" + }, + "systemd-network": { + "dir": "/", + "gid": 974, + "uid": 982, + "shell": "/sbin/nologin", + "gecos": "systemd Network Management" + }, + "systemd-resolve": { + "dir": "/", + "gid": 973, + "uid": 981, + "shell": "/sbin/nologin", + "gecos": "systemd Resolver" + }, + "systemd-bus-proxy": { + "dir": "/", + "gid": 972, + "uid": 980, + "shell": "/sbin/nologin", + "gecos": "systemd Bus Proxy" + }, + "systemd-journal-remote": { + "dir": "//var/log/journal/remote", + "gid": 970, + "uid": 979, + "shell": "/sbin/nologin", + "gecos": "Journal Remote" + }, + "systemd-journal-upload": { + "dir": "//var/log/journal/upload", + "gid": 969, + "uid": 978, + "shell": "/sbin/nologin", + "gecos": "Journal Upload" + }, + "setroubleshoot": { + "dir": "/var/lib/setroubleshoot", + "gid": 967, + "uid": 977, + "shell": "/sbin/nologin", + "gecos": "" + }, + "oprofile": { + "dir": "/var/lib/oprofile", + "gid": 16, + "uid": 16, + "shell": "/sbin/nologin", + "gecos": "Special user account to be used by OProfile" + } + }, + "group": { + "root": { + "gid": 0, + "members": [ + + ] + }, + "bin": { + "gid": 1, + "members": [ + + ] + }, + "daemon": { + "gid": 2, + "members": [ + + ] + }, + "sys": { + "gid": 3, + "members": [ + + ] + }, + "adm": { + "gid": 4, + 
"members": [ + "logcheck" + ] + }, + "tty": { + "gid": 5, + "members": [ + + ] + }, + "disk": { + "gid": 6, + "members": [ + + ] + }, + "lp": { + "gid": 7, + "members": [ + + ] + }, + "mem": { + "gid": 8, + "members": [ + + ] + }, + "kmem": { + "gid": 9, + "members": [ + + ] + }, + "wheel": { + "gid": 10, + "members": [ + + ] + }, + "cdrom": { + "gid": 11, + "members": [ + + ] + }, + "mail": { + "gid": 12, + "members": [ + + ] + }, + "man": { + "gid": 15, + "members": [ + + ] + }, + "dialout": { + "gid": 18, + "members": [ + "lirc" + ] + }, + "floppy": { + "gid": 19, + "members": [ + + ] + }, + "games": { + "gid": 20, + "members": [ + + ] + }, + "tape": { + "gid": 30, + "members": [ + + ] + }, + "video": { + "gid": 39, + "members": [ + + ] + }, + "ftp": { + "gid": 50, + "members": [ + + ] + }, + "lock": { + "gid": 54, + "members": [ + "lirc" + ] + }, + "audio": { + "gid": 63, + "members": [ + + ] + }, + "nobody": { + "gid": 99, + "members": [ + + ] + }, + "users": { + "gid": 100, + "members": [ + + ] + }, + "utmp": { + "gid": 22, + "members": [ + + ] + }, + "utempter": { + "gid": 35, + "members": [ + + ] + }, + "avahi-autoipd": { + "gid": 170, + "members": [ + + ] + }, + "systemd-journal": { + "gid": 190, + "members": [ + + ] + }, + "dbus": { + "gid": 81, + "members": [ + + ] + }, + "polkitd": { + "gid": 999, + "members": [ + + ] + }, + "abrt": { + "gid": 173, + "members": [ + + ] + }, + "dip": { + "gid": 40, + "members": [ + + ] + }, + "usbmuxd": { + "gid": 113, + "members": [ + + ] + }, + "colord": { + "gid": 998, + "members": [ + + ] + }, + "geoclue": { + "gid": 997, + "members": [ + + ] + }, + "ssh_keys": { + "gid": 996, + "members": [ + + ] + }, + "rpc": { + "gid": 32, + "members": [ + + ] + }, + "rpcuser": { + "gid": 29, + "members": [ + + ] + }, + "nfsnobody": { + "gid": 65534, + "members": [ + + ] + }, + "kvm": { + "gid": 36, + "members": [ + "qemu" + ] + }, + "qemu": { + "gid": 107, + "members": [ + + ] + }, + "rtkit": { + "gid": 172, + "members": [ + + ] + }, + "radvd": { + "gid": 75, + "members": [ + + ] + }, + "tss": { + "gid": 59, + "members": [ + + ] + }, + "unbound": { + "gid": 995, + "members": [ + + ] + }, + "openvpn": { + "gid": 994, + "members": [ + + ] + }, + "saslauth": { + "gid": 76, + "members": [ + + ] + }, + "avahi": { + "gid": 70, + "members": [ + + ] + }, + "brlapi": { + "gid": 993, + "members": [ + + ] + }, + "pulse": { + "gid": 992, + "members": [ + + ] + }, + "pulse-access": { + "gid": 991, + "members": [ + + ] + }, + "gdm": { + "gid": 42, + "members": [ + + ] + }, + "gnome-initial-setup": { + "gid": 990, + "members": [ + + ] + }, + "nm-openconnect": { + "gid": 989, + "members": [ + + ] + }, + "sshd": { + "gid": 74, + "members": [ + + ] + }, + "slocate": { + "gid": 21, + "members": [ + + ] + }, + "chrony": { + "gid": 988, + "members": [ + + ] + }, + "tcpdump": { + "gid": 72, + "members": [ + + ] + }, + "some_user": { + "gid": 1000, + "members": [ + "some_user" + ] + }, + "docker": { + "gid": 986, + "members": [ + "some_user" + ] + } + }, + "c": { + "gcc": { + "target": "x86_64-redhat-linux", + "configured_with": "../configure --enable-bootstrap --enable-languages=c,c++,objc,obj-c++,fortran,ada,go,lto --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-bugurl=http://bugzilla.redhat.com/bugzilla --enable-shared --enable-threads=posix --enable-checking=release --enable-multilib --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions --enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu --enable-plugin 
--enable-initfini-array --disable-libgcj --with-isl --enable-libmpx --enable-gnu-indirect-function --with-tune=generic --with-arch_32=i686 --build=x86_64-redhat-linux", + "thread_model": "posix", + "description": "gcc version 6.3.1 20161221 (Red Hat 6.3.1-1) (GCC) ", + "version": "6.3.1" + }, + "glibc": { + "version": "2.24", + "description": "GNU C Library (GNU libc) stable release version 2.24, by Roland McGrath et al." + } + }, + "lua": { + "version": "5.3.4" + }, + "ruby": { + "platform": "x86_64-linux", + "version": "2.3.3", + "release_date": "2016-11-21", + "target": "x86_64-redhat-linux-gnu", + "target_cpu": "x86_64", + "target_vendor": "redhat", + "target_os": "linux", + "host": "x86_64-redhat-linux-gnu", + "host_cpu": "x86_64", + "host_os": "linux-gnu", + "host_vendor": "redhat", + "bin_dir": "/usr/bin", + "ruby_bin": "/usr/bin/ruby", + "gems_dir": "/home/some_user/.gem/ruby", + "gem_bin": "/usr/bin/gem" + } + }, + "command": { + "ps": "ps -ef" + }, + "root_group": "root", + "fips": { + "kernel": { + "enabled": false + } + }, + "hostname": "myhostname", + "machinename": "myhostname", + "fqdn": "myhostname", + "domain": null, + "machine_id": "1234567abcede123456123456123456a", + "privateaddress": "192.168.1.100", + "keys": { + "ssh": { + + } + }, + "time": { + "timezone": "EDT" + }, + "sessions": { + "by_session": { + "1918": { + "session": "1918", + "uid": "1000", + "user": "some_user", + "seat": null + }, + "5": { + "session": "5", + "uid": "1000", + "user": "some_user", + "seat": "seat0" + }, + "3": { + "session": "3", + "uid": "0", + "user": "root", + "seat": "seat0" + } + }, + "by_user": { + "some_user": [ + { + "session": "1918", + "uid": "1000", + "user": "some_user", + "seat": null + }, + { + "session": "5", + "uid": "1000", + "user": "some_user", + "seat": "seat0" + } + ], + "root": [ + { + "session": "3", + "uid": "0", + "user": "root", + "seat": "seat0" + } + ] + } + }, + "hostnamectl": { + "static_hostname": "myhostname", + "icon_name": "computer-laptop", + "chassis": "laptop", + "machine_id": "12345123451234512345123451242323", + "boot_id": "3d5d5512341234123412341234123423", + "operating_system": "Fedora 25 (Workstation Edition)", + "cpe_os_name": "cpe", + "kernel": "Linux 4.9.14-200.fc25.x86_64", + "architecture": "x86-64" + }, + "block_device": { + "dm-1": { + "size": "104857600", + "removable": "0", + "rotational": "0", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "loop1": { + "size": "209715200", + "removable": "0", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "sr0": { + "size": "2097151", + "removable": "1", + "model": "DVD-RAM UJ8E2", + "rev": "SB01", + "state": "running", + "timeout": "30", + "vendor": "MATSHITA", + "queue_depth": "1", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "dm-2": { + "size": "378093568", + "removable": "0", + "rotational": "0", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "loop2": { + "size": "4194304", + "removable": "0", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "dm-0": { + "size": "16138240", + "removable": "0", + "rotational": "0", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "loop0": { + "size": "1024000", + "removable": "0", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "sda": { + "size": "500118192", + "removable": "0", +
"model": "SAMSUNG MZ7TD256", + "rev": "2L5Q", + "state": "running", + "timeout": "30", + "vendor": "ATA", + "queue_depth": "31", + "rotational": "0", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "dm-5": { + "size": "20971520", + "removable": "0", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + }, + "dm-3": { + "size": "209715200", + "removable": "0", + "rotational": "1", + "physical_block_size": "512", + "logical_block_size": "512" + } + }, + "sysconf": { + "LINK_MAX": 65000, + "_POSIX_LINK_MAX": 65000, + "MAX_CANON": 255, + "_POSIX_MAX_CANON": 255, + "MAX_INPUT": 255, + "_POSIX_MAX_INPUT": 255, + "NAME_MAX": 255, + "_POSIX_NAME_MAX": 255, + "PATH_MAX": 4096, + "_POSIX_PATH_MAX": 4096, + "PIPE_BUF": 4096, + "_POSIX_PIPE_BUF": 4096, + "SOCK_MAXBUF": null, + "_POSIX_ASYNC_IO": null, + "_POSIX_CHOWN_RESTRICTED": 1, + "_POSIX_NO_TRUNC": 1, + "_POSIX_PRIO_IO": null, + "_POSIX_SYNC_IO": null, + "_POSIX_VDISABLE": 0, + "ARG_MAX": 2097152, + "ATEXIT_MAX": 2147483647, + "CHAR_BIT": 8, + "CHAR_MAX": 127, + "CHAR_MIN": -128, + "CHILD_MAX": 62844, + "CLK_TCK": 100, + "INT_MAX": 2147483647, + "INT_MIN": -2147483648, + "IOV_MAX": 1024, + "LOGNAME_MAX": 256, + "LONG_BIT": 64, + "MB_LEN_MAX": 16, + "NGROUPS_MAX": 65536, + "NL_ARGMAX": 4096, + "NL_LANGMAX": 2048, + "NL_MSGMAX": 2147483647, + "NL_NMAX": 2147483647, + "NL_SETMAX": 2147483647, + "NL_TEXTMAX": 2147483647, + "NSS_BUFLEN_GROUP": 1024, + "NSS_BUFLEN_PASSWD": 1024, + "NZERO": 20, + "OPEN_MAX": 1024, + "PAGESIZE": 4096, + "PAGE_SIZE": 4096, + "PASS_MAX": 8192, + "PTHREAD_DESTRUCTOR_ITERATIONS": 4, + "PTHREAD_KEYS_MAX": 1024, + "PTHREAD_STACK_MIN": 16384, + "PTHREAD_THREADS_MAX": null, + "SCHAR_MAX": 127, + "SCHAR_MIN": -128, + "SHRT_MAX": 32767, + "SHRT_MIN": -32768, + "SSIZE_MAX": 32767, + "TTY_NAME_MAX": 32, + "TZNAME_MAX": 6, + "UCHAR_MAX": 255, + "UINT_MAX": 4294967295, + "UIO_MAXIOV": 1024, + "ULONG_MAX": 18446744073709551615, + "USHRT_MAX": 65535, + "WORD_BIT": 32, + "_AVPHYS_PAGES": 955772, + "_NPROCESSORS_CONF": 8, + "_NPROCESSORS_ONLN": 8, + "_PHYS_PAGES": 4027635, + "_POSIX_ARG_MAX": 2097152, + "_POSIX_ASYNCHRONOUS_IO": 200809, + "_POSIX_CHILD_MAX": 62844, + "_POSIX_FSYNC": 200809, + "_POSIX_JOB_CONTROL": 1, + "_POSIX_MAPPED_FILES": 200809, + "_POSIX_MEMLOCK": 200809, + "_POSIX_MEMLOCK_RANGE": 200809, + "_POSIX_MEMORY_PROTECTION": 200809, + "_POSIX_MESSAGE_PASSING": 200809, + "_POSIX_NGROUPS_MAX": 65536, + "_POSIX_OPEN_MAX": 1024, + "_POSIX_PII": null, + "_POSIX_PII_INTERNET": null, + "_POSIX_PII_INTERNET_DGRAM": null, + "_POSIX_PII_INTERNET_STREAM": null, + "_POSIX_PII_OSI": null, + "_POSIX_PII_OSI_CLTS": null, + "_POSIX_PII_OSI_COTS": null, + "_POSIX_PII_OSI_M": null, + "_POSIX_PII_SOCKET": null, + "_POSIX_PII_XTI": null, + "_POSIX_POLL": null, + "_POSIX_PRIORITIZED_IO": 200809, + "_POSIX_PRIORITY_SCHEDULING": 200809, + "_POSIX_REALTIME_SIGNALS": 200809, + "_POSIX_SAVED_IDS": 1, + "_POSIX_SELECT": null, + "_POSIX_SEMAPHORES": 200809, + "_POSIX_SHARED_MEMORY_OBJECTS": 200809, + "_POSIX_SSIZE_MAX": 32767, + "_POSIX_STREAM_MAX": 16, + "_POSIX_SYNCHRONIZED_IO": 200809, + "_POSIX_THREADS": 200809, + "_POSIX_THREAD_ATTR_STACKADDR": 200809, + "_POSIX_THREAD_ATTR_STACKSIZE": 200809, + "_POSIX_THREAD_PRIORITY_SCHEDULING": 200809, + "_POSIX_THREAD_PRIO_INHERIT": 200809, + "_POSIX_THREAD_PRIO_PROTECT": 200809, + "_POSIX_THREAD_ROBUST_PRIO_INHERIT": null, + "_POSIX_THREAD_ROBUST_PRIO_PROTECT": null, + "_POSIX_THREAD_PROCESS_SHARED": 200809, + "_POSIX_THREAD_SAFE_FUNCTIONS": 200809, + 
"_POSIX_TIMERS": 200809, + "TIMER_MAX": null, + "_POSIX_TZNAME_MAX": 6, + "_POSIX_VERSION": 200809, + "_T_IOV_MAX": null, + "_XOPEN_CRYPT": 1, + "_XOPEN_ENH_I18N": 1, + "_XOPEN_LEGACY": 1, + "_XOPEN_REALTIME": 1, + "_XOPEN_REALTIME_THREADS": 1, + "_XOPEN_SHM": 1, + "_XOPEN_UNIX": 1, + "_XOPEN_VERSION": 700, + "_XOPEN_XCU_VERSION": 4, + "_XOPEN_XPG2": 1, + "_XOPEN_XPG3": 1, + "_XOPEN_XPG4": 1, + "BC_BASE_MAX": 99, + "BC_DIM_MAX": 2048, + "BC_SCALE_MAX": 99, + "BC_STRING_MAX": 1000, + "CHARCLASS_NAME_MAX": 2048, + "COLL_WEIGHTS_MAX": 255, + "EQUIV_CLASS_MAX": null, + "EXPR_NEST_MAX": 32, + "LINE_MAX": 2048, + "POSIX2_BC_BASE_MAX": 99, + "POSIX2_BC_DIM_MAX": 2048, + "POSIX2_BC_SCALE_MAX": 99, + "POSIX2_BC_STRING_MAX": 1000, + "POSIX2_CHAR_TERM": 200809, + "POSIX2_COLL_WEIGHTS_MAX": 255, + "POSIX2_C_BIND": 200809, + "POSIX2_C_DEV": 200809, + "POSIX2_C_VERSION": 200809, + "POSIX2_EXPR_NEST_MAX": 32, + "POSIX2_FORT_DEV": null, + "POSIX2_FORT_RUN": null, + "_POSIX2_LINE_MAX": 2048, + "POSIX2_LINE_MAX": 2048, + "POSIX2_LOCALEDEF": 200809, + "POSIX2_RE_DUP_MAX": 32767, + "POSIX2_SW_DEV": 200809, + "POSIX2_UPE": null, + "POSIX2_VERSION": 200809, + "RE_DUP_MAX": 32767, + "PATH": "/usr/bin", + "CS_PATH": "/usr/bin", + "LFS_CFLAGS": null, + "LFS_LDFLAGS": null, + "LFS_LIBS": null, + "LFS_LINTFLAGS": null, + "LFS64_CFLAGS": "-D_LARGEFILE64_SOURCE", + "LFS64_LDFLAGS": null, + "LFS64_LIBS": null, + "LFS64_LINTFLAGS": "-D_LARGEFILE64_SOURCE", + "_XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64", + "XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64", + "_XBS5_ILP32_OFF32": null, + "XBS5_ILP32_OFF32_CFLAGS": null, + "XBS5_ILP32_OFF32_LDFLAGS": null, + "XBS5_ILP32_OFF32_LIBS": null, + "XBS5_ILP32_OFF32_LINTFLAGS": null, + "_XBS5_ILP32_OFFBIG": null, + "XBS5_ILP32_OFFBIG_CFLAGS": null, + "XBS5_ILP32_OFFBIG_LDFLAGS": null, + "XBS5_ILP32_OFFBIG_LIBS": null, + "XBS5_ILP32_OFFBIG_LINTFLAGS": null, + "_XBS5_LP64_OFF64": 1, + "XBS5_LP64_OFF64_CFLAGS": "-m64", + "XBS5_LP64_OFF64_LDFLAGS": "-m64", + "XBS5_LP64_OFF64_LIBS": null, + "XBS5_LP64_OFF64_LINTFLAGS": null, + "_XBS5_LPBIG_OFFBIG": null, + "XBS5_LPBIG_OFFBIG_CFLAGS": null, + "XBS5_LPBIG_OFFBIG_LDFLAGS": null, + "XBS5_LPBIG_OFFBIG_LIBS": null, + "XBS5_LPBIG_OFFBIG_LINTFLAGS": null, + "_POSIX_V6_ILP32_OFF32": null, + "POSIX_V6_ILP32_OFF32_CFLAGS": null, + "POSIX_V6_ILP32_OFF32_LDFLAGS": null, + "POSIX_V6_ILP32_OFF32_LIBS": null, + "POSIX_V6_ILP32_OFF32_LINTFLAGS": null, + "_POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64", + "POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64", + "_POSIX_V6_ILP32_OFFBIG": null, + "POSIX_V6_ILP32_OFFBIG_CFLAGS": null, + "POSIX_V6_ILP32_OFFBIG_LDFLAGS": null, + "POSIX_V6_ILP32_OFFBIG_LIBS": null, + "POSIX_V6_ILP32_OFFBIG_LINTFLAGS": null, + "_POSIX_V6_LP64_OFF64": 1, + "POSIX_V6_LP64_OFF64_CFLAGS": "-m64", + "POSIX_V6_LP64_OFF64_LDFLAGS": "-m64", + "POSIX_V6_LP64_OFF64_LIBS": null, + "POSIX_V6_LP64_OFF64_LINTFLAGS": null, + "_POSIX_V6_LPBIG_OFFBIG": null, + "POSIX_V6_LPBIG_OFFBIG_CFLAGS": null, + "POSIX_V6_LPBIG_OFFBIG_LDFLAGS": null, + "POSIX_V6_LPBIG_OFFBIG_LIBS": null, + "POSIX_V6_LPBIG_OFFBIG_LINTFLAGS": null, + "_POSIX_V7_ILP32_OFF32": null, + "POSIX_V7_ILP32_OFF32_CFLAGS": null, + "POSIX_V7_ILP32_OFF32_LDFLAGS": null, + "POSIX_V7_ILP32_OFF32_LIBS": null, + "POSIX_V7_ILP32_OFF32_LINTFLAGS": null, + "_POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64", + "POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64", + "_POSIX_V7_ILP32_OFFBIG": null, + "POSIX_V7_ILP32_OFFBIG_CFLAGS": null, + "POSIX_V7_ILP32_OFFBIG_LDFLAGS": 
null, + "POSIX_V7_ILP32_OFFBIG_LIBS": null, + "POSIX_V7_ILP32_OFFBIG_LINTFLAGS": null, + "_POSIX_V7_LP64_OFF64": 1, + "POSIX_V7_LP64_OFF64_CFLAGS": "-m64", + "POSIX_V7_LP64_OFF64_LDFLAGS": "-m64", + "POSIX_V7_LP64_OFF64_LIBS": null, + "POSIX_V7_LP64_OFF64_LINTFLAGS": null, + "_POSIX_V7_LPBIG_OFFBIG": null, + "POSIX_V7_LPBIG_OFFBIG_CFLAGS": null, + "POSIX_V7_LPBIG_OFFBIG_LDFLAGS": null, + "POSIX_V7_LPBIG_OFFBIG_LIBS": null, + "POSIX_V7_LPBIG_OFFBIG_LINTFLAGS": null, + "_POSIX_ADVISORY_INFO": 200809, + "_POSIX_BARRIERS": 200809, + "_POSIX_BASE": null, + "_POSIX_C_LANG_SUPPORT": null, + "_POSIX_C_LANG_SUPPORT_R": null, + "_POSIX_CLOCK_SELECTION": 200809, + "_POSIX_CPUTIME": 200809, + "_POSIX_THREAD_CPUTIME": 200809, + "_POSIX_DEVICE_SPECIFIC": null, + "_POSIX_DEVICE_SPECIFIC_R": null, + "_POSIX_FD_MGMT": null, + "_POSIX_FIFO": null, + "_POSIX_PIPE": null, + "_POSIX_FILE_ATTRIBUTES": null, + "_POSIX_FILE_LOCKING": null, + "_POSIX_FILE_SYSTEM": null, + "_POSIX_MONOTONIC_CLOCK": 200809, + "_POSIX_MULTI_PROCESS": null, + "_POSIX_SINGLE_PROCESS": null, + "_POSIX_NETWORKING": null, + "_POSIX_READER_WRITER_LOCKS": 200809, + "_POSIX_SPIN_LOCKS": 200809, + "_POSIX_REGEXP": 1, + "_REGEX_VERSION": null, + "_POSIX_SHELL": 1, + "_POSIX_SIGNALS": null, + "_POSIX_SPAWN": 200809, + "_POSIX_SPORADIC_SERVER": null, + "_POSIX_THREAD_SPORADIC_SERVER": null, + "_POSIX_SYSTEM_DATABASE": null, + "_POSIX_SYSTEM_DATABASE_R": null, + "_POSIX_TIMEOUTS": 200809, + "_POSIX_TYPED_MEMORY_OBJECTS": null, + "_POSIX_USER_GROUPS": null, + "_POSIX_USER_GROUPS_R": null, + "POSIX2_PBS": null, + "POSIX2_PBS_ACCOUNTING": null, + "POSIX2_PBS_LOCATE": null, + "POSIX2_PBS_TRACK": null, + "POSIX2_PBS_MESSAGE": null, + "SYMLOOP_MAX": null, + "STREAM_MAX": 16, + "AIO_LISTIO_MAX": null, + "AIO_MAX": null, + "AIO_PRIO_DELTA_MAX": 20, + "DELAYTIMER_MAX": 2147483647, + "HOST_NAME_MAX": 64, + "LOGIN_NAME_MAX": 256, + "MQ_OPEN_MAX": null, + "MQ_PRIO_MAX": 32768, + "_POSIX_DEVICE_IO": null, + "_POSIX_TRACE": null, + "_POSIX_TRACE_EVENT_FILTER": null, + "_POSIX_TRACE_INHERIT": null, + "_POSIX_TRACE_LOG": null, + "RTSIG_MAX": 32, + "SEM_NSEMS_MAX": null, + "SEM_VALUE_MAX": 2147483647, + "SIGQUEUE_MAX": 62844, + "FILESIZEBITS": 64, + "POSIX_ALLOC_SIZE_MIN": 4096, + "POSIX_REC_INCR_XFER_SIZE": null, + "POSIX_REC_MAX_XFER_SIZE": null, + "POSIX_REC_MIN_XFER_SIZE": 4096, + "POSIX_REC_XFER_ALIGN": 4096, + "SYMLINK_MAX": null, + "GNU_LIBC_VERSION": "glibc 2.24", + "GNU_LIBPTHREAD_VERSION": "NPTL 2.24", + "POSIX2_SYMLINKS": 1, + "LEVEL1_ICACHE_SIZE": 32768, + "LEVEL1_ICACHE_ASSOC": 8, + "LEVEL1_ICACHE_LINESIZE": 64, + "LEVEL1_DCACHE_SIZE": 32768, + "LEVEL1_DCACHE_ASSOC": 8, + "LEVEL1_DCACHE_LINESIZE": 64, + "LEVEL2_CACHE_SIZE": 262144, + "LEVEL2_CACHE_ASSOC": 8, + "LEVEL2_CACHE_LINESIZE": 64, + "LEVEL3_CACHE_SIZE": 6291456, + "LEVEL3_CACHE_ASSOC": 12, + "LEVEL3_CACHE_LINESIZE": 64, + "LEVEL4_CACHE_SIZE": 0, + "LEVEL4_CACHE_ASSOC": 0, + "LEVEL4_CACHE_LINESIZE": 0, + "IPV6": 200809, + "RAW_SOCKETS": 200809, + "_POSIX_IPV6": 200809, + "_POSIX_RAW_SOCKETS": 200809 + }, + "init_package": "systemd", + "shells": [ + "/bin/sh", + "/bin/bash", + "/sbin/nologin", + "/usr/bin/sh", + "/usr/bin/bash", + "/usr/sbin/nologin", + "/usr/bin/zsh", + "/bin/zsh" + ], + "ohai_time": 1492535225.41052, + "cloud_v2": null, + "cloud": null +} +''' # noqa + + +class TestOhaiCollector(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'ohai'] + valid_subsets = ['ohai'] + fact_namespace = 'ansible_ohai' + collector_class = OhaiFactCollector + + def _mock_module(self): + 
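"""Return a Mock module whose run_command reports success (rc=0) with the canned ohai JSON above.""" +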
mock_module = Mock() + mock_module.params = {'gather_subset': self.gather_subset, + 'gather_timeout': 10, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value='/not/actually/ohai') + mock_module.run_command = Mock(return_value=(0, ohai_json_output, '')) + return mock_module + + @patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.get_ohai_output') + def test_bogus_json(self, mock_get_ohai_output): + module = self._mock_module() + + # bogus json + mock_get_ohai_output.return_value = '{' + + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + self.assertEqual(facts_dict, {}) + + @patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.run_ohai') + def test_ohai_non_zero_return_code(self, mock_run_ohai): + module = self._mock_module() + + # simulate ohai failing with a non-zero return code + mock_run_ohai.return_value = (1, '{}', '') + + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + + # a failed ohai run should contribute no 'ohai' facts at all + self.assertNotIn('ohai', facts_dict) + self.assertEqual(facts_dict, {}) diff --git a/test/units/module_utils/facts/system/__init__.py b/test/units/module_utils/facts/system/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/system/distribution/__init__.py b/test/units/module_utils/facts/system/distribution/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/system/distribution/conftest.py b/test/units/module_utils/facts/system/distribution/conftest.py new file mode 100644 index 00000000..d27b97f0 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/conftest.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import pytest + +from units.compat.mock import Mock + + +@pytest.fixture +def mock_module(): + mock_module = Mock() + mock_module.params = {'gather_subset': ['all'], + 'gather_timeout': 5, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value=None) + return mock_module diff --git a/test/units/module_utils/facts/system/distribution/fixtures/almalinux_8_3_beta.json b/test/units/module_utils/facts/system/distribution/fixtures/almalinux_8_3_beta.json new file mode 100644 index 00000000..2d8df50b --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/almalinux_8_3_beta.json @@ -0,0 +1,53 @@ +{ + "name": "AlmaLinux 8.3", + "distro": { + "codename": "Purple Manul", + "id": "almalinux", + "name": "AlmaLinux", + "version": "8.3", + "version_best": "8.3", + "lsb_release_info": { + "lsb_version": ":core-4.1-amd64:core-4.1-noarch", + "distributor_id": "AlmaLinux", + "description": "AlmaLinux release 8.3 Beta (Purple Manul)", + "release": "8.3", + "codename": "PurpleManul" + }, + "os_release_info": { + "name": "AlmaLinux", + "version": "8.3 (Purple Manul)", + "id": "almalinux", + "id_like": "rhel centos fedora", + "version_id": "8.3", + "platform_id": "platform:el8", + "pretty_name": "AlmaLinux 8.3 Beta (Purple Manul)", + "ansi_color": "0;34", + "cpe_name": "cpe:/o:almalinux:almalinux:8.3:beta", + "home_url": "https://almalinux.org/", + "bug_report_url": "https://bugs.almalinux.org/", + "almalinux_mantisbt_project": "AlmaLinux-8", +
"almalinux_mantisbt_project_version": "8", + "codename": "Purple Manul" + } + }, + "input": { + "/etc/centos-release": "AlmaLinux release 8.3 Beta (Purple Manul)\n", + "/etc/redhat-release": "AlmaLinux release 8.3 Beta (Purple Manul)\n", + "/etc/system-release": "AlmaLinux release 8.3 Beta (Purple Manul)\n", + "/etc/os-release": "NAME=\"AlmaLinux\"\nVERSION=\"8.3 (Purple Manul)\"\nID=\"almalinux\"\nID_LIKE=\"rhel centos fedora\"\nVERSION_ID=\"8.3\"\nPLATFORM_ID=\"platform:el8\"\nPRETTY_NAME=\"AlmaLinux 8.3 Beta (Purple Manul)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:almalinux:almalinux:8.3:beta\"\nHOME_URL=\"https://almalinux.org/\"\nBUG_REPORT_URL=\"https://bugs.almalinux.org/\"\n\nALMALINUX_MANTISBT_PROJECT=\"AlmaLinux-8\" \nALMALINUX_MANTISBT_PROJECT_VERSION=\"8\" \n\n", + "/usr/lib/os-release": "NAME=\"AlmaLinux\"\nVERSION=\"8.3 (Purple Manul)\"\nID=\"almalinux\"\nID_LIKE=\"rhel centos fedora\"\nVERSION_ID=\"8.3\"\nPLATFORM_ID=\"platform:el8\"\nPRETTY_NAME=\"AlmaLinux 8.3 Beta (Purple Manul)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:almalinux:almalinux:8.3:beta\"\nHOME_URL=\"https://almalinux.org/\"\nBUG_REPORT_URL=\"https://bugs.almalinux.org/\"\n\nALMALINUX_MANTISBT_PROJECT=\"AlmaLinux-8\" \nALMALINUX_MANTISBT_PROJECT_VERSION=\"8\" \n\n" + }, + "platform.dist": [ + "almalinux", + "8.3", + "Purple Manul" + ], + "result": { + "distribution": "AlmaLinux", + "distribution_version": "8.3", + "distribution_release": "Purple Manul", + "distribution_major_version": "8", + "os_family": "RedHat" + }, + "platform.release": "4.18.0-240.el8.x86_64" +} diff --git a/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json new file mode 100644 index 00000000..88d9ad8d --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json @@ -0,0 +1,24 @@ +{ + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "arch", + "name": "Arch Linux", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n", + "/etc/arch-release": "" + }, + "name": "Arch Linux NA", + "result": { + "distribution_release": "NA", + "distribution": "Archlinux", + "distribution_major_version": "NA", + "os_family": "Archlinux", + "distribution_version": "NA" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json new file mode 100644 index 00000000..a24bb3af --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json @@ -0,0 +1,23 @@ +{ + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "arch", + "name": "Arch Linux", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n" + }, + "name": "Arch Linux no arch-release NA", + 
"result": { + "distribution_release": "NA", + "distribution": "Archlinux", + "distribution_major_version": "NA", + "os_family": "Archlinux", + "distribution_version": "NA" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json b/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json new file mode 100644 index 00000000..8f356367 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json @@ -0,0 +1,31 @@ +{ + "name": "Archlinux rolling", + "distro": { + "codename": "n/a", + "id": "arch", + "name": "Arch", + "version": "rolling", + "version_best": "rolling", + "lsb_release_info": { + "lsb_version": "1.4", + "distributor_id": "Arch", + "description": "Arch Linux", + "release": "rolling", + "codename": "n/a" + }, + "os_release_info": {} + }, + "input": { + "/etc/arch-release": "Arch Linux release\n", + "/etc/lsb-release": "LSB_VERSION=1.4\nDISTRIB_ID=Arch\nDISTRIB_RELEASE=rolling\nDISTRIB_DESCRIPTION=\"Arch Linux\"\n", + "/usr/lib/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nBUILD_ID=rolling\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nDOCUMENTATION_URL=\"https://wiki.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\nLOGO=archlinux\n" + }, + "platform.dist": ["arch", "rolling", "n/a"], + "result": { + "distribution": "Archlinux", + "distribution_version": "rolling", + "distribution_release": "n/a", + "distribution_major_version": "rolling", + "os_family": "Archlinux" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json b/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json new file mode 100644 index 00000000..c99a0739 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json @@ -0,0 +1,31 @@ +{ + "name": "CentOS 6.7", + "platform.dist": ["centos", "6.7", "Final"], + "distro": { + "codename": "Final", + "id": "centos", + "name": "CentOS Linux", + "version": "6.7", + "version_best": "6.7", + "os_release_info": {}, + "lsb_release_info": { + "release": "6.7", + "codename": "Final", + "distributor_id": "CentOS", + "lsb_version": ":base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch", + "description": "CentOS release 6.7 (Final)" + } + }, + "input": { + "/etc/redhat-release": "CentOS release 6.7 (Final)\n", + "/etc/lsb-release": "LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch\n", + "/etc/system-release": "CentOS release 6.7 (Final)\n" + }, + "result": { + "distribution_release": "Final", + "distribution": "CentOS", + "distribution_major_version": "6", + "os_family": "RedHat", + "distribution_version": "6.7" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json new file mode 100644 index 00000000..1a99a86f --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json @@ -0,0 +1,24 @@ +{ + "platform.dist": ["Clear Linux OS", "26580", "clear-linux-os"], + "distro": { + "codename": "", + "id": "clear-linux-os", + "name": "Clear Linux OS", + "version": "26580", + "version_best": "26580", + "os_release_info": {}, + 
"lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=26580\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"", + "/usr/lib/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=26580\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"" + }, + "name": "ClearLinux 26580", + "result": { + "distribution_release": "clear-linux-os", + "distribution": "Clear Linux OS", + "distribution_major_version": "26580", + "os_family": "ClearLinux", + "distribution_version": "26580" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json new file mode 100644 index 00000000..30b76688 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json @@ -0,0 +1,24 @@ +{ + "platform.dist": ["Clear Linux OS", "28120", "clear-linux-os"], + "distro": { + "codename": "", + "id": "clear-linux-os", + "name": "Clear Linux OS", + "version": "28120", + "version_best": "28120", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"", + "/usr/lib/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"" + }, + "name": "ClearLinux 28120", + "result": { + "distribution_release": "clear-linux-os", + "distribution": "Clear Linux OS", + "distribution_major_version": "28120", + "os_family": "ClearLinux", + "distribution_version": "28120" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json b/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json new file mode 100644 index 00000000..af43704c --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json @@ -0,0 +1,23 @@ +{ + "name": "Core OS", + "input": { + "/usr/lib/os-release": "NAME=\"Container Linux by CoreOS\"\nID=coreos\nVERSION=1911.5.0\nVERSION_ID=1911.5.0\nBUILD_ID=2018-12-15-2317\nPRETTY_NAME=\"Container Linux by CoreOS 1911.5.0 (Rhyolite)\"\nANSI_COLOR=\"38;5;75\"\nHOME_URL=\"https://coreos.com/\"\nBUG_REPORT_URL=\"https://issues.coreos.com\"\nCOREOS_BOARD=\"amd64-usr\"", + "/etc/lsb-release": "DISTRIB_ID=CoreOS\nDISTRIB_RELEASE=1911.5.0\nDISTRIB_CODENAME=\"Rhyolite\"\nDISTRIB_DESCRIPTION=\"CoreOS 1911.5.0 (Rhyolite)\"" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": 
"Rhyolite", + "id": "coreos", + "name": "CoreOS", + "version": "1911.5.0", + "version_best": "1911.5.0", + "os_release_info": {}, + "lsb_release_info": {} + }, + "platform.release": "", + "result": { + "distribution": "Coreos", + "distribution_major_version": "1911", + "distribution_version": "1911.5.0" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json b/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json new file mode 100644 index 00000000..ccd06d99 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json @@ -0,0 +1,23 @@ +{ + "name": "Core OS", + "input": { + "/etc/os-release": "NAME=CoreOS\nID=coreos\nVERSION=976.0.0\nVERSION_ID=976.0.0\nBUILD_ID=2016-03-03-2324\nPRETTY_NAME=\"CoreOS 976.0.0 (Coeur Rouge)\"\nANSI_COLOR=\"1;32\"\nHOME_URL=\"https://coreos.com/\"\nBUG_REPORT_URL=\"https://github.com/coreos/bugs/issues\"", + "/etc/lsb-release": "DISTRIB_ID=CoreOS\nDISTRIB_RELEASE=976.0.0\nDISTRIB_CODENAME=\"Coeur Rouge\"\nDISTRIB_DESCRIPTION=\"CoreOS 976.0.0 (Coeur Rouge)\"" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "Coeur Rouge", + "id": "coreos", + "name": "CoreOS", + "version": "976.0.0", + "version_best": "976.0.0", + "os_release_info": {}, + "lsb_release_info": {} + }, + "platform.release": "", + "result": { + "distribution": "CoreOS", + "distribution_major_version": "976", + "distribution_version": "976.0.0" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json new file mode 100644 index 00000000..ad9c3f79 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json @@ -0,0 +1,23 @@ +{ + "name": "Cumulus Linux 2.5.4", + "input": { + "/etc/os-release": "NAME=\"Cumulus Linux\"\nVERSION_ID=2.5.4\nVERSION=\"2.5.4-6dc6e80-201510091936-build\"\nPRETTY_NAME=\"Cumulus Linux\"\nID=cumulus-linux\nID_LIKE=debian\nCPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:2.5.4-6dc6e80-201510091936-build\nHOME_URL=\"http://www.cumulusnetworks.com/\"\nSUPPORT_URL=\"http://support.cumulusnetworks.com/\"" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "cumulus-linux", + "name": "Cumulus Linux", + "version": "2.5.4", + "version_best": "2.5.4", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Cumulus Linux", + "distribution_major_version": "2", + "distribution_release": "2.5.4-6dc6e80-201510091936-build", + "os_family": "Debian", + "distribution_version": "2.5.4" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json new file mode 100644 index 00000000..ec44af13 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json @@ -0,0 +1,23 @@ +{ + "name": "Cumulus Linux 3.7.3", + "input": { + "/etc/os-release": "NAME=\"Cumulus Linux\"\nVERSION_ID=3.7.3\nVERSION=\"Cumulus Linux 3.7.3\"\nPRETTY_NAME=\"Cumulus Linux\"\nID=cumulus-linux\nID_LIKE=debian\nCPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:3.7.3\nHOME_URL=\"http://www.cumulusnetworks.com/\"\nSUPPORT_URL=\"http://support.cumulusnetworks.com/\"" + }, + "platform.dist": ["debian", "8.11", ""], + "distro": { + "codename": 
"", + "id": "cumulus-linux", + "name": "Cumulus Linux", + "version": "3.7.3", + "version_best": "3.7.3", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Cumulus Linux", + "distribution_major_version": "3", + "distribution_release": "Cumulus Linux 3.7.3", + "os_family": "Debian", + "distribution_version": "3.7.3" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json new file mode 100644 index 00000000..20544c1e --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json @@ -0,0 +1,40 @@ +{ + "name": "Debian 10", + "distro": { + "codename": "buster", + "id": "debian", + "name": "Debian GNU/Linux", + "version": "10", + "version_best": "10", + "lsb_release_info": { + "distributor_id": "Debian", + "description": "Debian GNU/Linux 10 (buster)", + "release": "10", + "codename": "buster" + }, + "os_release_info": { + "pretty_name": "Debian GNU/Linux 10 (buster)", + "name": "Debian GNU/Linux", + "version_id": "10", + "version": "10 (buster)", + "version_codename": "buster", + "id": "debian", + "home_url": "https://www.debian.org/", + "support_url": "https://www.debian.org/support", + "bug_report_url": "https://bugs.debian.org/", + "codename": "buster" + } + }, + "input": { + "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux 10 (buster)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"10\"\nVERSION=\"10 (buster)\"\nVERSION_CODENAME=buster\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\n", + "/usr/lib/os-release": "PRETTY_NAME=\"Debian GNU/Linux 10 (buster)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"10\"\nVERSION=\"10 (buster)\"\nVERSION_CODENAME=buster\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\n" + }, + "platform.dist": ["debian", "10", "buster"], + "result": { + "distribution": "Debian", + "distribution_version": "10", + "distribution_release": "buster", + "distribution_major_version": "10", + "os_family": "Debian" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json new file mode 100644 index 00000000..894c9424 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json @@ -0,0 +1,39 @@ +{ + "name": "Debian 7.9", + "input": { + "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux 7 (wheezy)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"7\"\nVERSION=\"7 (wheezy)\"\nID=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"http://www.debian.org/\"\nSUPPORT_URL=\"http://www.debian.org/support/\"\nBUG_REPORT_URL=\"http://bugs.debian.org/\"" + }, + "platform.dist": ["debian", "7.9", ""], + "distro": { + "codename": "wheezy", + "id": "debian", + "name": "Debian GNU/Linux", + "version": "7", + "version_best": "7.9", + "os_release_info": { + "name": "Debian GNU/Linux", + "ansi_color": "1;31", + "support_url": "http://www.debian.org/support/", + "version_id": "7", + "bug_report_url": "http://bugs.debian.org/", + "pretty_name": "Debian GNU/Linux 7 (wheezy)", + "version": "7 (wheezy)", + "codename": "wheezy", + "home_url": "http://www.debian.org/", + "id": "debian" + }, + "lsb_release_info": { + "release": "7.9", + "codename": "wheezy", + 
"distributor_id": "Debian", + "description": "Debian GNU/Linux 7.9 (wheezy)" + } + }, + "result": { + "distribution": "Debian", + "distribution_major_version": "7", + "distribution_release": "wheezy", + "os_family": "Debian", + "distribution_version": "7.9" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json new file mode 100644 index 00000000..23388303 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json @@ -0,0 +1,36 @@ +{ + "name": "Debian stretch/sid", + "input": { + "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux stretch/sid\"\nNAME=\"Debian GNU/Linux\"\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"", + "/etc/debian_version": "stretch/sid\n" + }, + "platform.dist": ["debian", "stretch/sid", ""], + "distro": { + "codename": "stretch", + "id": "debian", + "name": "Debian GNU/Linux", + "version": "9", + "version_best": "9.8", + "lsb_release_info": { + "release": "unstable", + "codename": "sid", + "distributor_id": "Debian", + "description": "Debian GNU/Linux stretch/sid" + }, + "os_release_info": { + "name": "Debian GNU/Linux", + "support_url": "https://www.debian.org/support", + "bug_report_url": "https://bugs.debian.org/", + "pretty_name": "Debian GNU/Linux stretch/sid", + "home_url": "https://www.debian.org/", + "id": "debian" + } + }, + "result": { + "distribution": "Debian", + "distribution_major_version": "9", + "distribution_release": "stretch", + "os_family": "Debian", + "distribution_version": "9.8" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/devuan.json b/test/units/module_utils/facts/system/distribution/fixtures/devuan.json new file mode 100644 index 00000000..d02fc2e4 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/devuan.json @@ -0,0 +1,23 @@ +{ + "name": "Devuan", + "input": { + "/etc/os-release": "PRETTY_NAME=\"Devuan GNU/Linux ascii\"\nNAME=\"Devuan GNU/Linux\"\nID=devuan\nHOME_URL=\"https://www.devuan.org/\"\nSUPPORT_URL=\"https://devuan.org/os/community\"\nBUG_REPORT_URL=\"https://bugs.devuan.org/\"" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "devuan", + "name": "Devuan GNU/Linux", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Devuan", + "distribution_major_version": "NA", + "distribution_release": "ascii", + "os_family": "Debian", + "distribution_version": "NA" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json new file mode 100644 index 00000000..cec68d42 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json @@ -0,0 +1,25 @@ +{ + "name": "Fedora 22", + "platform.dist": ["fedora", "22", "Twenty Two"], + "distro": { + "codename": "Twenty Two", + "id": "fedora", + "name": "Fedora", + "version": "22", + "version_best": "22", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/redhat-release": "Fedora release 22 (Twenty Two)\n", + "/etc/os-release": "NAME=Fedora\nVERSION=\"22 (Twenty Two)\"\nID=fedora\nVERSION_ID=22\nPRETTY_NAME=\"Fedora 22 (Twenty 
Two)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:22\"\nHOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=22\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=22\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n", + "/etc/system-release": "Fedora release 22 (Twenty Two)\n" + }, + "result": { + "distribution_release": "Twenty Two", + "distribution": "Fedora", + "distribution_major_version": "22", + "os_family": "RedHat", + "distribution_version": "22" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json new file mode 100644 index 00000000..70b5bc39 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json @@ -0,0 +1,25 @@ +{ + "platform.dist": ["fedora", "25", "Rawhide"], + "distro": { + "codename": "Rawhide", + "id": "fedora", + "name": "Fedora", + "version": "25", + "version_best": "25", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/redhat-release": "Fedora release 25 (Rawhide)\n", + "/etc/os-release": "NAME=Fedora\nVERSION=\"25 (Workstation Edition)\"\nID=fedora\nVERSION_ID=25\nPRETTY_NAME=\"Fedora 25 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:25\"\nHOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=rawhide\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=rawhide\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n", + "/etc/system-release": "Fedora release 25 (Rawhide)\n" + }, + "name": "Fedora 25", + "result": { + "distribution_release": "Rawhide", + "distribution": "Fedora", + "distribution_major_version": "25", + "os_family": "RedHat", + "distribution_version": "25" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json new file mode 100644 index 00000000..e6d905e9 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json @@ -0,0 +1,55 @@ +{ + "name": "Fedora 31", + "distro": { + "codename": "", + "id": "fedora", + "name": "Fedora", + "version": "31", + "version_best": "31", + "lsb_release_info": { + "lsb_version": ":core-4.1-amd64:core-4.1-noarch", + "distributor_id": "Fedora", + "description": "Fedora release 31 (Thirty One)", + "release": "31", + "codename": "ThirtyOne" + }, + "os_release_info": { + "name": "Fedora", + "version": "31 (Workstation Edition)", + "id": "fedora", + "version_id": "31", + "version_codename": "", + "platform_id": "platform:f31", + "pretty_name": "Fedora 31 (Workstation Edition)", + "ansi_color": "0;34", + "logo": "fedora-logo-icon", + "cpe_name": "cpe:/o:fedoraproject:fedora:31", + "home_url": "https://fedoraproject.org/", + "documentation_url": "https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/", + "support_url": "https://fedoraproject.org/wiki/Communicating_and_getting_help", + "bug_report_url": "https://bugzilla.redhat.com/", + "redhat_bugzilla_product": "Fedora", + "redhat_bugzilla_product_version": "31", + "redhat_support_product": 
"Fedora", + "redhat_support_product_version": "31", + "privacy_policy_url": "https://fedoraproject.org/wiki/Legal:PrivacyPolicy", + "variant": "Workstation Edition", + "variant_id": "workstation", + "codename": "" + } + }, + "input": { + "/etc/redhat-release": "Fedora release 31 (Thirty One)\n", + "/etc/system-release": "Fedora release 31 (Thirty One)\n", + "/etc/os-release": "NAME=Fedora\nVERSION=\"31 (Workstation Edition)\"\nID=fedora\nVERSION_ID=31\nVERSION_CODENAME=\"\"\nPLATFORM_ID=\"platform:f31\"\nPRETTY_NAME=\"Fedora 31 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nLOGO=fedora-logo-icon\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:31\"\nHOME_URL=\"https://fedoraproject.org/\"\nDOCUMENTATION_URL=\"https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/\"\nSUPPORT_URL=\"https://fedoraproject.org/wiki/Communicating_and_getting_help\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=31\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=31\nPRIVACY_POLICY_URL=\"https://fedoraproject.org/wiki/Legal:PrivacyPolicy\"\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n", + "/usr/lib/os-release": "NAME=Fedora\nVERSION=\"31 (Workstation Edition)\"\nID=fedora\nVERSION_ID=31\nVERSION_CODENAME=\"\"\nPLATFORM_ID=\"platform:f31\"\nPRETTY_NAME=\"Fedora 31 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nLOGO=fedora-logo-icon\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:31\"\nHOME_URL=\"https://fedoraproject.org/\"\nDOCUMENTATION_URL=\"https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/\"\nSUPPORT_URL=\"https://fedoraproject.org/wiki/Communicating_and_getting_help\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=31\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=31\nPRIVACY_POLICY_URL=\"https://fedoraproject.org/wiki/Legal:PrivacyPolicy\"\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n" + }, + "platform.dist": ["fedora", "31", ""], + "result": { + "distribution": "Fedora", + "distribution_version": "31", + "distribution_release": "", + "distribution_major_version": "31", + "os_family": "RedHat" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json b/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json new file mode 100644 index 00000000..618b2259 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json @@ -0,0 +1,24 @@ +{ + "name": "Flatcar Container Linux", + "input": { + "/usr/lib/os-release": "NAME=\"Flatcar Container Linux by Kinvolk\"\nID=flatcar\nID_LIKE=coreos\nVERSION=2492.0.0\nVERSION_ID=2492.0.0\nBUILD_ID=2020-04-28-2210\nPRETTY_NAME=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\"\nANSI_COLOR=\"38;5;75\"\nHOME_URL=\"https://flatcar-linux.org/\"\nBUG_REPORT_URL=\"https://issues.flatcar-linux.org\"", + "/etc/lsb-release": "DISTRIB_ID=\"Flatcar Container Linux by Kinvolk\"\nDISTRIB_RELEASE=2492.0.0\nDISTRIB_CODENAME=\"Rhyolite\"\nDISTRIB_DESCRIPTION=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\"" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "Rhyolite", + "id": "flatcar", + "id_like": "coreos", + "name": "Flatcar", + "version": "2492.0.0", + "version_best": "2492.0.0", + "os_release_info": {}, + "lsb_release_info": {} + }, + "platform.release": "", + "result": { + "distribution": 
"Flatcar", + "distribution_major_version": "2492", + "distribution_version": "2492.0.0" + } +} diff --git a/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json b/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json new file mode 100644 index 00000000..096b66ff --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json @@ -0,0 +1,25 @@ +{ + "name": "Kali 2019.1", + "input": { + "/etc/os-release": "PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\nVERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\nSUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n", + "/etc/lsb-release": "DISTRIB_ID=Kali\nDISTRIB_RELEASE=kali-rolling\nDISTRIB_CODENAME=kali-rolling\nDISTRIB_DESCRIPTION=\"Kali GNU/Linux Rolling\"\n", + "/usr/lib/os-release": "PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\nVERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\nSUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n" + }, + "platform.dist": ["kali", "2019.1", ""], + "distro": { + "codename": "kali-rolling", + "id": "kali", + "name": "Kali GNU/Linux Rolling", + "version": "2019.1", + "version_best": "2019.1", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Kali", + "distribution_version": "2019.1", + "distribution_release": "kali-rolling", + "distribution_major_version": "2019", + "os_family": "Debian" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json b/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json new file mode 100644 index 00000000..5ff59c72 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json @@ -0,0 +1,42 @@ +{ + "platform.dist": ["neon", "16.04", "xenial"], + "distro": { + "codename": "xenial", + "id": "neon", + "name": "KDE neon", + "version": "16.04", + "version_best": "16.04", + "os_release_info": { + "support_url": "http://help.ubuntu.com/", + "version_codename": "xenial", + "pretty_name": "Ubuntu 16.04.6 LTS", + "home_url": "http://www.ubuntu.com/", + "bug_report_url": "http://bugs.launchpad.net/ubuntu/", + "version": "16.04.6 LTS (Xenial Xerus)", + "version_id": "16.04", + "id": "ubuntu", + "ubuntu_codename": "xenial", + "codename": "xenial", + "name": "Ubuntu", + "id_like": "debian" + }, + "lsb_release_info": { + "description": "Ubuntu 16.04.6 LTS", + "release": "16.04", + "distributor_id": "Ubuntu", + "codename": "xenial" + } + }, + "input": { + "/etc/os-release": "NAME=\"KDE neon\"\nVERSION=\"5.8\"\nID=neon\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"KDE neon User Edition 5.8\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://neon.kde.org/\"\nSUPPORT_URL=\"http://neon.kde.org/\"\nBUG_REPORT_URL=\"http://bugs.kde.org/\"\nVERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n", + "/etc/lsb-release": "DISTRIB_ID=neon\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"KDE neon User Edition 5.8\"\n" + }, + "name": "KDE neon 16.04", + "result": { + "distribution_release": "xenial", + "distribution": "KDE neon", + "distribution_major_version": "16", + "os_family": "Debian", + "distribution_version": "16.04" + } +} \ No newline at end of file diff --git 
a/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json new file mode 100644 index 00000000..74e628e1 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json @@ -0,0 +1,25 @@ +{ + "platform.dist": ["linuxmint", "18.2", "sonya"], + "input": { + "/etc/os-release": "NAME=\"Linux Mint\"\nVERSION=\"18.2 (Sonya)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 18.2\"\nVERSION_ID=\"18.2\"\nHOME_URL=\"http://www.linuxmint.com/\"\nSUPPORT_URL=\"http://forums.linuxmint.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/linuxmint/\"\nVERSION_CODENAME=sonya\nUBUNTU_CODENAME=xenial\n", + "/usr/lib/os-release": "NAME=\"Linux Mint\"\nVERSION=\"18.2 (Sonya)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 18.2\"\nVERSION_ID=\"18.2\"\nHOME_URL=\"http://www.linuxmint.com/\"\nSUPPORT_URL=\"http://forums.linuxmint.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/linuxmint/\"\nVERSION_CODENAME=sonya\nUBUNTU_CODENAME=xenial\n", + "/etc/lsb-release": "DISTRIB_ID=LinuxMint\nDISTRIB_RELEASE=18.2\nDISTRIB_CODENAME=sonya\nDISTRIB_DESCRIPTION=\"Linux Mint 18.2 Sonya\"\n" + }, + "result": { + "distribution_release": "sonya", + "distribution": "Linux Mint", + "distribution_major_version": "18", + "os_family": "Debian", + "distribution_version": "18.2" + }, + "name": "Linux Mint 18.2", + "distro": { + "codename": "sonya", + "version": "18.2", + "id": "linuxmint", + "version_best": "18.2", + "name": "Linux Mint", + "os_release_info": {}, + "lsb_release_info": {} + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json new file mode 100644 index 00000000..7712856a --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json @@ -0,0 +1,24 @@ +{ + "platform.dist": ["linuxmint", "19.1", "tessa"], + "input": { + "/usr/lib/os-release": "NAME=\"Linux Mint\"\nVERSION=\"19.1 (Tessa)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 19.1\"\nVERSION_ID=\"19.1\"\nHOME_URL=\"https://www.linuxmint.com/\"\nSUPPORT_URL=\"https://forums.ubuntu.com/\"\nBUG_REPORT_URL=\"http: //linuxmint-troubleshooting-guide.readthedocs.io/en/latest/\"\nPRIVACY_POLICY_URL=\"https://www.linuxmint.com/\"\nVERSION_CODENAME=tessa\nUBUNTU_CODENAME=bionic\n", + "/etc/lsb-release": "DISTRIB_ID=LinuxMint\nDISTRIB_RELEASE=19.1\nDISTRIB_CODENAME=tessa\nDISTRIB_DESCRIPTION=\"Linux Mint 19.1 Tessa\"\n" + }, + "result": { + "distribution_release": "tessa", + "distribution": "Linux Mint", + "distribution_major_version": "19", + "os_family": "Debian", + "distribution_version": "19.1" + }, + "name": "Linux Mint 19.1", + "distro": { + "codename": "tessa", + "version": "19.1", + "id": "linuxmint", + "version_best": "19.1", + "name": "Linux Mint", + "os_release_info": {}, + "lsb_release_info": {} + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json new file mode 100644 index 00000000..bdc942ba --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json @@ -0,0 +1,25 @@ +{ + "name": "Nexenta 3", + "uname_v": "NexentaOS_134f", + "result": { + "distribution_release": "Open Storage Appliance v3.1.6", + "distribution": "Nexenta", + 
"os_family": "Solaris", + "distribution_version": "3.1.6" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "platform.release:": "", + "input": { + "/etc/release": " Open Storage Appliance v3.1.6\n Copyright (c) 2014 Nexenta Systems, Inc. All Rights Reserved.\n Copyright (c) 2011 Oracle. All Rights Reserved.\n Use is subject to license terms.\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json new file mode 100644 index 00000000..d24e9bc2 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json @@ -0,0 +1,24 @@ +{ + "name": "Nexenta 4", + "uname_v": "NexentaOS_4:cd604cd066", + "result": { + "distribution_release": "Open Storage Appliance 4.0.3-FP2", + "distribution": "Nexenta", + "os_family": "Solaris", + "distribution_version": "4.0.3-FP2" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " Open Storage Appliance 4.0.3-FP2\n Copyright (c) 2014 Nexenta Systems, Inc. All Rights Reserved.\n Copyright (c) 2010 Oracle. All Rights Reserved.\n Use is subject to license terms.\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/omnios.json b/test/units/module_utils/facts/system/distribution/fixtures/omnios.json new file mode 100644 index 00000000..8bb2b445 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/omnios.json @@ -0,0 +1,24 @@ +{ + "name": "OmniOS", + "uname_v": "omnios-10b9c79", + "result": { + "distribution_release": "OmniOS v11 r151012", + "distribution": "OmniOS", + "os_family": "Solaris", + "distribution_version": "r151012" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " OmniOS v11 r151012\n Copyright 2014 OmniTI Computer Consulting, Inc. 
All rights reserved.\n Use is subject to license terms.\n\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json b/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json new file mode 100644 index 00000000..83103864 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json @@ -0,0 +1,28 @@ +{ + "platform.dist": [ + "openeuler", + "20.03", + "LTS" + ], + "input": { + "/etc/os-release": "NAME=\"openEuler\"\nVERSION=\"20.03 (LTS)\"\nID=\"openEuler\"\nVERSION_ID=\"20.03\"\nPRETTY_NAME=\"openEuler 20.03 (LTS)\"\nANSI_COLOR=\"0;31\"\n\n", + "/etc/system-release": "openEuler release 20.03 (LTS)\n" + }, + "result": { + "distribution_release": "LTS", + "distribution": "openEuler", + "distribution_major_version": "20", + "os_family": "RedHat", + "distribution_version": "20.03" + }, + "name": "openEuler 20.03", + "distro": { + "codename": "LTS", + "version": "20.03", + "id": "openeuler", + "version_best": "20.03", + "name": "openEuler", + "os_release_info": {}, + "lsb_release_info": {} + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json b/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json new file mode 100644 index 00000000..a055bb07 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json @@ -0,0 +1,24 @@ +{ + "name": "OpenIndiana", + "uname_v": "oi_151a9", + "result": { + "distribution_release": "OpenIndiana Development oi_151.1.9 X86 (powered by illumos)", + "distribution": "OpenIndiana", + "os_family": "Solaris", + "distribution_version": "oi_151a9" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " OpenIndiana Development oi_151.1.9 X86 (powered by illumos)\n Copyright 2011 Oracle and/or its affiliates. 
All rights reserved.\n Use is subject to license terms.\n Assembled 17 January 2014\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json new file mode 100644 index 00000000..76d3a338 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json @@ -0,0 +1,24 @@ +{ + "name": "openSUSE 13.2", + "input": { + "/etc/SuSE-release": "openSUSE 13.2 (x86_64)\nVERSION = 13.2\nCODENAME = Harlequin\n# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead", + "/etc/os-release": "NAME=openSUSE\nVERSION=\"13.2 (Harlequin)\"\nVERSION_ID=\"13.2\"\nPRETTY_NAME=\"openSUSE 13.2 (Harlequin) (x86_64)\"\nID=opensuse\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:opensuse:13.2\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://opensuse.org/\"\nID_LIKE=\"suse\"" + }, + "platform.dist": ["SuSE", "13.2", "x86_64"], + "distro": { + "codename": "", + "id": "opensuse-harlequin", + "name": "openSUSE Harlequin", + "version": "13.2", + "version_best": "13.2", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "openSUSE", + "distribution_major_version": "13", + "distribution_release": "2", + "os_family": "Suse", + "distribution_version": "13.2" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json new file mode 100644 index 00000000..54f1265c --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json @@ -0,0 +1,23 @@ +{ + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "opensuse-leap", + "name": "openSUSE Leap", + "version": "15.0", + "version_best": "15.0", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"openSUSE Leap\"\n# VERSION=\"15.0\"\nID=opensuse-leap\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.0\"\nPRETTY_NAME=\"openSUSE Leap 15.0\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.0\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n" + }, + "name": "openSUSE Leap 15.0", + "result": { + "distribution_release": "0", + "distribution": "openSUSE Leap", + "distribution_major_version": "15", + "os_family": "Suse", + "distribution_version": "15.0" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json new file mode 100644 index 00000000..d029423b --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json @@ -0,0 +1,36 @@ +{ + "name": "openSUSE Leap 15.1", + "distro": { + "codename": "", + "id": "opensuse-leap", + "name": "openSUSE Leap", + "version": "15.1", + "version_best": "15.1", + "lsb_release_info": {}, + "os_release_info": { + "name": "openSUSE Leap", + "version": "15.1", + "codename": "", + "id": "opensuse-leap", + "id_like": "suse opensuse", + "version_id": "15.1", + "pretty_name": "openSUSE Leap 15.1", + "ansi_color": "0;32", + "cpe_name": "cpe:/o:opensuse:leap:15.1", + "bug_report_url": "https://bugs.opensuse.org", + "home_url": "https://www.opensuse.org/" + } + }, + "input": { + 
"/etc/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"15.1\"\nID=\"opensuse-leap\"\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.1\"\nPRETTY_NAME=\"openSUSE Leap 15.1\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n", + "/usr/lib/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"15.1\"\nID=\"opensuse-leap\"\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.1\"\nPRETTY_NAME=\"openSUSE Leap 15.1\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n" + }, + "platform.dist": ["opensuse-leap", "15.1", ""], + "result": { + "distribution": "openSUSE Leap", + "distribution_version": "15.1", + "distribution_release": "1", + "distribution_major_version": "15", + "os_family": "Suse" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json new file mode 100644 index 00000000..2142932e --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json @@ -0,0 +1,24 @@ +{ + "name": "openSUSE Leap 42.1", + "input": { + "/etc/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"42.1\"\nVERSION_ID=\"42.1\"\nPRETTY_NAME=\"openSUSE Leap 42.1 (x86_64)\"\nID=opensuse\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:opensuse:42.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://opensuse.org/\"\nID_LIKE=\"suse\"", + "/etc/SuSE-release": "openSUSE 42.1 (x86_64)\nVERSION = 42.1\nCODENAME = Malachite\n# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead" + }, + "platform.dist": ["SuSE", "42.1", "x86_64"], + "distro": { + "codename": "", + "id": "opensuse-leap", + "name": "openSUSE Leap", + "version": "42.1", + "version_best": "42.1", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "openSUSE Leap", + "distribution_major_version": "42", + "distribution_release": "1", + "os_family": "Suse", + "distribution_version": "42.1" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json new file mode 100644 index 00000000..db1a26ca --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json @@ -0,0 +1,23 @@ +{ + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "opensuse-tumbleweed", + "name": "openSUSE Tumbleweed", + "version": "20160917", + "version_best": "20160917", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"openSUSE Tumbleweed\"\n# VERSION=\"20160917\"\nID=opensuse\nID_LIKE=\"suse\"\nVERSION_ID=\"20160917\"\nPRETTY_NAME=\"openSUSE Tumbleweed\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:tumbleweed:20160917\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n" + }, + "name": "openSUSE Tumbleweed 20160917", + "result": { + "distribution_release": "", + "distribution": "openSUSE Tumbleweed", + "distribution_major_version": "20160917", + "os_family": "Suse", + "distribution_version": "20160917" + } +} \ No newline at end of file diff --git 
a/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json b/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json new file mode 100644 index 00000000..d3184ef6 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json @@ -0,0 +1,29 @@ +{ + "name": "Pop!_OS 20.04", + "distro": { + "codename": "focal", + "id": "pop", + "name": "Pop!_OS", + "version": "20.04", + "version_best": "20.04", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Pop!_OS\"\nVERSION=\"20.04\"\nID=pop\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"Pop!_OS 20.04\"\nVERSION_ID=\"20.04\"\nHOME_URL=\"https://system76.com/pop\"\nSUPPORT_URL=\"http://support.system76.com\"\nBUG_REPORT_URL=\"https://github.com/pop-os/pop/issues\"\nPRIVACY_POLICY_URL=\"https://system76.com/privacy\"\nVERSION_CODENAME=focal\nUBUNTU_CODENAME=focal\nLOGO=distributor-logo-pop-os\n", + "/etc/lsb-release": "DISTRIB_ID=Pop\nDISTRIB_RELEASE=20.04\nDISTRIB_CODENAME=focal\nDISTRIB_DESCRIPTION=\"Pop!_OS 20.04\"\n", + "/usr/lib/os-release": "NAME=\"Pop!_OS\"\nVERSION=\"20.04\"\nID=pop\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"Pop!_OS 20.04\"\nVERSION_ID=\"20.04\"\nHOME_URL=\"https://system76.com/pop\"\nSUPPORT_URL=\"http://support.system76.com\"\nBUG_REPORT_URL=\"https://github.com/pop-os/pop/issues\"\nPRIVACY_POLICY_URL=\"https://system76.com/privacy\"\nVERSION_CODENAME=focal\nUBUNTU_CODENAME=focal\nLOGO=distributor-logo-pop-os\n" + }, + "platform.dist": [ + "pop", + "20.04", + "focal" + ], + "result": { + "distribution": "Pop!_OS", + "distribution_version": "20.04", + "distribution_release": "focal", + "distribution_major_version": "20", + "os_family": "Debian" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json new file mode 100644 index 00000000..27a77d0a --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json @@ -0,0 +1,25 @@ +{ + "name": "RedHat 6.7", + "platform.dist": ["redhat", "6.7", "Santiago"], + "distro": { + "codename": "Santiago", + "id": "rhel", + "name": "RedHat Enterprise Linux", + "version": "6.7", + "version_best": "6.7", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/redhat-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n", + "/etc/lsb-release": "LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch\n", + "/etc/system-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n" + }, + "result": { + "distribution_release": "Santiago", + "distribution": "RedHat", + "distribution_major_version": "6", + "os_family": "RedHat", + "distribution_version": "6.7" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json new file mode 100644 index 00000000..3900f82a --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json @@ -0,0 +1,25 @@ +{ + "name": "RedHat 7.2", + "platform.dist": ["redhat", "7.2", "Maipo"], + "distro": { + "codename": "Maipo", + "id": "rhel", + "name": "RedHat Enterprise Linux", + "version": "7.2", + "version_best": "7.2", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + 
"/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n", + "/etc/os-release": "NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.2 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"7.2\"\nPRETTY_NAME=\"Red Hat Enterprise Linux Server 7.2 (Maipo)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.2:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=7.2\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7.2\"\n", + "/etc/system-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n" + }, + "result": { + "distribution_release": "Maipo", + "distribution": "RedHat", + "distribution_major_version": "7", + "os_family": "RedHat", + "distribution_version": "7.2" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json new file mode 100644 index 00000000..b240efce --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json @@ -0,0 +1,43 @@ +{ + "name": "RedHat 7.7", + "distro": { + "codename": "Maipo", + "id": "rhel", + "name": "Red Hat Enterprise Linux Server", + "version": "7.7", + "version_best": "7.7", + "lsb_release_info": {}, + "os_release_info": { + "name": "Red Hat Enterprise Linux Server", + "version": "7.7 (Maipo)", + "id": "rhel", + "id_like": "fedora", + "variant": "Server", + "variant_id": "server", + "version_id": "7.7", + "pretty_name": "Red Hat Enterprise Linux Server 7.7 (Maipo)", + "ansi_color": "0;31", + "cpe_name": "cpe:/o:redhat:enterprise_linux:7.7:GA:server", + "home_url": "https://www.redhat.com/", + "bug_report_url": "https://bugzilla.redhat.com/", + "redhat_bugzilla_product": "Red Hat Enterprise Linux 7", + "redhat_bugzilla_product_version": "7.7", + "redhat_support_product": "Red Hat Enterprise Linux", + "redhat_support_product_version": "7.7", + "codename": "Maipo" + } + }, + "input": { + "/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.7 (Maipo)\n", + "/etc/system-release": "Red Hat Enterprise Linux Server release 7.7 (Maipo)\n", + "/etc/os-release": "NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.7 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVARIANT=\"Server\"\nVARIANT_ID=\"server\"\nVERSION_ID=\"7.7\"\nPRETTY_NAME=\"Red Hat Enterprise Linux Server 7.7 (Maipo)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.7:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=7.7\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7.7\"\n" + }, + "platform.dist": ["rhel", "7.7", "Maipo"], + "result": { + "distribution": "RedHat", + "distribution_version": "7.7", + "distribution_release": "Maipo", + "distribution_major_version": "7", + "os_family": "RedHat" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json new file mode 100644 index 00000000..be71f1cb --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json @@ -0,0 +1,23 @@ +{ + "name": "SLES 11.3", + "input": { + "/etc/SuSE-release": 
"SUSE Linux Enterprise Server 11 (x86_64)\nVERSION = 11\nPATCHLEVEL = 3" + }, + "platform.dist": ["SuSE", "11", "x86_64"], + "distro": { + "codename": "", + "id": "sles", + "name": "SUSE Linux Enterprise Server", + "version": "11", + "version_best": "11", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "SLES", + "distribution_major_version": "11", + "distribution_release": "3", + "os_family": "Suse", + "distribution_version": "11.3" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json new file mode 100644 index 00000000..3e4012a1 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json @@ -0,0 +1,24 @@ +{ + "name": "SLES 11.4", + "input": { + "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 11 (x86_64)\nVERSION = 11\nPATCHLEVEL = 4", + "/etc/os-release": "NAME=\"SLES\"\nVERSION=\"11.4\"\nVERSION_ID=\"11.4\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 11 SP4\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:11:4\"" + }, + "platform.dist": ["SuSE", "11", "x86_64"], + "distro": { + "codename": "", + "id": "sles", + "name": "SUSE Linux Enterprise Server", + "version": "11.4", + "version_best": "11.4", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "SLES", + "distribution_major_version": "11", + "distribution_release": "4", + "os_family": "Suse", + "distribution_version": "11.4" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json new file mode 100644 index 00000000..e84bbe5c --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json @@ -0,0 +1,24 @@ +{ + "name": "SLES 12 SP0", + "input": { + "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 12 (x86_64)\nVERSION = 12\nPATCHLEVEL = 0\n# This file is deprecated and will be removed in a future service pack or release.\n# Please check /etc/os-release for details about this release.", + "/etc/os-release": "NAME=\"SLES\"\nVERSION=\"12\"\nVERSION_ID=\"12\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 12\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:12\"" + }, + "platform.dist": ["SuSE", "12", "x86_64"], + "distro": { + "codename": "", + "id": "sles", + "name": "SUSE Linux Enterprise Server", + "version": "12", + "version_best": "12", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "SLES", + "distribution_major_version": "12", + "distribution_release": "0", + "os_family": "Suse", + "distribution_version": "12" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json new file mode 100644 index 00000000..c78d53d8 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json @@ -0,0 +1,24 @@ +{ + "name": "SLES 12 SP1", + "input": { + "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 12 (x86_64)\nVERSION = 12\nPATCHLEVEL = 0\n# This file is deprecated and will be removed in a future service pack or release.\n# Please check /etc/os-release for details about this release.", + "/etc/os-release": 
"NAME=\"SLES\"\nVERSION=\"12-SP1\"\nVERSION_ID=\"12.1\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 12 SP1\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:12:sp1\"" + }, + "platform.dist": ["SuSE", "12", "x86_64"], + "distro": { + "codename": "", + "id": "sles", + "name": "SUSE Linux Enterprise Server", + "version": "12.1", + "version_best": "12.1", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "SLES", + "distribution_major_version": "12", + "distribution_release": "1", + "os_family": "Suse", + "distribution_version": "12.1" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json b/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json new file mode 100644 index 00000000..ae01a106 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json @@ -0,0 +1,24 @@ +{ + "name": "SmartOS Global Zone", + "uname_v": "joyent_20160330T234717Z", + "result": { + "distribution_release": "SmartOS 20160330T234717Z x86_64", + "distribution": "SmartOS", + "os_family": "Solaris", + "distribution_version": "joyent_20160330T234717Z" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " SmartOS 20160330T234717Z x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n Copyright 2010-2012 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n\n Built with the following components:\n\n[\n { \"repo\": \"smartos-live\", \"branch\": \"release-20160331\", \"rev\": \"a77c410f2afe6dc9853a915733caec3609cc50f1\", \"commit_date\": \"1459340323\", \"url\": \"git@github.com:joyent/smartos-live.git\" }\n , { \"repo\": \"illumos-joyent\", \"branch\": \"release-20160331\", \"rev\": \"ab664c06caf06e9ce7586bff956e7709df1e702e\", \"commit_date\": \"1459362533\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-joyent\" }\n , { \"repo\": \"illumos-extra\", \"branch\": \"release-20160331\", \"rev\": \"cc723855bceace3df7860b607c9e3827d47e0ff4\", \"commit_date\": \"1458153188\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-extra\" }\n , { \"repo\": \"kvm\", \"branch\": \"release-20160331\", \"rev\": \"a8befd521c7e673749c64f118585814009fe4b73\", \"commit_date\": \"1450081968\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm\" }\n , { \"repo\": \"kvm-cmd\", \"branch\": \"release-20160331\", \"rev\": \"c1a197c8e4582c68739ab08f7e3198b2392c9820\", \"commit_date\": \"1454723558\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm-cmd\" }\n , { \"repo\": \"mdata-client\", \"branch\": \"release-20160331\", \"rev\": \"58158c44603a3316928975deccc5d10864832770\", \"commit_date\": \"1429917227\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/mdata-client\" }\n]\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json b/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json new file mode 100644 index 00000000..8f20113e --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json @@ -0,0 +1,25 @@ +{ + "name": "SmartOS Zone", + "uname_v": "joyent_20160330T234717Z", + "result": { + "distribution_release": "SmartOS 
x86_64", + "distribution": "SmartOS", + "os_family": "Solaris", + "distribution_version": "14.3.0" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " SmartOS x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n Copyright 2010-2013 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n See joyent_20141002T182809Z for assembly date and time.\n", + "/etc/product": "Name: Joyent Instance\nImage: base64 14.3.0\nDocumentation: http://wiki.joyent.com/jpc2/Base+Instance\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json b/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json new file mode 100644 index 00000000..f3436b84 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json @@ -0,0 +1,23 @@ +{ + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "smgl", + "name": "Source Mage GNU/Linux", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/sourcemage-release": "Source Mage GNU/Linux x86_64-pc-linux-gnu\nInstalled from tarball using chroot image (Grimoire 0.61-rc) on Thu May 17 17:31:37 UTC 2012\n" + }, + "name": "SMGL NA", + "result": { + "distribution_release": "NA", + "distribution": "SMGL", + "distribution_major_version": "NA", + "os_family": "SMGL", + "distribution_version": "NA" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json new file mode 100644 index 00000000..de1dbdc8 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json @@ -0,0 +1,25 @@ +{ + "name": "Solaris 10", + "uname_r": "5.10", + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " Oracle Solaris 10 1/13 s10x_u11wos_24a X86\n Copyright (c) 1983, 2013, Oracle and/or its affiliates. All rights reserved.\n Assembled 17 January 2013\n" + }, + "platform.system": "SunOS", + "result": { + "distribution_release": "Oracle Solaris 10 1/13 s10x_u11wos_24a X86", + "distribution": "Solaris", + "os_family": "Solaris", + "distribution_major_version": "10", + "distribution_version": "10" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json new file mode 100644 index 00000000..056abe46 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json @@ -0,0 +1,25 @@ +{ + "name": "Solaris 11.3", + "uname_r": "5.11", + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " Oracle Solaris 11.3 X86\n Copyright (c) 1983, 2018, Oracle and/or its affiliates. 
All rights reserved.\n Assembled 09 May 2018\n" + }, + "platform.system": "SunOS", + "result": { + "distribution_release": "Oracle Solaris 11.3 X86", + "distribution": "Solaris", + "os_family": "Solaris", + "distribution_major_version": "11", + "distribution_version": "11.3" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json new file mode 100644 index 00000000..462d5508 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json @@ -0,0 +1,35 @@ +{ + "name": "Solaris 11.4", + "uname_r": "5.11", + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": { + "support_url": "https://support.oracle.com/", + "name": "Oracle Solaris", + "pretty_name": "Oracle Solaris 11.4", + "version": "11.4", + "id": "solaris", + "version_id": "11.4", + "build_id": "11.4.0.0.1.15.0", + "home_url": "https://www.oracle.com/solaris/", + "cpe_name": "cpe:/o:oracle:solaris:11:4" + }, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " Oracle Solaris 11.4 SPARC\n Copyright (c) 1983, 2018, Oracle and/or its affiliates. All rights reserved.\n Assembled 14 September 2018\n" + }, + "platform.system": "SunOS", + "result": { + "distribution_release": "Oracle Solaris 11.4 SPARC", + "distribution": "Solaris", + "os_family": "Solaris", + "distribution_major_version": "11", + "distribution_version": "11.4" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json new file mode 100644 index 00000000..749b8bcd --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json @@ -0,0 +1,26 @@ +{ + "name": "Solaris 11", + "uname_v": "11.0", + "uname_r": "5.11", + "result": { + "distribution_release": "Oracle Solaris 11 11/11 X86", + "distribution": "Solaris", + "os_family": "Solaris", + "distribution_major_version": "11", + "distribution_version": "11" + }, + "platform.dist": ["", "", ""], + "distro": { + "codename": "", + "id": "", + "name": "", + "version": "", + "version_best": "", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/release": " Oracle Solaris 11 11/11 X86\n Copyright (c) 1983, 2011, Oracle and/or its affiliates. 
All rights reserved.\n Assembled 18 October 2011\n" + }, + "platform.system": "SunOS" +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json b/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json new file mode 100644 index 00000000..7cb9c12b --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json @@ -0,0 +1,40 @@ +{ + "name": "SteamOS 2.0", + "input": { + "/etc/os-release": "PRETTY_NAME=\"SteamOS GNU/Linux 2.0 (brewmaster)\"\nNAME=\"SteamOS GNU/Linux\"\nVERSION_ID=\"2\"\nVERSION=\"2 (brewmaster)\"\nID=steamos\nID_LIKE=debian\nHOME_URL=\"http://www.steampowered.com/\"\nSUPPORT_URL=\"http://support.steampowered.com/\"\nBUG_REPORT_URL=\"http://support.steampowered.com/\"", + "/etc/lsb-release": "DISTRIB_ID=SteamOS\nDISTRIB_RELEASE=2.0\nDISTRIB_CODENAME=brewmaster\nDISTRIB_DESCRIPTION=\"SteamOS 2.0\"" + }, + "platform.dist": ["Steamos", "2.0", "brewmaster"], + "distro": { + "codename": "brewmaster", + "id": "steamos", + "name": "SteamOS GNU/Linux", + "version": "2.0", + "version_best": "2.0", + "os_release_info": { + "bug_report_url": "http://support.steampowered.com/", + "id_like": "debian", + "version_id": "2", + "pretty_name": "SteamOS GNU/Linux 2.0 (brewmaster)", + "version": "2 (brewmaster)", + "home_url": "http://www.steampowered.com/", + "name": "SteamOS GNU/Linux", + "support_url": "http://support.steampowered.com/", + "codename": "brewmaster", + "id": "steamos" + }, + "lsb_release_info": { + "codename": "brewmaster", + "description": "SteamOS 2.0", + "distributor_id": "SteamOS", + "release": "2.0" + } + }, + "result": { + "distribution": "SteamOS", + "distribution_major_version": "2", + "distribution_release": "brewmaster", + "os_family": "Debian", + "distribution_version": "2.0" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json new file mode 100644 index 00000000..38a6040f --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json @@ -0,0 +1,23 @@ +{ + "name": "Ubuntu 10.04 guess", + "input": { + "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=10.04\nDISTRIB_CODENAME=lucid\nDISTRIB_DESCRIPTION=\"Ubuntu 10.04.4 LTS" + }, + "platform.dist": ["Ubuntu", "10.04", "lucid"], + "distro": { + "codename": "lucid", + "id": "ubuntu", + "name": "Ubuntu", + "version": "10.04", + "version_best": "10.04.1", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Ubuntu", + "distribution_major_version": "10", + "distribution_release": "lucid", + "os_family": "Debian", + "distribution_version": "10.04" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json new file mode 100644 index 00000000..01203b5b --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json @@ -0,0 +1,24 @@ +{ + "name": "Ubuntu 12.04", + "input": { + "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=12.04\nDISTRIB_CODENAME=precise\nDISTRIB_DESCRIPTION=\"Ubuntu 12.04.5 LTS\"", + "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"12.04.5 LTS, Precise Pangolin\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu precise (12.04.5 LTS)\"\nVERSION_ID=\"12.04\"" + }, + "platform.dist": 
["Ubuntu", "12.04", "precise"], + "distro": { + "codename": "precise", + "id": "ubuntu", + "name": "Ubuntu", + "version": "12.04", + "version_best": "12.04.5", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Ubuntu", + "distribution_major_version": "12", + "distribution_release": "precise", + "os_family": "Debian", + "distribution_version": "12.04" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json new file mode 100644 index 00000000..5d5af0ae --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json @@ -0,0 +1,24 @@ +{ + "name": "Ubuntu 14.04", + "input": { + "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=14.04\nDISTRIB_CODENAME=trusty\nDISTRIB_DESCRIPTION=\"Ubuntu 14.04.4 LTS\"", + "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"14.04.4 LTS, Trusty Tahr\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 14.04.4 LTS\"\nVERSION_ID=\"14.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"" + }, + "platform.dist": ["Ubuntu", "14.04", "trusty"], + "distro": { + "codename": "trusty", + "id": "ubuntu", + "name": "Ubuntu", + "version": "14.04", + "version_best": "14.04.4", + "os_release_info": {}, + "lsb_release_info": {} + }, + "result": { + "distribution": "Ubuntu", + "distribution_major_version": "14", + "distribution_release": "trusty", + "os_family": "Debian", + "distribution_version": "14.04" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json new file mode 100644 index 00000000..f8f50a9d --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json @@ -0,0 +1,24 @@ +{ + "platform.dist": ["Ubuntu", "16.04", "xenial"], + "distro": { + "codename": "xenial", + "id": "ubuntu", + "name": "Ubuntu", + "version": "16.04", + "version_best": "16.04.6", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"16.04 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 16.04 LTS\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\nUBUNTU_CODENAME=xenial\n", + "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04 LTS\"\n" + }, + "name": "Ubuntu 16.04", + "result": { + "distribution_release": "xenial", + "distribution": "Ubuntu", + "distribution_major_version": "16", + "os_family": "Debian", + "distribution_version": "16.04" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json new file mode 100644 index 00000000..12d15b53 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json @@ -0,0 +1,39 @@ +{ + "name": "Ubuntu 18.04", + "distro": { + "codename": "bionic", + "id": "ubuntu", + "name": "Ubuntu", + "version": "18.04", + "version_best": "18.04.3", + "lsb_release_info": {}, + "os_release_info": { + "name": "Ubuntu", + "version": "18.04.3 LTS (Bionic Beaver)", + "id": 
"ubuntu", + "id_like": "debian", + "pretty_name": "Ubuntu 18.04.3 LTS", + "version_id": "18.04", + "home_url": "https://www.ubuntu.com/", + "support_url": "https://help.ubuntu.com/", + "bug_report_url": "https://bugs.launchpad.net/ubuntu/", + "privacy_policy_url": "https://www.ubuntu.com/legal/terms-and-policies/privacy-policy", + "version_codename": "bionic", + "ubuntu_codename": "bionic", + "codename": "bionic" + } + }, + "input": { + "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"18.04.3 LTS (Bionic Beaver)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 18.04.3 LTS\"\nVERSION_ID=\"18.04\"\nHOME_URL=\"https://www.ubuntu.com/\"\nSUPPORT_URL=\"https://help.ubuntu.com/\"\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\nVERSION_CODENAME=bionic\nUBUNTU_CODENAME=bionic\n", + "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=18.04\nDISTRIB_CODENAME=bionic\nDISTRIB_DESCRIPTION=\"Ubuntu 18.04.3 LTS\"\n", + "/usr/lib/os-release": "NAME=\"Ubuntu\"\nVERSION=\"18.04.3 LTS (Bionic Beaver)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 18.04.3 LTS\"\nVERSION_ID=\"18.04\"\nHOME_URL=\"https://www.ubuntu.com/\"\nSUPPORT_URL=\"https://help.ubuntu.com/\"\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\nVERSION_CODENAME=bionic\nUBUNTU_CODENAME=bionic\n" + }, + "platform.dist": ["ubuntu", "18.04", "bionic"], + "result": { + "distribution": "Ubuntu", + "distribution_version": "18.04", + "distribution_release": "bionic", + "distribution_major_version": "18", + "os_family": "Debian" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json b/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json new file mode 100644 index 00000000..d9c2f474 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json @@ -0,0 +1,25 @@ +{ + "name": "Virtuozzo 7.3", + "platform.dist": ["redhat", "7.3", ""], + "distro": { + "codename": "", + "id": "virtuozzo", + "name": "Virtuozzo Linux", + "version": "7.3", + "version_best": "7.3", + "os_release_info": {}, + "lsb_release_info": {} + }, + "input": { + "/etc/redhat-release": "Virtuozzo Linux release 7.3\n", + "/etc/os-release": "NAME=\"Virtuozzo\"\nVERSION=\"7.0.3\"\nID=\"virtuozzo\"\nID_LIKE=\"rhel fedora\"\nVERSION_ID=\"7\"\nPRETTY_NAME=\"Virtuozzo release 7.0.3\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:virtuozzoproject:vz:7\"\nHOME_URL=\"http://www.virtuozzo.com\"\nBUG_REPORT_URL=\"https://bugs.openvz.org/\"\n", + "/etc/system-release": "Virtuozzo release 7.0.3 (640)\n" + }, + "result": { + "distribution_release": "NA", + "distribution": "Virtuozzo", + "distribution_major_version": "7", + "os_family": "RedHat", + "distribution_version": "7.3" + } +} \ No newline at end of file diff --git a/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py b/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py new file mode 100644 index 00000000..ab465eae --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
+import pytest + +from ansible.module_utils.facts.system.distribution import DistributionFiles + + +@pytest.mark.parametrize('realpath', ('SUSE_SLES_SAP.prod', 'SLES_SAP.prod')) +def test_distribution_sles4sap_suse_sles_sap(mock_module, mocker, realpath): + mocker.patch('os.path.islink', return_value=True) + mocker.patch('os.path.realpath', return_value='/etc/products.d/' + realpath) + + test_input = { + 'name': 'SUSE', + 'path': '', + 'data': 'suse', + 'collected_facts': None, + } + + test_result = ( + True, + { + 'distribution': 'SLES_SAP', + } + ) + + distribution = DistributionFiles(module=mock_module()) + assert test_result == distribution.parse_distribution_file_SUSE(**test_input) diff --git a/test/units/module_utils/facts/system/distribution/test_distribution_version.py b/test/units/module_utils/facts/system/distribution/test_distribution_version.py new file mode 100644 index 00000000..091ec8ad --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/test_distribution_version.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import glob +import json +import os +import pytest +from itertools import product + +from ansible.module_utils.six.moves import builtins + +# the module we are actually testing (sort of) +from ansible.module_utils.facts.system.distribution import DistributionFactCollector + +# to generate the testcase data, you can use the script gen_distribution_version_testcase.py in hacking/tests +TESTSETS = [] + +for datafile in glob.glob(os.path.join(os.path.dirname(__file__), 'fixtures/*.json')): + with open(os.path.join(os.path.dirname(__file__), '%s' % datafile)) as f: + TESTSETS.append(json.loads(f.read())) + + +@pytest.mark.parametrize("stdin, testcase", product([{}], TESTSETS), ids=lambda x: x.get('name'), indirect=['stdin']) +def test_distribution_version(am, mocker, testcase): + """tests the distribution parsing code of the Facts class + + testsets have + * a name (for output/debugging only) + * input files that are faked + * those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions + * all files that are not listed here are assumed to not exist at all + * the output of ansible.module_utils.distro.linux_distribution() [called platform.dist() for historical reasons] + * results for the ansible variables distribution* and os_family + + """ + + # prepare some mock functions to get the testdata in + def mock_get_file_content(fname, default=None, strip=True): + """give fake content if it exists, otherwise pretend the file is empty""" + data = default + if fname in testcase['input']: + # for debugging + print('faked %s for %s' % (fname, testcase['name'])) + data = testcase['input'][fname].strip() + if strip and data is not None: + data = data.strip() + return data + + def mock_get_uname(am, flags): + if '-v' in flags: + return testcase.get('uname_v', None) + elif '-r' in flags: + return testcase.get('uname_r', None) + else: + return None + + def mock_file_exists(fname, allow_empty=False): + if fname not in testcase['input']: + return False + + if allow_empty: + return True + return bool(len(testcase['input'][fname])) + + def mock_platform_system(): + return testcase.get('platform.system', 'Linux') + + def mock_platform_release(): + return testcase.get('platform.release', '') + + 
def mock_platform_version(): + return testcase.get('platform.version', '') + + def mock_distro_name(): + return testcase['distro']['name'] + + def mock_distro_id(): + return testcase['distro']['id'] + + def mock_distro_version(best=False): + if best: + return testcase['distro']['version_best'] + return testcase['distro']['version'] + + def mock_distro_codename(): + return testcase['distro']['codename'] + + def mock_distro_os_release_info(): + return testcase['distro']['os_release_info'] + + def mock_distro_lsb_release_info(): + return testcase['distro']['lsb_release_info'] + + def mock_open(filename, mode='r'): + if filename in testcase['input']: + file_object = mocker.mock_open(read_data=testcase['input'][filename]).return_value + file_object.__iter__.return_value = testcase['input'][filename].splitlines(True) + else: + file_object = real_open(filename, mode) + return file_object + + def mock_os_path_is_file(filename): + if filename in testcase['input']: + return True + return False + + mocker.patch('ansible.module_utils.facts.system.distribution.get_file_content', mock_get_file_content) + mocker.patch('ansible.module_utils.facts.system.distribution.get_uname', mock_get_uname) + mocker.patch('ansible.module_utils.facts.system.distribution._file_exists', mock_file_exists) + mocker.patch('ansible.module_utils.distro.name', mock_distro_name) + mocker.patch('ansible.module_utils.distro.id', mock_distro_id) + mocker.patch('ansible.module_utils.distro.version', mock_distro_version) + mocker.patch('ansible.module_utils.distro.codename', mock_distro_codename) + mocker.patch( + 'ansible.module_utils.common.sys_info.distro.os_release_info', + mock_distro_os_release_info) + mocker.patch( + 'ansible.module_utils.common.sys_info.distro.lsb_release_info', + mock_distro_lsb_release_info) + mocker.patch('os.path.isfile', mock_os_path_is_file) + mocker.patch('platform.system', mock_platform_system) + mocker.patch('platform.release', mock_platform_release) + mocker.patch('platform.version', mock_platform_version) + + real_open = builtins.open + mocker.patch.object(builtins, 'open', new=mock_open) + + # run Facts() + distro_collector = DistributionFactCollector() + generated_facts = distro_collector.collect(am) + + # compare with the expected output + + # testcase['result'] has a list of variables and values it expects Facts() to set + for key, val in testcase['result'].items(): + assert key in generated_facts + msg = 'Comparing value of %s on %s, should: %s, is: %s' %\ + (key, testcase['name'], val, generated_facts[key]) + assert generated_facts[key] == val, msg diff --git a/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py new file mode 100644 index 00000000..c0957566 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import pytest + +from ansible.module_utils.facts.system.distribution import DistributionFiles + + +@pytest.fixture +def test_input(): + return { + 'name': 'Clearlinux', + 'path': '/usr/lib/os-release', + 'collected_facts': None, + } + + +def test_parse_distribution_file_clear_linux(mock_module, test_input): + 
test_input['data'] = open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files/ClearLinux')).read() + + result = ( + True, + { + 'distribution': 'Clear Linux OS', + 'distribution_major_version': '28120', + 'distribution_release': 'clear-linux-os', + 'distribution_version': '28120' + } + ) + + distribution = DistributionFiles(module=mock_module()) + assert result == distribution.parse_distribution_file_ClearLinux(**test_input) + + +@pytest.mark.parametrize('distro_file', ('CoreOS', 'LinuxMint')) +def test_parse_distribution_file_clear_linux_no_match(mock_module, distro_file, test_input): + """ + Test against data from Linux Mint and CoreOS to ensure we do not get a reported + match from parse_distribution_file_ClearLinux() + """ + test_input['data'] = open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files', distro_file)).read() + + result = (False, {}) + + distribution = DistributionFiles(module=mock_module()) + assert result == distribution.parse_distribution_file_ClearLinux(**test_input) diff --git a/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py new file mode 100644 index 00000000..53fd4ea1 --- /dev/null +++ b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import pytest + +from ansible.module_utils.facts.system.distribution import DistributionFiles + + +@pytest.mark.parametrize( + ('distro_file', 'expected_version'), + ( + ('Slackware', '14.1'), + ('SlackwareCurrent', '14.2+'), + ) +) +def test_parse_distribution_file_slackware(mock_module, distro_file, expected_version): + test_input = { + 'name': 'Slackware', + 'data': open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files', distro_file)).read(), + 'path': '/etc/os-release', + 'collected_facts': None, + } + + result = ( + True, + { + 'distribution': 'Slackware', + 'distribution_version': expected_version + } + ) + distribution = DistributionFiles(module=mock_module()) + assert result == distribution.parse_distribution_file_Slackware(**test_input) diff --git a/test/units/module_utils/facts/system/test_cmdline.py b/test/units/module_utils/facts/system/test_cmdline.py new file mode 100644 index 00000000..59cfd118 --- /dev/null +++ b/test/units/module_utils/facts/system/test_cmdline.py @@ -0,0 +1,67 @@ +# unit tests for ansible system cmdline fact collectors +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest +from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector + +test_data = [ + ( + "crashkernel=auto rd.lvm.lv=fedora_test-elementary-os/root rd.lvm.lv=fedora_test-elementary-os/swap rhgb quiet", + { + 'crashkernel': 'auto', + 'quiet': True, + 'rd.lvm.lv': [ + 'fedora_test-elementary-os/root', + 'fedora_test-elementary-os/swap', + ], + 'rhgb': True + } + ), + ( + "root=/dev/mapper/vg_ssd-root ro rd.lvm.lv=fedora_xenon/root rd.lvm.lv=fedora_xenon/swap rhgb quiet " + 
"resume=/dev/mapper/fedora_xenon-swap crashkernel=128M zswap.enabled=1", + { + 'crashkernel': '128M', + 'quiet': True, + 'rd.lvm.lv': [ + 'fedora_xenon/root', + 'fedora_xenon/swap' + ], + 'resume': '/dev/mapper/fedora_xenon-swap', + 'rhgb': True, + 'ro': True, + 'root': '/dev/mapper/vg_ssd-root', + 'zswap.enabled': '1' + } + ), + ( + "rhgb", + { + "rhgb": True + } + ), + ( + "root=/dev/mapper/vg_ssd-root", + { + 'root': '/dev/mapper/vg_ssd-root', + } + ), + ( + "", + {}, + ) +] + +test_ids = ['lvm_1', 'lvm_2', 'single_without_equal_sign', 'single_with_equal_sign', 'blank_cmdline'] + + +@pytest.mark.parametrize("cmdline, cmdline_dict", test_data, ids=test_ids) +def test_cmd_line_factor(cmdline, cmdline_dict): + cmdline_facter = CmdLineFactCollector() + parsed_cmdline = cmdline_facter._parse_proc_cmdline_facts(data=cmdline) + assert parsed_cmdline == cmdline_dict diff --git a/test/units/module_utils/facts/system/test_lsb.py b/test/units/module_utils/facts/system/test_lsb.py new file mode 100644 index 00000000..e2ed2ec0 --- /dev/null +++ b/test/units/module_utils/facts/system/test_lsb.py @@ -0,0 +1,108 @@ +# unit tests for ansible system lsb fact collectors +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import Mock, patch + +from .. 
base import BaseFactsTest + +from ansible.module_utils.facts.system.lsb import LSBFactCollector + + +lsb_release_a_fedora_output = ''' +LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch +Distributor ID: Fedora +Description: Fedora release 25 (Twenty Five) +Release: 25 +Codename: TwentyFive +''' # noqa + +# FIXME: a +etc_lsb_release_ubuntu14 = '''DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=14.04 +DISTRIB_CODENAME=trusty +DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS" +''' +etc_lsb_release_no_decimal = '''DISTRIB_ID=AwesomeOS +DISTRIB_RELEASE=11 +DISTRIB_CODENAME=stonehenge +DISTRIB_DESCRIPTION="AwesomeÖS 11" +''' + + +class TestLSBFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'lsb'] + valid_subsets = ['lsb'] + fact_namespace = 'ansible_lsb' + collector_class = LSBFactCollector + + def _mock_module(self): + mock_module = Mock() + mock_module.params = {'gather_subset': self.gather_subset, + 'gather_timeout': 10, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value='/usr/bin/lsb_release') + mock_module.run_command = Mock(return_value=(0, lsb_release_a_fedora_output, '')) + return mock_module + + def test_lsb_release_bin(self): + module = self._mock_module() + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + self.assertEqual(facts_dict['lsb']['release'], '25') + self.assertEqual(facts_dict['lsb']['id'], 'Fedora') + self.assertEqual(facts_dict['lsb']['description'], 'Fedora release 25 (Twenty Five)') + self.assertEqual(facts_dict['lsb']['codename'], 'TwentyFive') + self.assertEqual(facts_dict['lsb']['major_release'], '25') + + def test_etc_lsb_release(self): + module = self._mock_module() + module.get_bin_path = Mock(return_value=None) + with patch('ansible.module_utils.facts.system.lsb.os.path.exists', + return_value=True): + with patch('ansible.module_utils.facts.system.lsb.get_file_lines', + return_value=etc_lsb_release_ubuntu14.splitlines()): + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + self.assertEqual(facts_dict['lsb']['release'], '14.04') + self.assertEqual(facts_dict['lsb']['id'], 'Ubuntu') + self.assertEqual(facts_dict['lsb']['description'], 'Ubuntu 14.04.3 LTS') + self.assertEqual(facts_dict['lsb']['codename'], 'trusty') + + def test_etc_lsb_release_no_decimal_release(self): + module = self._mock_module() + module.get_bin_path = Mock(return_value=None) + with patch('ansible.module_utils.facts.system.lsb.os.path.exists', + return_value=True): + with patch('ansible.module_utils.facts.system.lsb.get_file_lines', + return_value=etc_lsb_release_no_decimal.splitlines()): + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module) + + self.assertIsInstance(facts_dict, dict) + self.assertEqual(facts_dict['lsb']['release'], '11') + self.assertEqual(facts_dict['lsb']['id'], 'AwesomeOS') + self.assertEqual(facts_dict['lsb']['description'], 'AwesomeÖS 11') + self.assertEqual(facts_dict['lsb']['codename'], 'stonehenge') diff --git a/test/units/module_utils/facts/test_ansible_collector.py b/test/units/module_utils/facts/test_ansible_collector.py new file mode 100644 index 00000000..6c236486 --- /dev/null +++ b/test/units/module_utils/facts/test_ansible_collector.py @@ -0,0 +1,504 @@ +# -*- coding: utf-8 -*- +# +# +# Ansible is 
free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# for testing +from units.compat import unittest +from units.compat.mock import Mock, patch + +from ansible.module_utils.facts import collector +from ansible.module_utils.facts import ansible_collector +from ansible.module_utils.facts import namespace + +from ansible.module_utils.facts.other.facter import FacterFactCollector +from ansible.module_utils.facts.other.ohai import OhaiFactCollector + +from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector +from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector +from ansible.module_utils.facts.system.date_time import DateTimeFactCollector +from ansible.module_utils.facts.system.env import EnvFactCollector +from ansible.module_utils.facts.system.distribution import DistributionFactCollector +from ansible.module_utils.facts.system.dns import DnsFactCollector +from ansible.module_utils.facts.system.fips import FipsFactCollector +from ansible.module_utils.facts.system.local import LocalFactCollector +from ansible.module_utils.facts.system.lsb import LSBFactCollector +from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector +from ansible.module_utils.facts.system.platform import PlatformFactCollector +from ansible.module_utils.facts.system.python import PythonFactCollector +from ansible.module_utils.facts.system.selinux import SelinuxFactCollector +from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector +from ansible.module_utils.facts.system.user import UserFactCollector + +# from ansible.module_utils.facts.hardware.base import HardwareCollector +from ansible.module_utils.facts.network.base import NetworkCollector +from ansible.module_utils.facts.virtual.base import VirtualCollector + +ALL_COLLECTOR_CLASSES = \ + [PlatformFactCollector, + DistributionFactCollector, + SelinuxFactCollector, + ApparmorFactCollector, + SystemCapabilitiesFactCollector, + FipsFactCollector, + PkgMgrFactCollector, + OpenBSDPkgMgrFactCollector, + ServiceMgrFactCollector, + LSBFactCollector, + DateTimeFactCollector, + UserFactCollector, + LocalFactCollector, + EnvFactCollector, + DnsFactCollector, + PythonFactCollector, + # FIXME: re-enable when Hardware() doesn't munge self.facts + # HardwareCollector + NetworkCollector, + VirtualCollector, + OhaiFactCollector, + FacterFactCollector] + + +def mock_module(gather_subset=None, + filter=None): + if gather_subset is None: + gather_subset = ['all', '!facter', '!ohai'] + if filter is None: + filter = '*' + mock_module = Mock() + mock_module.params = {'gather_subset': gather_subset, + 'gather_timeout': 5, + 'filter': filter} + mock_module.get_bin_path = Mock(return_value=None) + return mock_module + + +def _collectors(module, + all_collector_classes=None, + minimal_gather_subset=None): +
gather_subset = module.params.get('gather_subset') + if all_collector_classes is None: + all_collector_classes = ALL_COLLECTOR_CLASSES + if minimal_gather_subset is None: + minimal_gather_subset = frozenset([]) + + collector_classes = \ + collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes, + minimal_gather_subset=minimal_gather_subset, + gather_subset=gather_subset) + + collectors = [] + for collector_class in collector_classes: + collector_obj = collector_class() + collectors.append(collector_obj) + + # Add a collector that knows what gather_subset we used so it can provide a fact + collector_meta_data_collector = \ + ansible_collector.CollectorMetaDataCollector(gather_subset=gather_subset, + module_setup=True) + collectors.append(collector_meta_data_collector) + + return collectors + + +ns = namespace.PrefixFactNamespace('ansible_facts', 'ansible_') + + +# FIXME: this is brute force, but hopefully enough to get some refactoring to make facts testable +class TestInPlace(unittest.TestCase): + def _mock_module(self, gather_subset=None): + return mock_module(gather_subset=gather_subset) + + def _collectors(self, module, + all_collector_classes=None, + minimal_gather_subset=None): + return _collectors(module=module, + all_collector_classes=all_collector_classes, + minimal_gather_subset=minimal_gather_subset) + + def test(self): + gather_subset = ['all'] + mock_module = self._mock_module(gather_subset=gather_subset) + all_collector_classes = [EnvFactCollector] + collectors = self._collectors(mock_module, + all_collector_classes=all_collector_classes) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=collectors, + namespace=ns) + + res = fact_collector.collect(module=mock_module) + self.assertIsInstance(res, dict) + self.assertIn('env', res) + self.assertIn('gather_subset', res) + self.assertEqual(res['gather_subset'], ['all']) + + def test1(self): + gather_subset = ['all'] + mock_module = self._mock_module(gather_subset=gather_subset) + collectors = self._collectors(mock_module) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=collectors, + namespace=ns) + + res = fact_collector.collect(module=mock_module) + self.assertIsInstance(res, dict) + # just assert it's not almost empty + # with run_command and get_file_content mock, many facts are empty, like network + self.assertGreater(len(res), 20) + + def test_empty_all_collector_classes(self): + mock_module = self._mock_module() + all_collector_classes = [] + + collectors = self._collectors(mock_module, + all_collector_classes=all_collector_classes) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=collectors, + namespace=ns) + + res = fact_collector.collect() + self.assertIsInstance(res, dict) + # just assert it's not almost empty + self.assertLess(len(res), 3) + +# def test_facts_class(self): +# mock_module = self._mock_module() +# Facts(mock_module) + +# def test_facts_class_load_on_init_false(self): +# mock_module = self._mock_module() +# Facts(mock_module, load_on_init=False) +# # FIXME: assert something + + +class TestCollectedFacts(unittest.TestCase): + gather_subset = ['all', '!facter', '!ohai'] + min_fact_count = 30 + max_fact_count = 1000 + + # TODO: add ansible_cmdline, ansible_*_pubkey* back when TempFactCollector goes away + expected_facts = ['date_time', + 'user_id', 'distribution', + 'gather_subset', 'module_setup', + 'env'] + not_expected_facts = ['facter', 'ohai'] + + collected_facts = {} + + def 
_mock_module(self, gather_subset=None): + return mock_module(gather_subset=self.gather_subset) + + @patch('platform.system', return_value='Linux') + @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd') + def setUp(self, mock_gfc, mock_ps): + mock_module = self._mock_module() + collectors = self._collectors(mock_module) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=collectors, + namespace=ns) + self.facts = fact_collector.collect(module=mock_module, + collected_facts=self.collected_facts) + + def _collectors(self, module, + all_collector_classes=None, + minimal_gather_subset=None): + return _collectors(module=module, + all_collector_classes=all_collector_classes, + minimal_gather_subset=minimal_gather_subset) + + def test_basics(self): + self._assert_basics(self.facts) + + def test_expected_facts(self): + self._assert_expected_facts(self.facts) + + def test_not_expected_facts(self): + self._assert_not_expected_facts(self.facts) + + def _assert_basics(self, facts): + self.assertIsInstance(facts, dict) + # just assert it's not almost empty + self.assertGreaterEqual(len(facts), self.min_fact_count) + # and that is not huge number of keys + self.assertLess(len(facts), self.max_fact_count) + + # everything starts with ansible_ namespace + def _assert_ansible_namespace(self, facts): + + # FIXME: kluge for non-namespace fact + facts.pop('module_setup', None) + facts.pop('gather_subset', None) + + for fact_key in facts: + self.assertTrue(fact_key.startswith('ansible_'), + 'The fact name "%s" does not start with "ansible_"' % fact_key) + + def _assert_expected_facts(self, facts): + + facts_keys = sorted(facts.keys()) + for expected_fact in self.expected_facts: + self.assertIn(expected_fact, facts_keys) + + def _assert_not_expected_facts(self, facts): + + facts_keys = sorted(facts.keys()) + for not_expected_fact in self.not_expected_facts: + self.assertNotIn(not_expected_fact, facts_keys) + + +class ProvidesOtherFactCollector(collector.BaseFactCollector): + name = 'provides_something' + _fact_ids = set(['needed_fact']) + + def collect(self, module=None, collected_facts=None): + return {'needed_fact': 'THE_NEEDED_FACT_VALUE'} + + +class RequiresOtherFactCollector(collector.BaseFactCollector): + name = 'requires_something' + + def collect(self, module=None, collected_facts=None): + collected_facts = collected_facts or {} + fact_dict = {} + fact_dict['needed_fact'] = collected_facts['needed_fact'] + fact_dict['compound_fact'] = "compound-%s" % collected_facts['needed_fact'] + return fact_dict + + +class ConCatFactCollector(collector.BaseFactCollector): + name = 'concat_collected' + + def collect(self, module=None, collected_facts=None): + collected_facts = collected_facts or {} + fact_dict = {} + con_cat_list = [] + for key, value in collected_facts.items(): + con_cat_list.append(value) + + fact_dict['concat_fact'] = '-'.join(con_cat_list) + return fact_dict + + +class TestCollectorDepsWithFilter(unittest.TestCase): + gather_subset = ['all', '!facter', '!ohai'] + + def _mock_module(self, gather_subset=None, filter=None): + return mock_module(gather_subset=self.gather_subset, + filter=filter) + + def setUp(self): + self.mock_module = self._mock_module() + self.collectors = self._collectors(self.mock_module) + + def _collectors(self, module, + all_collector_classes=None, + minimal_gather_subset=None): + return [ProvidesOtherFactCollector(), + RequiresOtherFactCollector()] + + def test_no_filter(self): + _mock_module = 
mock_module(gather_subset=['all', '!facter', '!ohai']) + + facts_dict = self._collect(_mock_module) + + expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE', + 'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'} + + self.assertEqual(expected, facts_dict) + + def test_with_filter_on_compound_fact(self): + _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'], + filter='compound_fact') + + facts_dict = self._collect(_mock_module) + + expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'} + + self.assertEqual(expected, facts_dict) + + def test_with_filter_on_needed_fact(self): + _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'], + filter='needed_fact') + + facts_dict = self._collect(_mock_module) + + expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE'} + + self.assertEqual(expected, facts_dict) + + def test_with_filter_on_compound_gather_compound(self): + _mock_module = mock_module(gather_subset=['!all', '!any', 'compound_fact'], + filter='compound_fact') + + facts_dict = self._collect(_mock_module) + + expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'} + + self.assertEqual(expected, facts_dict) + + def test_with_filter_no_match(self): + _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'], + filter='ansible_this_doesnt_exist') + + facts_dict = self._collect(_mock_module) + + expected = {} + self.assertEqual(expected, facts_dict) + + def test_concat_collector(self): + _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai']) + + _collectors = self._collectors(_mock_module) + _collectors.append(ConCatFactCollector()) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=_collectors, + namespace=ns, + filter_spec=_mock_module.params['filter']) + + collected_facts = {} + facts_dict = fact_collector.collect(module=_mock_module, + collected_facts=collected_facts) + self.assertIn('concat_fact', facts_dict) + self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact']) + + def test_concat_collector_with_filter_on_concat(self): + _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'], + filter='concat_fact') + + _collectors = self._collectors(_mock_module) + _collectors.append(ConCatFactCollector()) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=_collectors, + namespace=ns, + filter_spec=_mock_module.params['filter']) + + collected_facts = {} + facts_dict = fact_collector.collect(module=_mock_module, + collected_facts=collected_facts) + self.assertIn('concat_fact', facts_dict) + self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact']) + self.assertTrue('compound' in facts_dict['concat_fact']) + + def _collect(self, _mock_module, collected_facts=None): + _collectors = self._collectors(_mock_module) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=_collectors, + namespace=ns, + filter_spec=_mock_module.params['filter']) + facts_dict = fact_collector.collect(module=_mock_module, + collected_facts=collected_facts) + return facts_dict + + +class ExceptionThrowingCollector(collector.BaseFactCollector): + def collect(self, module=None, collected_facts=None): + raise Exception('A collector failed') + + +class TestExceptionCollectedFacts(TestCollectedFacts): + + def _collectors(self, module, + all_collector_classes=None, + minimal_gather_subset=None): + collectors = _collectors(module=module, + all_collector_classes=all_collector_classes, + minimal_gather_subset=minimal_gather_subset) + + c = [ExceptionThrowingCollector()] 
+ collectors + return c + + +class TestOnlyExceptionCollector(TestCollectedFacts): + expected_facts = [] + min_fact_count = 0 + + def _collectors(self, module, + all_collector_classes=None, + minimal_gather_subset=None): + return [ExceptionThrowingCollector()] + + +class TestMinimalCollectedFacts(TestCollectedFacts): + gather_subset = ['!all'] + min_fact_count = 1 + max_fact_count = 10 + expected_facts = ['gather_subset', + 'module_setup'] + not_expected_facts = ['lsb'] + + +class TestFacterCollectedFacts(TestCollectedFacts): + gather_subset = ['!all', 'facter'] + min_fact_count = 1 + max_fact_count = 10 + expected_facts = ['gather_subset', + 'module_setup'] + not_expected_facts = ['lsb'] + + +class TestOhaiCollectedFacts(TestCollectedFacts): + gather_subset = ['!all', 'ohai'] + min_fact_count = 1 + max_fact_count = 10 + expected_facts = ['gather_subset', + 'module_setup'] + not_expected_facts = ['lsb'] + + +class TestPkgMgrFacts(TestCollectedFacts): + gather_subset = ['pkg_mgr'] + min_fact_count = 1 + max_fact_count = 20 + expected_facts = ['gather_subset', + 'module_setup', + 'pkg_mgr'] + collected_facts = { + "ansible_distribution": "Fedora", + "ansible_distribution_major_version": "28", + "ansible_os_family": "RedHat" + } + + +class TestOpenBSDPkgMgrFacts(TestPkgMgrFacts): + def test_is_openbsd_pkg(self): + self.assertIn('pkg_mgr', self.facts) + self.assertEqual(self.facts['pkg_mgr'], 'openbsd_pkg') + + def setUp(self): + self.patcher = patch('platform.system') + mock_platform = self.patcher.start() + mock_platform.return_value = 'OpenBSD' + + mock_module = self._mock_module() + collectors = self._collectors(mock_module) + + fact_collector = \ + ansible_collector.AnsibleFactCollector(collectors=collectors, + namespace=ns) + self.facts = fact_collector.collect(module=mock_module) + + def tearDown(self): + self.patcher.stop() diff --git a/test/units/module_utils/facts/test_collector.py b/test/units/module_utils/facts/test_collector.py new file mode 100644 index 00000000..9eab89f7 --- /dev/null +++ b/test/units/module_utils/facts/test_collector.py @@ -0,0 +1,563 @@ +# This file is part of Ansible +# -*- coding: utf-8 -*- +# +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from collections import defaultdict +import pprint + +# for testing +from units.compat import unittest + +from ansible.module_utils.facts import collector + +from ansible.module_utils.facts import default_collectors + + +class TestFindCollectorsForPlatform(unittest.TestCase): + def test(self): + compat_platforms = [{'system': 'Generic'}] + res = collector.find_collectors_for_platform(default_collectors.collectors, + compat_platforms) + for coll_class in res: + self.assertIn(coll_class._platform, ('Generic',)) + + def test_linux(self): + compat_platforms = [{'system': 'Linux'}] + res = collector.find_collectors_for_platform(default_collectors.collectors, + compat_platforms) + for coll_class in res: + self.assertIn(coll_class._platform, ('Linux',)) + + def test_linux_or_generic(self): + compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}] + res = collector.find_collectors_for_platform(default_collectors.collectors, + compat_platforms) + for coll_class in res: + self.assertIn(coll_class._platform, ('Generic', 'Linux')) + + +class TestSelectCollectorNames(unittest.TestCase): + + def _assert_equal_detail(self, obj1, obj2, msg=None): + msg = 'objects are not equal\n%s\n\n!=\n\n%s' % (pprint.pformat(obj1), pprint.pformat(obj2)) + return self.assertEqual(obj1, obj2, msg) + + def test(self): + collector_names = ['distribution', 'all_ipv4_addresses', + 'local', 'pkg_mgr'] + all_fact_subsets = self._all_fact_subsets() + res = collector.select_collector_classes(collector_names, + all_fact_subsets) + + expected = [default_collectors.DistributionFactCollector, + default_collectors.PkgMgrFactCollector] + + self._assert_equal_detail(res, expected) + + def test_default_collectors(self): + platform_info = {'system': 'Generic'} + compat_platforms = [platform_info] + collectors_for_platform = collector.find_collectors_for_platform(default_collectors.collectors, + compat_platforms) + + all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(collectors_for_platform) + + all_valid_subsets = frozenset(all_fact_subsets.keys()) + collector_names = collector.get_collector_names(valid_subsets=all_valid_subsets, + aliases_map=aliases_map, + platform_info=platform_info) + complete_collector_names = collector._solve_deps(collector_names, all_fact_subsets) + + dep_map = collector.build_dep_data(complete_collector_names, all_fact_subsets) + + ordered_deps = collector.tsort(dep_map) + ordered_collector_names = [x[0] for x in ordered_deps] + + res = collector.select_collector_classes(ordered_collector_names, + all_fact_subsets) + + self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) > + res.index(default_collectors.DistributionFactCollector), + res) + self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) > + res.index(default_collectors.PlatformFactCollector), + res) + + def _all_fact_subsets(self, data=None): + all_fact_subsets = defaultdict(list) + _data = {'pkg_mgr': [default_collectors.PkgMgrFactCollector], + 'distribution': [default_collectors.DistributionFactCollector], + 'network': [default_collectors.LinuxNetworkCollector]} + data = data or _data + for key, value in data.items(): + all_fact_subsets[key] = value + return all_fact_subsets + + +class TestGetCollectorNames(unittest.TestCase): + def test_none(self): + res = collector.get_collector_names() + self.assertIsInstance(res, set) + self.assertEqual(res, set([])) + + def 
test_empty_sets(self): + res = collector.get_collector_names(valid_subsets=frozenset([]), + minimal_gather_subset=frozenset([]), + gather_subset=[]) + self.assertIsInstance(res, set) + self.assertEqual(res, set([])) + + def test_empty_valid_and_min_with_all_gather_subset(self): + res = collector.get_collector_names(valid_subsets=frozenset([]), + minimal_gather_subset=frozenset([]), + gather_subset=['all']) + self.assertIsInstance(res, set) + self.assertEqual(res, set([])) + + def test_one_valid_with_all_gather_subset(self): + valid_subsets = frozenset(['my_fact']) + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=frozenset([]), + gather_subset=['all']) + self.assertIsInstance(res, set) + self.assertEqual(res, set(['my_fact'])) + + def _compare_res(self, gather_subset1, gather_subset2, + valid_subsets=None, min_subset=None): + + valid_subsets = valid_subsets or frozenset() + minimal_gather_subset = min_subset or frozenset() + + res1 = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=gather_subset1) + + res2 = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=gather_subset2) + + return res1, res2 + + def test_not_all_other_order(self): + valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['min_fact']) + + res1, res2 = self._compare_res(['!all', 'whatever'], + ['whatever', '!all'], + valid_subsets=valid_subsets, + min_subset=minimal_gather_subset) + self.assertEqual(res1, res2) + self.assertEqual(res1, set(['min_fact', 'whatever'])) + + def test_not_all_other_order_min(self): + valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['min_fact']) + + res1, res2 = self._compare_res(['!min_fact', 'whatever'], + ['whatever', '!min_fact'], + valid_subsets=valid_subsets, + min_subset=minimal_gather_subset) + self.assertEqual(res1, res2) + self.assertEqual(res1, set(['whatever'])) + + def test_one_minimal_with_all_gather_subset(self): + my_fact = 'my_fact' + valid_subsets = frozenset([my_fact]) + minimal_gather_subset = valid_subsets + + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['all']) + self.assertIsInstance(res, set) + self.assertEqual(res, set(['my_fact'])) + + def test_with_all_gather_subset(self): + valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['my_fact']) + + # even with '!all', the minimal_gather_subset should be returned + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['all']) + self.assertIsInstance(res, set) + self.assertEqual(res, set(['my_fact', 'something_else', 'whatever'])) + + def test_one_minimal_with_not_all_gather_subset(self): + valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['my_fact']) + + # even with '!all', the minimal_gather_subset should be returned + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['!all']) + self.assertIsInstance(res, set) + self.assertEqual(res, set(['my_fact'])) + + def test_gather_subset_excludes(self): + valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) + minimal_gather_subset = 
frozenset(['min_fact', 'min_another']) + + # even with '!all', the minimal_gather_subset should be returned + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + # gather_subset=set(['all', '!my_fact', '!whatever'])) + # gather_subset=['all', '!my_fact', '!whatever']) + gather_subset=['!min_fact', '!whatever']) + self.assertIsInstance(res, set) + # min_another is in minimal_gather_subset, so always returned + self.assertEqual(res, set(['min_another'])) + + def test_gather_subset_excludes_ordering(self): + valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['my_fact']) + + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['!all', 'whatever']) + self.assertIsInstance(res, set) + # excludes are higher precedence than includes, so !all excludes everything + # and then minimal_gather_subset is added. so '!all', 'other' == '!all' + self.assertEqual(res, set(['my_fact', 'whatever'])) + + def test_gather_subset_excludes_min(self): + valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['min_fact']) + + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['whatever', '!min']) + self.assertIsInstance(res, set) + # excludes are higher precedence than includes, so !all excludes everything + # and then minimal_gather_subset is added. so '!all', 'other' == '!all' + self.assertEqual(res, set(['whatever'])) + + def test_gather_subset_excludes_min_and_all(self): + valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) + minimal_gather_subset = frozenset(['min_fact']) + + res = collector.get_collector_names(valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['whatever', '!all', '!min']) + self.assertIsInstance(res, set) + # excludes are higher precedence than includes, so !all excludes everything + # and then minimal_gather_subset is added. 
so '!all', 'other' == '!all' + self.assertEqual(res, set(['whatever'])) + + def test_invalid_gather_subset(self): + valid_subsets = frozenset(['my_fact', 'something_else']) + minimal_gather_subset = frozenset(['my_fact']) + + self.assertRaisesRegexp(TypeError, + r'Bad subset .* given to Ansible.*allowed\:.*all,.*my_fact.*', + collector.get_collector_names, + valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + gather_subset=['my_fact', 'not_a_valid_gather_subset']) + + +class TestFindUnresolvedRequires(unittest.TestCase): + def test(self): + names = ['network', 'virtual', 'env'] + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + res = collector.find_unresolved_requires(names, all_fact_subsets) + # pprint.pprint(res) + + self.assertIsInstance(res, set) + self.assertEqual(res, set(['platform', 'distribution'])) + + def test_resolved(self): + names = ['network', 'virtual', 'env', 'platform', 'distribution'] + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'distribution': [default_collectors.DistributionFactCollector], + 'platform': [default_collectors.PlatformFactCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + res = collector.find_unresolved_requires(names, all_fact_subsets) + # pprint.pprint(res) + + self.assertIsInstance(res, set) + self.assertEqual(res, set()) + + +class TestBuildDepData(unittest.TestCase): + def test(self): + names = ['network', 'virtual', 'env'] + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + res = collector.build_dep_data(names, all_fact_subsets) + + # pprint.pprint(dict(res)) + self.assertIsInstance(res, defaultdict) + self.assertEqual(dict(res), + {'network': set(['platform', 'distribution']), + 'virtual': set(), + 'env': set()}) + + +class TestSolveDeps(unittest.TestCase): + def test_no_solution(self): + unresolved = set(['required_thing1', 'required_thing2']) + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + + self.assertRaises(collector.CollectorNotFoundError, + collector._solve_deps, + unresolved, + all_fact_subsets) + + def test(self): + unresolved = set(['env', 'network']) + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector], + 'platform': [default_collectors.PlatformFactCollector], + 'distribution': [default_collectors.DistributionFactCollector]} + res = collector.resolve_requires(unresolved, all_fact_subsets) + + res = collector._solve_deps(unresolved, all_fact_subsets) + + self.assertIsInstance(res, set) + for goal in unresolved: + self.assertIn(goal, res) + + +class TestResolveRequires(unittest.TestCase): + def test_no_resolution(self): + unresolved = ['required_thing1', 'required_thing2'] + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + self.assertRaisesRegexp(collector.UnresolvedFactDep, + 'unresolved fact dep.*required_thing2', + collector.resolve_requires, + unresolved, 
all_fact_subsets) + + def test(self): + unresolved = ['env', 'network'] + all_fact_subsets = {'env': [default_collectors.EnvFactCollector], + 'network': [default_collectors.LinuxNetworkCollector], + 'virtual': [default_collectors.LinuxVirtualCollector]} + res = collector.resolve_requires(unresolved, all_fact_subsets) + for goal in unresolved: + self.assertIn(goal, res) + + def test_exception(self): + unresolved = ['required_thing1'] + all_fact_subsets = {} + try: + collector.resolve_requires(unresolved, all_fact_subsets) + except collector.UnresolvedFactDep as exc: + self.assertIn(unresolved[0], '%s' % exc) + + +class TestTsort(unittest.TestCase): + def test(self): + dep_map = {'network': set(['distribution', 'platform']), + 'virtual': set(), + 'platform': set(['what_platform_wants']), + 'what_platform_wants': set(), + 'network_stuff': set(['network'])} + + res = collector.tsort(dep_map) + # pprint.pprint(res) + + self.assertIsInstance(res, list) + names = [x[0] for x in res] + self.assertTrue(names.index('network_stuff') > names.index('network')) + self.assertTrue(names.index('platform') > names.index('what_platform_wants')) + self.assertTrue(names.index('network') > names.index('platform')) + + def test_cycles(self): + dep_map = {'leaf1': set(), + 'leaf2': set(), + 'node1': set(['node2']), + 'node2': set(['node3']), + 'node3': set(['node1'])} + + self.assertRaises(collector.CycleFoundInFactDeps, + collector.tsort, + dep_map) + + def test_just_nodes(self): + dep_map = {'leaf1': set(), + 'leaf4': set(), + 'leaf3': set(), + 'leaf2': set()} + + res = collector.tsort(dep_map) + self.assertIsInstance(res, list) + names = [x[0] for x in res] + # not a lot to assert here, any order of the + # results is valid + self.assertEqual(set(names), set(dep_map.keys())) + + def test_self_deps(self): + dep_map = {'node1': set(['node1']), + 'node2': set(['node2'])} + self.assertRaises(collector.CycleFoundInFactDeps, + collector.tsort, + dep_map) + + def test_unsolvable(self): + dep_map = {'leaf1': set(), + 'node2': set(['leaf2'])} + + res = collector.tsort(dep_map) + self.assertIsInstance(res, list) + names = [x[0] for x in res] + self.assertEqual(set(names), set(dep_map.keys())) + + def test_chain(self): + dep_map = {'leaf1': set(['leaf2']), + 'leaf2': set(['leaf3']), + 'leaf3': set(['leaf4']), + 'leaf4': set(), + 'leaf5': set(['leaf1'])} + res = collector.tsort(dep_map) + self.assertIsInstance(res, list) + names = [x[0] for x in res] + self.assertEqual(set(names), set(dep_map.keys())) + + def test_multi_pass(self): + dep_map = {'leaf1': set(), + 'leaf2': set(['leaf3', 'leaf1', 'leaf4', 'leaf5']), + 'leaf3': set(['leaf4', 'leaf1']), + 'leaf4': set(['leaf1']), + 'leaf5': set(['leaf1'])} + res = collector.tsort(dep_map) + self.assertIsInstance(res, list) + names = [x[0] for x in res] + self.assertEqual(set(names), set(dep_map.keys())) + self.assertTrue(names.index('leaf1') < names.index('leaf2')) + for leaf in ('leaf2', 'leaf3', 'leaf4', 'leaf5'): + self.assertTrue(names.index('leaf1') < names.index(leaf)) + + +class TestCollectorClassesFromGatherSubset(unittest.TestCase): + maxDiff = None + + def _classes(self, + all_collector_classes=None, + valid_subsets=None, + minimal_gather_subset=None, + gather_subset=None, + gather_timeout=None, + platform_info=None): + platform_info = platform_info or {'system': 'Linux'} + return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes, + valid_subsets=valid_subsets, + minimal_gather_subset=minimal_gather_subset, + 
gather_subset=gather_subset, + gather_timeout=gather_timeout, + platform_info=platform_info) + + def test_no_args(self): + res = self._classes() + self.assertIsInstance(res, list) + self.assertEqual(res, []) + + def test_not_all(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['!all']) + self.assertIsInstance(res, list) + self.assertEqual(res, []) + + def test_all(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['all']) + self.assertIsInstance(res, list) + + def test_hardware(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['hardware']) + self.assertIsInstance(res, list) + self.assertIn(default_collectors.PlatformFactCollector, res) + self.assertIn(default_collectors.LinuxHardwareCollector, res) + + self.assertTrue(res.index(default_collectors.LinuxHardwareCollector) > + res.index(default_collectors.PlatformFactCollector)) + + def test_network(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['network']) + self.assertIsInstance(res, list) + self.assertIn(default_collectors.DistributionFactCollector, res) + self.assertIn(default_collectors.PlatformFactCollector, res) + self.assertIn(default_collectors.LinuxNetworkCollector, res) + + self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) > + res.index(default_collectors.PlatformFactCollector)) + self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) > + res.index(default_collectors.DistributionFactCollector)) + + # self.assertEqual(set(res, [default_collectors.DistributionFactCollector, + # default_collectors.PlatformFactCollector, + # default_collectors.LinuxNetworkCollector]) + + def test_env(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['env']) + self.assertIsInstance(res, list) + self.assertEqual(res, [default_collectors.EnvFactCollector]) + + def test_facter(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=set(['env', 'facter'])) + self.assertIsInstance(res, list) + self.assertEqual(set(res), + set([default_collectors.EnvFactCollector, + default_collectors.FacterFactCollector])) + + def test_facter_ohai(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=set(['env', 'facter', 'ohai'])) + self.assertIsInstance(res, list) + self.assertEqual(set(res), + set([default_collectors.EnvFactCollector, + default_collectors.FacterFactCollector, + default_collectors.OhaiFactCollector])) + + def test_just_facter(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=set(['facter'])) + self.assertIsInstance(res, list) + self.assertEqual(set(res), + set([default_collectors.FacterFactCollector])) + + def test_collector_specified_multiple_times(self): + res = self._classes(all_collector_classes=default_collectors.collectors, + gather_subset=['platform', 'all', 'machine']) + self.assertIsInstance(res, list) + self.assertIn(default_collectors.PlatformFactCollector, + res) + + def test_unknown_collector(self): + # something claims 'unknown_collector' is a valid gather_subset, but there is + # no FactCollector mapped to 'unknown_collector' + self.assertRaisesRegexp(TypeError, + r'Bad subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*', + self._classes, + all_collector_classes=default_collectors.collectors, + gather_subset=['env', 
'unknown_collector']) diff --git a/test/units/module_utils/facts/test_collectors.py b/test/units/module_utils/facts/test_collectors.py new file mode 100644 index 00000000..d9fe79bf --- /dev/null +++ b/test/units/module_utils/facts/test_collectors.py @@ -0,0 +1,430 @@ +# unit tests for ansible fact collectors +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import Mock, patch + +from . base import BaseFactsTest + +from ansible.module_utils.facts import collector + +from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector +from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector +from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector +from ansible.module_utils.facts.system.distribution import DistributionFactCollector +from ansible.module_utils.facts.system.dns import DnsFactCollector +from ansible.module_utils.facts.system.env import EnvFactCollector +from ansible.module_utils.facts.system.fips import FipsFactCollector +from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector +from ansible.module_utils.facts.system.platform import PlatformFactCollector +from ansible.module_utils.facts.system.python import PythonFactCollector +from ansible.module_utils.facts.system.selinux import SelinuxFactCollector +from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector +from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector +from ansible.module_utils.facts.system.user import UserFactCollector + +from ansible.module_utils.facts.virtual.base import VirtualCollector +from ansible.module_utils.facts.network.base import NetworkCollector +from ansible.module_utils.facts.hardware.base import HardwareCollector + + +class CollectorException(Exception): + pass + + +class ExceptionThrowingCollector(collector.BaseFactCollector): + name = 'exc_throwing' + + def __init__(self, collectors=None, namespace=None, exception=None): + super(ExceptionThrowingCollector, self).__init__(collectors, namespace) + self._exception = exception or CollectorException('collection failed') + + def collect(self, module=None, collected_facts=None): + raise self._exception + + +class TestExceptionThrowingCollector(BaseFactsTest): + __test__ = True + gather_subset = ['exc_throwing'] + valid_subsets = ['exc_throwing'] + collector_class = ExceptionThrowingCollector + + def test_collect(self): + module = self._mock_module() + fact_collector = self.collector_class() + self.assertRaises(CollectorException, + fact_collector.collect, + module=module, + collected_facts=self.collected_facts) + + def test_collect_with_namespace(self): + module = self._mock_module() + fact_collector = self.collector_class() + self.assertRaises(CollectorException, + 
fact_collector.collect_with_namespace, + module=module, + collected_facts=self.collected_facts) + + +class TestApparmorFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'apparmor'] + valid_subsets = ['apparmor'] + fact_namespace = 'ansible_apparmor' + collector_class = ApparmorFactCollector + + def test_collect(self): + facts_dict = super(TestApparmorFacts, self).test_collect() + self.assertIn('status', facts_dict['apparmor']) + + +class TestCapsFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'caps'] + valid_subsets = ['caps'] + fact_namespace = 'ansible_system_capabilities' + collector_class = SystemCapabilitiesFactCollector + + def _mock_module(self): + mock_module = Mock() + mock_module.params = {'gather_subset': self.gather_subset, + 'gather_timeout': 10, + 'filter': '*'} + mock_module.get_bin_path = Mock(return_value='/usr/sbin/capsh') + mock_module.run_command = Mock(return_value=(0, 'Current: =ep', '')) + return mock_module + + +class TestCmdLineFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'cmdline'] + valid_subsets = ['cmdline'] + fact_namespace = 'ansible_cmdline' + collector_class = CmdLineFactCollector + + def test_parse_proc_cmdline_uefi(self): + uefi_cmdline = r'initrd=\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd root=UUID=50973b75-4a66-4bf0-9764-2b7614489e64 ro quiet' + expected = {'initrd': r'\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd', + 'root': 'UUID=50973b75-4a66-4bf0-9764-2b7614489e64', + 'quiet': True, + 'ro': True} + fact_collector = self.collector_class() + facts_dict = fact_collector._parse_proc_cmdline(uefi_cmdline) + + self.assertDictEqual(facts_dict, expected) + + def test_parse_proc_cmdline_fedora(self): + cmdline_fedora = r'BOOT_IMAGE=/vmlinuz-4.10.16-200.fc25.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.luks.uuid=luks-c80b7537-358b-4a07-b88c-c59ef187479b rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8' # noqa + + expected = {'BOOT_IMAGE': '/vmlinuz-4.10.16-200.fc25.x86_64', + 'LANG': 'en_US.UTF-8', + 'quiet': True, + 'rd.luks.uuid': 'luks-c80b7537-358b-4a07-b88c-c59ef187479b', + 'rd.lvm.lv': 'fedora/swap', + 'rhgb': True, + 'ro': True, + 'root': '/dev/mapper/fedora-root'} + + fact_collector = self.collector_class() + facts_dict = fact_collector._parse_proc_cmdline(cmdline_fedora) + + self.assertDictEqual(facts_dict, expected) + + def test_parse_proc_cmdline_dup_console(self): + example = r'BOOT_IMAGE=/boot/vmlinuz-4.4.0-72-generic root=UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90 ro console=tty1 console=ttyS0' + + # FIXME: Two 'console' keywords? Using a dict for the fact value here loses info. 
Currently the 'last' one wins + expected = {'BOOT_IMAGE': '/boot/vmlinuz-4.4.0-72-generic', + 'root': 'UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90', + 'ro': True, + 'console': 'ttyS0'} + + fact_collector = self.collector_class() + facts_dict = fact_collector._parse_proc_cmdline(example) + + # TODO: fails because we lose a 'console' + self.assertDictEqual(facts_dict, expected) + + +class TestDistributionFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'distribution'] + valid_subsets = ['distribution'] + fact_namespace = 'ansible_distribution' + collector_class = DistributionFactCollector + + +class TestDnsFacts(BaseFactsTest): + + __test__ = True + gather_subset = ['!all', 'dns'] + valid_subsets = ['dns'] + fact_namespace = 'ansible_dns' + collector_class = DnsFactCollector + + +class TestEnvFacts(BaseFactsTest): + + __test__ = True + gather_subset = ['!all', 'env'] + valid_subsets = ['env'] + fact_namespace = 'ansible_env' + collector_class = EnvFactCollector + + def test_collect(self): + facts_dict = super(TestEnvFacts, self).test_collect() + self.assertIn('HOME', facts_dict['env']) + + +class TestFipsFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'fips'] + valid_subsets = ['fips'] + fact_namespace = 'ansible_fips' + collector_class = FipsFactCollector + + +class TestHardwareCollector(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'hardware'] + valid_subsets = ['hardware'] + fact_namespace = 'ansible_hardware' + collector_class = HardwareCollector + collected_facts = {'ansible_architecture': 'x86_64'} + + +class TestNetworkCollector(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'network'] + valid_subsets = ['network'] + fact_namespace = 'ansible_network' + collector_class = NetworkCollector + + +class TestPkgMgrFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'pkg_mgr'] + valid_subsets = ['pkg_mgr'] + fact_namespace = 'ansible_pkgmgr' + collector_class = PkgMgrFactCollector + collected_facts = { + "ansible_distribution": "Fedora", + "ansible_distribution_major_version": "28", + "ansible_os_family": "RedHat" + } + + def test_collect(self): + module = self._mock_module() + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts) + self.assertIsInstance(facts_dict, dict) + self.assertIn('pkg_mgr', facts_dict) + + +def _sanitize_os_path_apt_get(path): + if path == '/usr/bin/apt-get': + return True + else: + return False + + +class TestPkgMgrFactsAptFedora(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'pkg_mgr'] + valid_subsets = ['pkg_mgr'] + fact_namespace = 'ansible_pkgmgr' + collector_class = PkgMgrFactCollector + collected_facts = { + "ansible_distribution": "Fedora", + "ansible_distribution_major_version": "28", + "ansible_os_family": "RedHat", + "ansible_pkg_mgr": "apt" + } + + @patch('ansible.module_utils.facts.system.pkg_mgr.os.path.exists', side_effect=_sanitize_os_path_apt_get) + def test_collect(self, mock_os_path_exists): + module = self._mock_module() + fact_collector = self.collector_class() + facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts) + self.assertIsInstance(facts_dict, dict) + self.assertIn('pkg_mgr', facts_dict) + + +class TestOpenBSDPkgMgrFacts(BaseFactsTest): + __test__ = True + gather_subset = ['!all', 'pkg_mgr'] + valid_subsets = ['pkg_mgr'] + fact_namespace = 'ansible_pkgmgr' + collector_class = OpenBSDPkgMgrFactCollector + + def test_collect(self): + 
module = self._mock_module()
+        fact_collector = self.collector_class()
+        facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
+        self.assertIsInstance(facts_dict, dict)
+        self.assertIn('pkg_mgr', facts_dict)
+        self.assertEqual(facts_dict['pkg_mgr'], 'openbsd_pkg')
+
+
+class TestPlatformFactCollector(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'platform']
+    valid_subsets = ['platform']
+    fact_namespace = 'ansible_platform'
+    collector_class = PlatformFactCollector
+
+
+class TestPythonFactCollector(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'python']
+    valid_subsets = ['python']
+    fact_namespace = 'ansible_python'
+    collector_class = PythonFactCollector
+
+
+class TestSelinuxFacts(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'selinux']
+    valid_subsets = ['selinux']
+    fact_namespace = 'ansible_selinux'
+    collector_class = SelinuxFactCollector
+
+    def test_no_selinux(self):
+        with patch('ansible.module_utils.facts.system.selinux.HAVE_SELINUX', False):
+            module = self._mock_module()
+            fact_collector = self.collector_class()
+            facts_dict = fact_collector.collect(module=module)
+            self.assertIsInstance(facts_dict, dict)
+            self.assertEqual(facts_dict['selinux']['status'], 'Missing selinux Python library')
+            return facts_dict
+
+
+class TestServiceMgrFacts(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'service_mgr']
+    valid_subsets = ['service_mgr']
+    fact_namespace = 'ansible_service_mgr'
+    collector_class = ServiceMgrFactCollector
+
+    # TODO: dedupe some of this test code
+
+    @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+    def test_no_proc1(self, mock_gfc):
+        # no /proc/1/comm, ps returns non-0
+        # should fall back to 'service'
+        module = self._mock_module()
+        module.run_command = Mock(return_value=(1, '', 'wat'))
+        fact_collector = self.collector_class()
+        facts_dict = fact_collector.collect(module=module)
+        self.assertIsInstance(facts_dict, dict)
+        self.assertEqual(facts_dict['service_mgr'], 'service')
+
+    @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+    def test_no_proc1_ps_random_init(self, mock_gfc):
+        # no /proc/1/comm, ps returns '/sbin/sys11', which we don't know
+        # should end up returning 'sys11'
+        module = self._mock_module()
+        module.run_command = Mock(return_value=(0, '/sbin/sys11', ''))
+        fact_collector = self.collector_class()
+        facts_dict = fact_collector.collect(module=module)
+        self.assertIsInstance(facts_dict, dict)
+        self.assertEqual(facts_dict['service_mgr'], 'sys11')
+
+    @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+    def test_clowncar(self, mock_gfc):
+        # no /proc/1/comm, ps fails, distro and system are clowncar
+        # should fall back to 'service'
+        module = self._mock_module()
+        module.run_command = Mock(return_value=(1, '', ''))
+        collected_facts = {'distribution': 'clowncar',
+                           'system': 'ClownCarOS'}
+        fact_collector = self.collector_class()
+        facts_dict = fact_collector.collect(module=module,
+                                            collected_facts=collected_facts)
+        self.assertIsInstance(facts_dict, dict)
+        self.assertEqual(facts_dict['service_mgr'], 'service')
+
+    # TODO: re-enable these tests when we can mock more easily
+
+#     @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+#     def test_sunos_fallback(self, mock_gfc):
+#         # no /proc/1/comm, ps fails, 'system' is SunOS
+#         # should end up returning 'smf'?
+#         module = self._mock_module()
+#         # FIXME: the result here is a kluge to at least cover more of service_mgr.collect
+#         # TODO: remove
+#         # FIXME: have to force a pid for results here to get into any of the system/distro checks
+#         module.run_command = Mock(return_value=(1, ' 37 ', ''))
+#         collected_facts = {'system': 'SunOS'}
+#         fact_collector = self.collector_class(module=module)
+#         facts_dict = fact_collector.collect(collected_facts=collected_facts)
+#         print('facts_dict: %s' % facts_dict)
+#         self.assertIsInstance(facts_dict, dict)
+#         self.assertEqual(facts_dict['service_mgr'], 'smf')
+
+#     @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+#     def test_aix_fallback(self, mock_gfc):
+#         # no /proc/1/comm, ps fails, 'system' is AIX
+#         # should end up returning 'src'?
+#         module = self._mock_module()
+#         module.run_command = Mock(return_value=(1, '', ''))
+#         collected_facts = {'system': 'AIX'}
+#         fact_collector = self.collector_class(module=module)
+#         facts_dict = fact_collector.collect(collected_facts=collected_facts)
+#         print('facts_dict: %s' % facts_dict)
+#         self.assertIsInstance(facts_dict, dict)
+#         self.assertEqual(facts_dict['service_mgr'], 'src')
+
+#     @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+#     def test_linux_fallback(self, mock_gfc):
+#         # no /proc/1/comm, ps fails, 'system' is Linux
+#         # should end up returning 'sdfadf'? (placeholder assertion)
+#         module = self._mock_module()
+#         module.run_command = Mock(return_value=(1, ' 37 ', ''))
+#         collected_facts = {'system': 'Linux'}
+#         fact_collector = self.collector_class(module=module)
+#         facts_dict = fact_collector.collect(collected_facts=collected_facts)
+#         print('facts_dict: %s' % facts_dict)
+#         self.assertIsInstance(facts_dict, dict)
+#         self.assertEqual(facts_dict['service_mgr'], 'sdfadf')
+
+
+class TestSshPubKeyFactCollector(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'ssh_pub_keys']
+    valid_subsets = ['ssh_pub_keys']
+    fact_namespace = 'ansible_ssh_pub_keys'
+    collector_class = SshPubKeyFactCollector
+
+
+class TestUserFactCollector(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'user']
+    valid_subsets = ['user']
+    fact_namespace = 'ansible_user'
+    collector_class = UserFactCollector
+
+
+class TestVirtualFacts(BaseFactsTest):
+    __test__ = True
+    gather_subset = ['!all', 'virtual']
+    valid_subsets = ['virtual']
+    fact_namespace = 'ansible_virtual'
+    collector_class = VirtualCollector
diff --git a/test/units/module_utils/facts/test_date_time.py b/test/units/module_utils/facts/test_date_time.py
new file mode 100644
index 00000000..7c92e521
--- /dev/null
+++ b/test/units/module_utils/facts/test_date_time.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+import datetime
+import string
+import time
+
+from ansible.module_utils.facts.system import date_time
+
+EPOCH_TS = 1594449296.123456
+DT = datetime.datetime(2020, 7, 11, 12, 34, 56, 124356)
+DT_UTC = datetime.datetime(2020, 7, 11, 2, 34, 56, 124356)
+
+
+@pytest.fixture
+def fake_now(monkeypatch):
+    """
+    Patch `datetime.datetime.fromtimestamp()`, `datetime.datetime.utcfromtimestamp()`,
+    and `time.time()` to return deterministic values.
+ """ + + class FakeNow: + @classmethod + def fromtimestamp(cls, timestamp): + return DT + + @classmethod + def utcfromtimestamp(cls, timestamp): + return DT_UTC + + def _time(): + return EPOCH_TS + + monkeypatch.setattr(date_time.datetime, 'datetime', FakeNow) + monkeypatch.setattr(time, 'time', _time) + + +@pytest.fixture +def fake_date_facts(fake_now): + """Return a predictable instance of collected date_time facts.""" + + collector = date_time.DateTimeFactCollector() + data = collector.collect() + + return data + + +@pytest.mark.parametrize( + ('fact_name', 'fact_value'), + ( + ('year', '2020'), + ('month', '07'), + ('weekday', 'Saturday'), + ('weekday_number', '6'), + ('weeknumber', '27'), + ('day', '11'), + ('hour', '12'), + ('minute', '34'), + ('second', '56'), + ('date', '2020-07-11'), + ('time', '12:34:56'), + ('iso8601_basic', '20200711T123456124356'), + ('iso8601_basic_short', '20200711T123456'), + ('iso8601_micro', '2020-07-11T02:34:56.124356Z'), + ('iso8601', '2020-07-11T02:34:56Z'), + ), +) +def test_date_time_facts(fake_date_facts, fact_name, fact_value): + assert fake_date_facts['date_time'][fact_name] == fact_value + + +def test_date_time_epoch(fake_date_facts): + """Test that format of returned epoch value is correct""" + + assert fake_date_facts['date_time']['epoch'].isdigit() + assert len(fake_date_facts['date_time']['epoch']) == 10 # This length will not change any time soon + + +def test_date_time_tz(fake_date_facts): + """ + Test that the returned value for timezone consists of only uppercase + letters and is the expected length. + """ + + assert fake_date_facts['date_time']['tz'].isupper() + assert 2 <= len(fake_date_facts['date_time']['tz']) <= 5 + assert not set(fake_date_facts['date_time']['tz']).difference(set(string.ascii_uppercase)) + + +def test_date_time_tz_offset(fake_date_facts): + """ + Test that the timezone offset begins with a `+` or `-` and ends with a + series of integers. + """ + + assert fake_date_facts['date_time']['tz_offset'][0] in ['-', '+'] + assert fake_date_facts['date_time']['tz_offset'][1:].isdigit() + assert len(fake_date_facts['date_time']['tz_offset']) == 5 diff --git a/test/units/module_utils/facts/test_facts.py b/test/units/module_utils/facts/test_facts.py new file mode 100644 index 00000000..5e2de808 --- /dev/null +++ b/test/units/module_utils/facts/test_facts.py @@ -0,0 +1,644 @@ +# This file is part of Ansible +# -*- coding: utf-8 -*- +# +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +import pytest + +# for testing +from units.compat import unittest +from units.compat.mock import Mock, patch + +from ansible.module_utils import facts +from ansible.module_utils.facts import hardware +from ansible.module_utils.facts import network +from ansible.module_utils.facts import virtual + + +class BaseTestFactsPlatform(unittest.TestCase): + platform_id = 'Generic' + fact_class = hardware.base.Hardware + collector_class = None + + """Verify that the automagic in Hardware.__new__ selects the right subclass.""" + @patch('platform.system') + def test_new(self, mock_platform): + if not self.fact_class: + pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id) + mock_platform.return_value = self.platform_id + inst = self.fact_class(module=Mock(), load_on_init=False) + self.assertIsInstance(inst, self.fact_class) + self.assertEqual(inst.platform, self.platform_id) + + def test_subclass(self): + if not self.fact_class: + pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id) + # 'Generic' will try to map to platform.system() that we are not mocking here + if self.platform_id == 'Generic': + return + inst = self.fact_class(module=Mock(), load_on_init=False) + self.assertIsInstance(inst, self.fact_class) + self.assertEqual(inst.platform, self.platform_id) + + def test_collector(self): + if not self.collector_class: + pytest.skip('This test class needs to be updated to specify collector_class') + inst = self.collector_class() + self.assertIsInstance(inst, self.collector_class) + self.assertEqual(inst._platform, self.platform_id) + + +class TestLinuxFactsPlatform(BaseTestFactsPlatform): + platform_id = 'Linux' + fact_class = hardware.linux.LinuxHardware + collector_class = hardware.linux.LinuxHardwareCollector + + +class TestHurdFactsPlatform(BaseTestFactsPlatform): + platform_id = 'GNU' + fact_class = hardware.hurd.HurdHardware + collector_class = hardware.hurd.HurdHardwareCollector + + +class TestSunOSHardware(BaseTestFactsPlatform): + platform_id = 'SunOS' + fact_class = hardware.sunos.SunOSHardware + collector_class = hardware.sunos.SunOSHardwareCollector + + +class TestOpenBSDHardware(BaseTestFactsPlatform): + platform_id = 'OpenBSD' + fact_class = hardware.openbsd.OpenBSDHardware + collector_class = hardware.openbsd.OpenBSDHardwareCollector + + +class TestFreeBSDHardware(BaseTestFactsPlatform): + platform_id = 'FreeBSD' + fact_class = hardware.freebsd.FreeBSDHardware + collector_class = hardware.freebsd.FreeBSDHardwareCollector + + +class TestDragonFlyHardware(BaseTestFactsPlatform): + platform_id = 'DragonFly' + fact_class = None + collector_class = hardware.dragonfly.DragonFlyHardwareCollector + + +class TestNetBSDHardware(BaseTestFactsPlatform): + platform_id = 'NetBSD' + fact_class = hardware.netbsd.NetBSDHardware + collector_class = hardware.netbsd.NetBSDHardwareCollector + + +class TestAIXHardware(BaseTestFactsPlatform): + platform_id = 'AIX' + fact_class = hardware.aix.AIXHardware + collector_class = hardware.aix.AIXHardwareCollector + + +class TestHPUXHardware(BaseTestFactsPlatform): + platform_id = 'HP-UX' + fact_class = hardware.hpux.HPUXHardware + collector_class = hardware.hpux.HPUXHardwareCollector + + +class TestDarwinHardware(BaseTestFactsPlatform): + platform_id = 'Darwin' + fact_class = hardware.darwin.DarwinHardware + collector_class = hardware.darwin.DarwinHardwareCollector + + 
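
The platform test classes above all lean on the same trick: the Hardware (and, below, Network and Virtual) base classes override __new__ so that instantiating the base returns the subclass whose platform attribute matches platform.system(). As a minimal sketch of that dispatch pattern, with stand-in Hardware/LinuxHardware classes rather than Ansible's real ones:

import platform


class Hardware(object):
    # The subclass is chosen at instantiation time: __new__ consults
    # platform.system() and returns whichever subclass declares a
    # matching 'platform' attribute, falling back to the generic base.
    platform = 'Generic'

    def __new__(cls, *args, **kwargs):
        if cls is Hardware:
            system = platform.system()
            for subclass in cls.__subclasses__():
                if subclass.platform == system:
                    cls = subclass
                    break
        return object.__new__(cls)

    def __init__(self, module=None, load_on_init=True):
        self.module = module


class LinuxHardware(Hardware):
    platform = 'Linux'


# On a Linux box (or with platform.system patched to return 'Linux', as
# BaseTestFactsPlatform.test_new does), Hardware() comes back as a
# LinuxHardware instance.
print(type(Hardware()).__name__)
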
+class TestGenericNetwork(BaseTestFactsPlatform): + platform_id = 'Generic' + fact_class = network.base.Network + + +class TestHurdPfinetNetwork(BaseTestFactsPlatform): + platform_id = 'GNU' + fact_class = network.hurd.HurdPfinetNetwork + collector_class = network.hurd.HurdNetworkCollector + + +class TestLinuxNetwork(BaseTestFactsPlatform): + platform_id = 'Linux' + fact_class = network.linux.LinuxNetwork + collector_class = network.linux.LinuxNetworkCollector + + +class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform): + platform_id = 'Generic_BSD_Ifconfig' + fact_class = network.generic_bsd.GenericBsdIfconfigNetwork + collector_class = None + + +class TestHPUXNetwork(BaseTestFactsPlatform): + platform_id = 'HP-UX' + fact_class = network.hpux.HPUXNetwork + collector_class = network.hpux.HPUXNetworkCollector + + +class TestDarwinNetwork(BaseTestFactsPlatform): + platform_id = 'Darwin' + fact_class = network.darwin.DarwinNetwork + collector_class = network.darwin.DarwinNetworkCollector + + +class TestFreeBSDNetwork(BaseTestFactsPlatform): + platform_id = 'FreeBSD' + fact_class = network.freebsd.FreeBSDNetwork + collector_class = network.freebsd.FreeBSDNetworkCollector + + +class TestDragonFlyNetwork(BaseTestFactsPlatform): + platform_id = 'DragonFly' + fact_class = network.dragonfly.DragonFlyNetwork + collector_class = network.dragonfly.DragonFlyNetworkCollector + + +class TestAIXNetwork(BaseTestFactsPlatform): + platform_id = 'AIX' + fact_class = network.aix.AIXNetwork + collector_class = network.aix.AIXNetworkCollector + + +class TestNetBSDNetwork(BaseTestFactsPlatform): + platform_id = 'NetBSD' + fact_class = network.netbsd.NetBSDNetwork + collector_class = network.netbsd.NetBSDNetworkCollector + + +class TestOpenBSDNetwork(BaseTestFactsPlatform): + platform_id = 'OpenBSD' + fact_class = network.openbsd.OpenBSDNetwork + collector_class = network.openbsd.OpenBSDNetworkCollector + + +class TestSunOSNetwork(BaseTestFactsPlatform): + platform_id = 'SunOS' + fact_class = network.sunos.SunOSNetwork + collector_class = network.sunos.SunOSNetworkCollector + + +class TestLinuxVirtual(BaseTestFactsPlatform): + platform_id = 'Linux' + fact_class = virtual.linux.LinuxVirtual + collector_class = virtual.linux.LinuxVirtualCollector + + +class TestFreeBSDVirtual(BaseTestFactsPlatform): + platform_id = 'FreeBSD' + fact_class = virtual.freebsd.FreeBSDVirtual + collector_class = virtual.freebsd.FreeBSDVirtualCollector + + +class TestNetBSDVirtual(BaseTestFactsPlatform): + platform_id = 'NetBSD' + fact_class = virtual.netbsd.NetBSDVirtual + collector_class = virtual.netbsd.NetBSDVirtualCollector + + +class TestOpenBSDVirtual(BaseTestFactsPlatform): + platform_id = 'OpenBSD' + fact_class = virtual.openbsd.OpenBSDVirtual + collector_class = virtual.openbsd.OpenBSDVirtualCollector + + +class TestHPUXVirtual(BaseTestFactsPlatform): + platform_id = 'HP-UX' + fact_class = virtual.hpux.HPUXVirtual + collector_class = virtual.hpux.HPUXVirtualCollector + + +class TestSunOSVirtual(BaseTestFactsPlatform): + platform_id = 'SunOS' + fact_class = virtual.sunos.SunOSVirtual + collector_class = virtual.sunos.SunOSVirtualCollector + + +LSBLK_OUTPUT = b""" +/dev/sda +/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 +/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK +/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d +/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce +/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d +/dev/sr0 +/dev/loop0 
0f031512-ab15-497d-9abd-3a512b4a9390 +/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a +/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390 +/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a +/dev/mapper/docker-253:1-1050967-pool +/dev/loop2 +/dev/mapper/docker-253:1-1050967-pool +""" + +LSBLK_OUTPUT_2 = b""" +/dev/sda +/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 +/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK +/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d +/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce +/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d +/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373 +/dev/sr0 +/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 +""" + +LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'} + +UDEVADM_UUID = 'N/A' + +MTAB = """ +sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0 +securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 +tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 +devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 +tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 +cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 +pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 +cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0 +cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 +configfs /sys/kernel/config configfs rw,relatime 0 0 +/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0 +selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 +systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0 +debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0 +hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 +tmpfs /tmp tmpfs rw,seclabel 0 0 +mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 +/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0 +/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0 +tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0 +gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 +grimlock.g.a: 
/home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 +""" + +MTAB_ENTRIES = [ + [ + 'sysfs', + '/sys', + 'sysfs', + 'rw,seclabel,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'], + [ + 'devtmpfs', + '/dev', + 'devtmpfs', + 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755', + '0', + '0' + ], + [ + 'securityfs', + '/sys/kernel/security', + 'securityfs', + 'rw,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'], + [ + 'devpts', + '/dev/pts', + 'devpts', + 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000', + '0', + '0' + ], + ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'], + [ + 'tmpfs', + '/sys/fs/cgroup', + 'tmpfs', + 'ro,seclabel,nosuid,nodev,noexec,mode=755', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/systemd', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd', + '0', + '0' + ], + [ + 'pstore', + '/sys/fs/pstore', + 'pstore', + 'rw,seclabel,nosuid,nodev,noexec,relatime', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/devices', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,devices', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/freezer', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,freezer', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/memory', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,memory', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/pids', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,pids', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/blkio', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,blkio', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/cpuset', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,cpuset', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/cpu,cpuacct', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/hugetlb', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,hugetlb', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/perf_event', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,perf_event', + '0', + '0' + ], + [ + 'cgroup', + '/sys/fs/cgroup/net_cls,net_prio', + 'cgroup', + 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio', + '0', + '0' + ], + ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'], + [ + '/dev/mapper/fedora_dhcp129--186-root', + '/', + 'ext4', + 'rw,seclabel,relatime,data=ordered', + '0', + '0' + ], + ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'], + [ + 'systemd-1', + '/proc/sys/fs/binfmt_misc', + 'autofs', + 'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct', + '0', + '0' + ], + ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'], + [ + 'hugetlbfs', + '/dev/hugepages', + 'hugetlbfs', + 'rw,seclabel,relatime', + '0', + '0' + ], + ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'], + ['mqueue', '/dev/mqueue', 'mqueue', 
'rw,seclabel,relatime', '0', '0'],
+    [
+        '/dev/loop0',
+        '/var/lib/machines',
+        'btrfs',
+        'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
+        '0',
+        '0'
+    ],
+    ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+    # A 'none' fstype
+    ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+    # let's assume this is a bind mount
+    ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+    [
+        '/dev/mapper/fedora_dhcp129--186-home',
+        '/home',
+        'ext4',
+        'rw,seclabel,relatime,data=ordered',
+        '0',
+        '0'
+    ],
+    [
+        'tmpfs',
+        '/run/user/1000',
+        'tmpfs',
+        'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
+        '0',
+        '0'
+    ],
+    [
+        'gvfsd-fuse',
+        '/run/user/1000/gvfs',
+        'fuse.gvfsd-fuse',
+        'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
+        '0',
+        '0'
+    ],
+    ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0'],
+    # Mount path with space in the name
+    # The space is encoded as \040 since the fields in /etc/mtab are space-delimited
+    ['/dev/sdz9', r'/mnt/foo\040bar', 'ext4', 'rw,relatime', '0', '0'],
+]
+
+BIND_MOUNTS = ['/not/a/real/bind_mount']
+
+with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f:
+    FINDMNT_OUTPUT = f.read()
+
+
+class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
+
+    # FIXME: use mock.patch instead
+    def setUp(self):
+        # The @timeout decorator tracebacks if GATHER_TIMEOUT is None
+        # (the default, until get_all_facts sets it via a global)
+        facts.GATHER_TIMEOUT = 10
+
+    def tearDown(self):
+        facts.GATHER_TIMEOUT = None
+
+    # The Hardware subclasses freak out if instantiated directly, so
+    # mock platform.system and instantiate Hardware() so we get a LinuxHardware()
+    # we can test.
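
The stacked @patch decorators on the next test hand their mocks to the function in bottom-up order: the decorator nearest the def supplies the first mock argument. A small self-contained illustration of that ordering rule, using a hypothetical Thing class and the stock unittest.mock (the tests themselves import from units.compat.mock):

from unittest.mock import patch


class Thing(object):
    def a(self):
        return 'real a'

    def b(self):
        return 'real b'


# Stacked patches apply bottom-up: the patch closest to the function
# ('b') is applied first and therefore supplies the first mock argument.
@patch.object(Thing, 'a', return_value='fake a')
@patch.object(Thing, 'b', return_value='fake b')
def check(mock_b, mock_a):
    thing = Thing()
    assert thing.a() == 'fake a'
    assert thing.b() == 'fake b'


check()
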
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID)
+    def test_get_mount_facts(self,
+                             mock_udevadm_uuid,
+                             mock_lsblk_uuid,
+                             mock_find_bind_mounts,
+                             mock_mtab_entries):
+        module = Mock()
+        # Returns a LinuxHardware-ish
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+
+        # get_mount_facts() returns a dict of mount facts
+        mount_facts = lh.get_mount_facts()
+        self.assertIsInstance(mount_facts, dict)
+        self.assertIn('mounts', mount_facts)
+        self.assertIsInstance(mount_facts['mounts'], list)
+        self.assertIsInstance(mount_facts['mounts'][0], dict)
+
+        # Find mounts with space in the mountpoint path
+        mounts_with_space = [x for x in mount_facts['mounts'] if ' ' in x['mount']]
+        self.assertEqual(len(mounts_with_space), 1)
+        self.assertEqual(mounts_with_space[0]['mount'], '/mnt/foo bar')
+
+    @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
+    def test_get_mtab_entries(self, mock_get_file_content):
+
+        module = Mock()
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+        mtab_entries = lh._mtab_entries()
+        self.assertIsInstance(mtab_entries, list)
+        self.assertIsInstance(mtab_entries[0], list)
+        self.assertEqual(len(mtab_entries), 38)
+
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
+    def test_find_bind_mounts(self, mock_run_findmnt):
+        module = Mock()
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+        bind_mounts = lh._find_bind_mounts()
+
+        # If bind_mounts becomes another seq type, feel free to change
+        self.assertIsInstance(bind_mounts, set)
+        self.assertEqual(len(bind_mounts), 1)
+        self.assertIn('/not/a/real/bind_mount', bind_mounts)
+
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
+    def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
+        module = Mock()
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+        bind_mounts = lh._find_bind_mounts()
+
+        self.assertIsInstance(bind_mounts, set)
+        self.assertEqual(len(bind_mounts), 0)
+
+    def test_find_bind_mounts_no_findmnts(self):
+        module = Mock()
+        module.get_bin_path = Mock(return_value=None)
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+        bind_mounts = lh._find_bind_mounts()
+
+        self.assertIsInstance(bind_mounts, set)
+        self.assertEqual(len(bind_mounts), 0)
+
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
+    def test_lsblk_uuid(self, mock_run_lsblk):
+        module = Mock()
+        lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+        lsblk_uuids = lh._lsblk_uuid()
+
+        self.assertIsInstance(lsblk_uuids, dict)
+        self.assertIn(b'/dev/loop9', lsblk_uuids)
+        self.assertIn(b'/dev/sda1', lsblk_uuids)
+        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
+
+    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
+    def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
+        module = Mock()
+        lh =
hardware.linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + + self.assertIsInstance(lsblk_uuids, dict) + self.assertEqual(len(lsblk_uuids), 0) + + def test_lsblk_uuid_no_lsblk(self): + module = Mock() + module.get_bin_path = Mock(return_value=None) + lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + + self.assertIsInstance(lsblk_uuids, dict) + self.assertEqual(len(lsblk_uuids), 0) + + @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, '')) + def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk): + module = Mock() + lh = hardware.linux.LinuxHardware(module=module, load_on_init=False) + lsblk_uuids = lh._lsblk_uuid() + self.assertIsInstance(lsblk_uuids, dict) + self.assertIn(b'/dev/loop0', lsblk_uuids) + self.assertIn(b'/dev/sda1', lsblk_uuids) + self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373') + self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') diff --git a/test/units/module_utils/facts/test_timeout.py b/test/units/module_utils/facts/test_timeout.py new file mode 100644 index 00000000..2adbc4a6 --- /dev/null +++ b/test/units/module_utils/facts/test_timeout.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +# (c) 2017, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import time + +import pytest + +from ansible.module_utils.facts import timeout + + +@pytest.fixture +def set_gather_timeout_higher(): + default_timeout = timeout.GATHER_TIMEOUT + timeout.GATHER_TIMEOUT = 5 + yield + timeout.GATHER_TIMEOUT = default_timeout + + +@pytest.fixture +def set_gather_timeout_lower(): + default_timeout = timeout.GATHER_TIMEOUT + timeout.GATHER_TIMEOUT = 2 + yield + timeout.GATHER_TIMEOUT = default_timeout + + +@timeout.timeout +def sleep_amount_implicit(amount): + # implicit refers to the lack of argument to the decorator + time.sleep(amount) + return 'Succeeded after {0} sec'.format(amount) + + +@timeout.timeout(timeout.DEFAULT_GATHER_TIMEOUT + 5) +def sleep_amount_explicit_higher(amount): + # explicit refers to the argument to the decorator + time.sleep(amount) + return 'Succeeded after {0} sec'.format(amount) + + +@timeout.timeout(2) +def sleep_amount_explicit_lower(amount): + # explicit refers to the argument to the decorator + time.sleep(amount) + return 'Succeeded after {0} sec'.format(amount) + + +# +# Tests for how the timeout decorator is specified +# + +def test_defaults_still_within_bounds(): + # If the default changes outside of these bounds, some of the tests will + # no longer test the right thing. 
Need to review and update the timeouts
+    # in the other tests if this fails
+    assert timeout.DEFAULT_GATHER_TIMEOUT >= 4
+
+
+def test_implicit_file_default_succeeds():
+    # amount checked must be less than DEFAULT_GATHER_TIMEOUT
+    assert sleep_amount_implicit(1) == 'Succeeded after 1 sec'
+
+
+def test_implicit_file_default_timesout(monkeypatch):
+    monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
+    # sleep_time is greater than the default
+    sleep_time = timeout.DEFAULT_GATHER_TIMEOUT + 1
+    with pytest.raises(timeout.TimeoutError):
+        assert sleep_amount_implicit(sleep_time) == '(Not expected to succeed)'
+
+
+def test_implicit_file_overridden_succeeds(set_gather_timeout_higher):
+    # Set sleep_time greater than the default timeout and less than our new timeout
+    sleep_time = 3
+    assert sleep_amount_implicit(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
+
+
+def test_implicit_file_overridden_timesout(set_gather_timeout_lower):
+    # Set sleep_time greater than our new timeout but less than the default
+    sleep_time = 3
+    with pytest.raises(timeout.TimeoutError):
+        assert sleep_amount_implicit(sleep_time) == '(Not expected to succeed)'
+
+
+def test_explicit_succeeds(monkeypatch):
+    monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
+    # Set sleep_time greater than the (patched) default timeout and less than the explicit timeout
+    sleep_time = 2
+    assert sleep_amount_explicit_higher(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
+
+
+def test_explicit_timeout():
+    # Set sleep_time greater than the explicit timeout but less than the default
+    sleep_time = 3
+    with pytest.raises(timeout.TimeoutError):
+        assert sleep_amount_explicit_lower(sleep_time) == '(Not expected to succeed)'
+
+
+#
+# Test that exception handling works
+#
+
+@timeout.timeout(1)
+def function_times_out():
+    time.sleep(2)
+
+
+# This is just about the same test as function_times_out but uses a separate process which is where
+# we normally have our timeouts. It's more of an integration test than a unit test.
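
The helpers in this file use timeout.timeout both bare (@timeout.timeout) and with an argument (@timeout.timeout(2)); a decorator can support both spellings by checking whether it was handed a callable. Below is a minimal thread-based sketch of that dual calling convention. It is only an illustration under stated assumptions, not Ansible's implementation: the real decorator lives in ansible.module_utils.facts.timeout and differs in how it runs the function, honours GATHER_TIMEOUT, and propagates exceptions.

import functools
import threading

DEFAULT_GATHER_TIMEOUT = 10


class TimeoutError(Exception):
    pass


def timeout(seconds=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            limit = seconds if seconds is not None else DEFAULT_GATHER_TIMEOUT
            result = {}

            def target():
                # NOTE: exceptions raised by func are swallowed in this sketch
                result['value'] = func(*args, **kwargs)

            worker = threading.Thread(target=target)
            worker.daemon = True
            worker.start()
            worker.join(limit)
            if worker.is_alive():
                raise TimeoutError('%s timed out after %s seconds' % (func.__name__, limit))
            return result['value']
        return wrapper

    if callable(seconds):
        # Used bare, as @timeout: 'seconds' is actually the decorated function.
        func, seconds = seconds, None
        return decorator(func)
    # Used with an argument, as @timeout(2).
    return decorator
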
+@timeout.timeout(1) +def function_times_out_in_run_command(am): + am.run_command([sys.executable, '-c', 'import time ; time.sleep(2)']) + + +@timeout.timeout(1) +def function_other_timeout(): + raise TimeoutError('Vanilla Timeout') + + +@timeout.timeout(1) +def function_raises(): + 1 / 0 + + +@timeout.timeout(1) +def function_catches_all_exceptions(): + try: + time.sleep(10) + except BaseException: + raise RuntimeError('We should not have gotten here') + + +def test_timeout_raises_timeout(): + with pytest.raises(timeout.TimeoutError): + assert function_times_out() == '(Not expected to succeed)' + + +@pytest.mark.parametrize('stdin', ({},), indirect=['stdin']) +def test_timeout_raises_timeout_integration_test(am): + with pytest.raises(timeout.TimeoutError): + assert function_times_out_in_run_command(am) == '(Not expected to succeed)' + + +def test_timeout_raises_other_exception(): + with pytest.raises(ZeroDivisionError): + assert function_raises() == '(Not expected to succeed)' + + +def test_exception_not_caught_by_called_code(): + with pytest.raises(timeout.TimeoutError): + assert function_catches_all_exceptions() == '(Not expected to succeed)' diff --git a/test/units/module_utils/facts/test_utils.py b/test/units/module_utils/facts/test_utils.py new file mode 100644 index 00000000..28cb5d31 --- /dev/null +++ b/test/units/module_utils/facts/test_utils.py @@ -0,0 +1,39 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch + +from ansible.module_utils.facts import utils + + +class TestGetMountSize(unittest.TestCase): + def test(self): + mount_info = utils.get_mount_size('/dev/null/not/a/real/mountpoint') + self.assertIsInstance(mount_info, dict) + + def test_proc(self): + mount_info = utils.get_mount_size('/proc') + self.assertIsInstance(mount_info, dict) + + @patch('ansible.module_utils.facts.utils.os.statvfs', side_effect=OSError('intentionally induced os error')) + def test_oserror_on_statvfs(self, mock_statvfs): + mount_info = utils.get_mount_size('/dev/null/doesnt/matter') + self.assertIsInstance(mount_info, dict) + self.assertDictEqual(mount_info, {}) diff --git a/test/units/module_utils/facts/virtual/__init__.py b/test/units/module_utils/facts/virtual/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/facts/virtual/test_linux.py b/test/units/module_utils/facts/virtual/test_linux.py new file mode 100644 index 00000000..d534478c --- /dev/null +++ b/test/units/module_utils/facts/virtual/test_linux.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.facts.virtual import linux + + +def test_get_virtual_facts_bhyve(mocker): + mocker.patch('os.path.exists', return_value=False) + mocker.patch('ansible.module_utils.facts.virtual.linux.get_file_content', return_value='') + mocker.patch('ansible.module_utils.facts.virtual.linux.get_file_lines', return_value=[]) + + module = mocker.Mock() + module.run_command.return_value = (0, 'BHYVE\n', '') + inst = linux.LinuxVirtual(module) + + facts = inst.get_virtual_facts() + expected = { + 'virtualization_role': 'guest', + 'virtualization_type': 'bhyve', + } + + assert facts == expected diff --git a/test/units/module_utils/json_utils/__init__.py b/test/units/module_utils/json_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/module_utils/json_utils/test_filter_non_json_lines.py b/test/units/module_utils/json_utils/test_filter_non_json_lines.py new file mode 100644 index 00000000..b5b94999 --- /dev/null +++ b/test/units/module_utils/json_utils/test_filter_non_json_lines.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# (c) 2016, Matt Davis +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
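
Before the test cases for _filter_non_json_lines, a short usage sketch of the helper under test; the expected values follow directly from the assertions below (leading junk is dropped silently, trailing junk is dropped with a warning):

from ansible.module_utils.json_utils import _filter_non_json_lines

raw = 'some login banner\n{"changed": false}\ntrailing noise'
filtered, warnings = _filter_non_json_lines(raw)

assert filtered == '{"changed": false}'
assert warnings == ['Module invocation had junk after the JSON data: trailing noise']
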
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.module_utils.json_utils import _filter_non_json_lines + + +class TestAnsibleModuleExitJson(unittest.TestCase): + single_line_json_dict = u"""{"key": "value", "olá": "mundo"}""" + single_line_json_array = u"""["a","b","c"]""" + multi_line_json_dict = u"""{ +"key":"value" +}""" + multi_line_json_array = u"""[ +"a", +"b", +"c"]""" + + all_inputs = [ + single_line_json_dict, + single_line_json_array, + multi_line_json_dict, + multi_line_json_array + ] + + junk = [u"single line of junk", u"line 1/2 of junk\nline 2/2 of junk"] + + unparsable_cases = ( + u'No json here', + u'"olá": "mundo"', + u'{"No json": "ending"', + u'{"wrong": "ending"]', + u'["wrong": "ending"}', + ) + + def test_just_json(self): + for i in self.all_inputs: + filtered, warnings = _filter_non_json_lines(i) + self.assertEqual(filtered, i) + self.assertEqual(warnings, []) + + def test_leading_junk(self): + for i in self.all_inputs: + for j in self.junk: + filtered, warnings = _filter_non_json_lines(j + "\n" + i) + self.assertEqual(filtered, i) + self.assertEqual(warnings, []) + + def test_trailing_junk(self): + for i in self.all_inputs: + for j in self.junk: + filtered, warnings = _filter_non_json_lines(i + "\n" + j) + self.assertEqual(filtered, i) + self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()]) + + def test_leading_and_trailing_junk(self): + for i in self.all_inputs: + for j in self.junk: + filtered, warnings = _filter_non_json_lines("\n".join([j, i, j])) + self.assertEqual(filtered, i) + self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()]) + + def test_unparsable_filter_non_json_lines(self): + for i in self.unparsable_cases: + self.assertRaises( + ValueError, + _filter_non_json_lines, + data=i + ) diff --git a/test/units/module_utils/parsing/test_convert_bool.py b/test/units/module_utils/parsing/test_convert_bool.py new file mode 100644 index 00000000..2c5f8121 --- /dev/null +++ b/test/units/module_utils/parsing/test_convert_bool.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2017 Ansible Project +# License: GNU General Public License v3 or later (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt ) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.module_utils.parsing.convert_bool import boolean + + +class TestBoolean: + def test_bools(self): + assert boolean(True) is True + assert boolean(False) is False + + def test_none(self): + with pytest.raises(TypeError): + assert boolean(None, strict=True) is False + assert boolean(None, strict=False) is False + + def test_numbers(self): + assert boolean(1) is True + assert boolean(0) is False + assert boolean(0.0) is False + +# Current boolean() doesn't consider these to be true values +# def test_other_numbers(self): +# assert boolean(2) is True +# assert boolean(-1) is True +# assert boolean(0.1) is True + + def test_strings(self): + assert boolean("true") is True + assert boolean("TRUE") is True + assert boolean("t") is True + assert boolean("yes") is True + assert boolean("y") is True + assert boolean("on") is True + + def test_junk_values_nonstrict(self): + assert boolean("flibbity", strict=False) is False + assert boolean(42, strict=False) is False + assert boolean(42.0, strict=False) is 
False
+        assert boolean(object(), strict=False) is False
+
+    def test_junk_values_strict(self):
+        with pytest.raises(TypeError):
+            assert boolean("flibbity", strict=True) is False
+
+        with pytest.raises(TypeError):
+            assert boolean(42, strict=True) is False
+
+        with pytest.raises(TypeError):
+            assert boolean(42.0, strict=True) is False
+
+        with pytest.raises(TypeError):
+            assert boolean(object(), strict=True) is False
diff --git a/test/units/module_utils/test_api.py b/test/units/module_utils/test_api.py
new file mode 100644
index 00000000..0eaea046
--- /dev/null
+++ b/test/units/module_utils/test_api.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Abhijeet Kasurde
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.api import rate_limit, retry
+
+import pytest
+
+
+class TestRateLimit:
+
+    def test_ratelimit(self):
+        @rate_limit(rate=1, rate_limit=1)
+        def login_database():
+            return "success"
+        r = login_database()
+
+        assert r == 'success'
+
+
+class TestRetry:
+
+    def test_no_retry_required(self):
+        self.counter = 0
+
+        @retry(retries=4, retry_pause=2)
+        def login_database():
+            self.counter += 1
+            return 'success'
+
+        r = login_database()
+
+        assert r == 'success'
+        assert self.counter == 1
+
+    def test_catch_exception(self):
+
+        @retry(retries=1)
+        def login_database():
+            return 'success'
+
+        with pytest.raises(Exception):
+            login_database()
diff --git a/test/units/module_utils/test_distro.py b/test/units/module_utils/test_distro.py
new file mode 100644
index 00000000..708e7bca
--- /dev/null
+++ b/test/units/module_utils/test_distro.py
@@ -0,0 +1,38 @@
+
+# (c) 2018 Adrian Likins
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# or
+# Apache License v2.0 (see http://www.apache.org/licenses/LICENSE-2.0)
+#
+# Dual licensed so any test cases could potentially be included by the upstream project
+# that module_utils/distro.py is from (https://github.com/nir0s/distro)
+
+
+# Note that nir0s/distro has many more tests in its test suite. The tests here are
+# primarily for testing the vendoring.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils import distro
+from ansible.module_utils.six import string_types
+
+
+# Generic test case with minimal assertions about specific returned values.
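
As a quick usage sketch of the vendored API these tests cover (the printed values are illustrative; only the return types are guaranteed by the tests below):

from ansible.module_utils import distro

print(distro.id())                  # a string, e.g. 'fedora' (may be empty on non-Linux)
print(distro.info())                # a dict of distribution facts
print(distro.linux_distribution())  # a tuple, e.g. ('Fedora', '28', '')
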
+class TestDistro():
+    # should run on any platform without errors, even if non-linux without any
+    # useful info to return
+    def test_info(self):
+        info = distro.info()
+        assert isinstance(info, dict), \
+            'distro.info() returned %s (%s) which is not a dict' % (info, type(info))
+
+    def test_linux_distribution(self):
+        linux_dist = distro.linux_distribution()
+        assert isinstance(linux_dist, tuple), \
+            'linux_distribution() returned %s (%s) which is not a tuple' % (linux_dist, type(linux_dist))
+
+    def test_id(self):
+        id = distro.id()
+        assert isinstance(id, string_types), 'distro.id() returned %s (%s) which is not a string' % (id, type(id))
diff --git a/test/units/module_utils/urls/__init__.py b/test/units/module_utils/urls/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/units/module_utils/urls/fixtures/client.key b/test/units/module_utils/urls/fixtures/client.key
new file mode 100644
index 00000000..0e90d95d
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/client.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTyiVxrsSyZ+Qr
+iMT6sFYCqQtkLqlIWfbpTg9B6fZc793uoMzLUGq3efiZUhhxI78dQ3gNPgs1sK3W
+heFpk1n4IL8ll1MS1uJKk2vYqzZVhjgcvQpeV9gm7bt0ndPzGj5h4fh7proPntSy
+eBvMKVoqTT7tEnapRKy3anbwRPgTt7B5jEvJkPazuIc+ooMsYOHWfvj4oVsev0N2
+SsP0o6cHcsRujFMhz/JTJ1STQxacaVuyKpXacX7Eu1MJgGt/jU/QKNREcV9LdneO
+NgqY9tNv0h+9s7DfHYXm8U3POr+bdcW6Yy4791KGCaUNtiNqT1lvu/4yd4WRkXbF
+Fm5hJUUpAgMBAAECggEBAJYOac1MSK0nEvENbJM6ERa9cwa+UM6kf176IbFP9XAP
+u6zxXWjIR3RMBSmMkyjGbQhs30hypzqZPfH61aUZ8+rsOMKHnyKAAcFZBlZzqIGc
+IXGrNwd1Mf8S/Xg4ww1BkOWFV6s0jCu5G3Z/xyI2Ql4qcOVD6bMwpzclRbQjCand
+dvqyCdMD0sRDyeOIK5hBhUY60JnWbMCu6pBU+qPoRukbRieaeDLIN1clwEqIQV78
+LLnv4n9fuGozH0JdHHfyXFytCgIJvEspZUja/5R4orADhr3ZB010RLzYvs2ndE3B
+4cF9RgxspJZeJ/P+PglViZuzj37pXy+7GAcJLR9ka4kCgYEA/l01XKwkCzMgXHW4
+UPgl1+on42BsN7T9r3S5tihOjHf4ZJWkgYzisLVX+Nc1oUI3HQfM9PDJZXMMNm7J
+ZRvERcopU26wWqr6CFPblGv8oqXHqcpeta8i3xZKoPASsTW6ssuPCEajiLZbQ1rH
+H/HP+OZIVLM/WCPgA2BckTU9JnsCgYEA1SbXllXnlwGqmjitmY1Z07rUxQ3ah/fB
+iccbbg3E4onontYXIlI5zQms3u+qBdi0ZuwaDm5Y4BetOq0a3UyxAsugqVFnzTba
+1w/sFb3fw9KeQ/il4CXkbq87nzJfDmEyqHGCCYXbijHBxnq99PkqwVpaAhHHEW0m
+vWyMUvPRY6sCgYAbtUWR0cKfYbNdvwkT8OQWcBBmSWOgcdvMmBd+y0c7L/pj4pUn
+85PiEe8CUVcrOM5OIEJoUC5wGacz6r+PfwXTYGE+EGmvhr5z18aslVLQ2OQ2D7Bf
+dDOFP6VjgKNYoHS0802iZid8RfkNDj9wsGOqRlOMvnXhAQ9u7rlGrBj8LwKBgFfo
+ph99nH8eE9N5LrfWoUZ+loQS258aInsFYB26lgnsYMEpgO8JxIb4x5BGffPdVUHh
+fDmZbxQ1D5/UhvDgUVzayI8sYMg1KHpsOa0Z2zCzK8zSvu68EgNISCm3J5cRpUft
+UHlG+K19KfMG6lMfdG+8KMUTuetI/iI/o3wOzLvzAoGAIrOh30rHt8wit7ELARyx
+wPkp2ARYXrKfX3NES4c67zSAi+3dCjxRqywqTI0gLicyMlj8zEu9YE9Ix/rl8lRZ
+nQ9LZmqv7QHzhLTUCPGgZYnemvBzo7r0eW8Oag52dbcJO6FBszfWrxskm/fX25Rb
+WPxih2vdRy814dNPW25rgdw=
+-----END PRIVATE KEY-----
diff --git a/test/units/module_utils/urls/fixtures/client.pem b/test/units/module_utils/urls/fixtures/client.pem
new file mode 100644
index 00000000..c8c7b828
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/client.pem
@@ -0,0 +1,81 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 4099 (0x1003)
+    Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C=US, ST=North Carolina, L=Durham, O=Ansible, CN=ansible.http.tests
+        Validity
+            Not Before: Mar 21 18:22:47 2018 GMT
+            Not After : Mar 18 18:22:47 2028 GMT
+        Subject: C=US, ST=North Carolina, O=Ansible, CN=client.ansible.http.tests
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:d3:ca:25:71:ae:c4:b2:67:e4:2b:88:c4:fa:b0:
+
56:02:a9:0b:64:2e:a9:48:59:f6:e9:4e:0f:41:e9: + f6:5c:ef:dd:ee:a0:cc:cb:50:6a:b7:79:f8:99:52: + 18:71:23:bf:1d:43:78:0d:3e:0b:35:b0:ad:d6:85: + e1:69:93:59:f8:20:bf:25:97:53:12:d6:e2:4a:93: + 6b:d8:ab:36:55:86:38:1c:bd:0a:5e:57:d8:26:ed: + bb:74:9d:d3:f3:1a:3e:61:e1:f8:7b:a6:ba:0f:9e: + d4:b2:78:1b:cc:29:5a:2a:4d:3e:ed:12:76:a9:44: + ac:b7:6a:76:f0:44:f8:13:b7:b0:79:8c:4b:c9:90: + f6:b3:b8:87:3e:a2:83:2c:60:e1:d6:7e:f8:f8:a1: + 5b:1e:bf:43:76:4a:c3:f4:a3:a7:07:72:c4:6e:8c: + 53:21:cf:f2:53:27:54:93:43:16:9c:69:5b:b2:2a: + 95:da:71:7e:c4:bb:53:09:80:6b:7f:8d:4f:d0:28: + d4:44:71:5f:4b:76:77:8e:36:0a:98:f6:d3:6f:d2: + 1f:bd:b3:b0:df:1d:85:e6:f1:4d:cf:3a:bf:9b:75: + c5:ba:63:2e:3b:f7:52:86:09:a5:0d:b6:23:6a:4f: + 59:6f:bb:fe:32:77:85:91:91:76:c5:16:6e:61:25: + 45:29 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + AF:F3:E5:2A:EB:CF:C7:7E:A4:D6:49:92:F9:29:EE:6A:1B:68:AB:0F + X509v3 Authority Key Identifier: + keyid:13:2E:30:F0:04:EA:41:5F:B7:08:BD:34:31:D7:11:EA:56:A6:99:F0 + + Signature Algorithm: sha256WithRSAEncryption + 29:62:39:25:79:58:eb:a4:b3:0c:ea:aa:1d:2b:96:7c:6e:10: + ce:16:07:b7:70:7f:16:da:fd:20:e6:a2:d9:b4:88:e0:f9:84: + 87:f8:b0:0d:77:8b:ae:27:f5:ee:e6:4f:86:a1:2d:74:07:7c: + c7:5d:c2:bd:e4:70:e7:42:e4:14:ee:b9:b7:63:b8:8c:6d:21: + 61:56:0b:96:f6:15:ba:7a:ae:80:98:ac:57:99:79:3d:7a:a9: + d8:26:93:30:17:53:7c:2d:02:4b:64:49:25:65:e7:69:5a:08: + cf:84:94:8e:6a:42:a7:d1:4f:ba:39:4b:7c:11:67:31:f7:1b: + 2b:cd:79:c2:28:4d:d9:88:66:d6:7f:56:4c:4b:37:d1:3d:a8: + d9:4a:6b:45:1d:4d:a7:12:9f:29:77:6a:55:c1:b5:1d:0e:a5: + b9:4f:38:16:3c:7d:85:ae:ff:23:34:c7:2c:f6:14:0f:55:ef: + b8:00:89:f1:b2:8a:75:15:41:81:72:d0:43:a6:86:d1:06:e6: + ce:81:7e:5f:33:e6:f4:19:d6:70:00:ba:48:6e:05:fd:4c:3c: + c3:51:1b:bd:43:1a:24:c5:79:ea:7a:f0:85:a5:40:10:85:e9: + 23:09:09:80:38:9d:bc:81:5e:59:8c:5a:4d:58:56:b9:71:c2: + 78:cd:f3:b0 +-----BEGIN CERTIFICATE----- +MIIDuTCCAqGgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx +FzAVBgNVBAgMDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQHDAZEdXJoYW0xEDAOBgNV +BAoMB0Fuc2libGUxGzAZBgNVBAMMEmFuc2libGUuaHR0cC50ZXN0czAeFw0xODAz +MjExODIyNDdaFw0yODAzMTgxODIyNDdaMFwxCzAJBgNVBAYTAlVTMRcwFQYDVQQI +DA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UECgwHQW5zaWJsZTEiMCAGA1UEAwwZY2xp +ZW50LmFuc2libGUuaHR0cC50ZXN0czCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANPKJXGuxLJn5CuIxPqwVgKpC2QuqUhZ9ulOD0Hp9lzv3e6gzMtQard5 ++JlSGHEjvx1DeA0+CzWwrdaF4WmTWfggvyWXUxLW4kqTa9irNlWGOBy9Cl5X2Cbt +u3Sd0/MaPmHh+Humug+e1LJ4G8wpWipNPu0SdqlErLdqdvBE+BO3sHmMS8mQ9rO4 +hz6igyxg4dZ++PihWx6/Q3ZKw/SjpwdyxG6MUyHP8lMnVJNDFpxpW7IqldpxfsS7 +UwmAa3+NT9Ao1ERxX0t2d442Cpj202/SH72zsN8dhebxTc86v5t1xbpjLjv3UoYJ +pQ22I2pPWW+7/jJ3hZGRdsUWbmElRSkCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglg +hkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0O +BBYEFK/z5Srrz8d+pNZJkvkp7mobaKsPMB8GA1UdIwQYMBaAFBMuMPAE6kFftwi9 +NDHXEepWppnwMA0GCSqGSIb3DQEBCwUAA4IBAQApYjkleVjrpLMM6qodK5Z8bhDO +Fge3cH8W2v0g5qLZtIjg+YSH+LANd4uuJ/Xu5k+GoS10B3zHXcK95HDnQuQU7rm3 +Y7iMbSFhVguW9hW6eq6AmKxXmXk9eqnYJpMwF1N8LQJLZEklZedpWgjPhJSOakKn +0U+6OUt8EWcx9xsrzXnCKE3ZiGbWf1ZMSzfRPajZSmtFHU2nEp8pd2pVwbUdDqW5 +TzgWPH2Frv8jNMcs9hQPVe+4AInxsop1FUGBctBDpobRBubOgX5fM+b0GdZwALpI +bgX9TDzDURu9QxokxXnqevCFpUAQhekjCQmAOJ28gV5ZjFpNWFa5ccJ4zfOw +-----END CERTIFICATE----- diff --git a/test/units/module_utils/urls/fixtures/client.txt b/test/units/module_utils/urls/fixtures/client.txt new file mode 100644 index 00000000..380330f2 --- /dev/null +++ 
b/test/units/module_utils/urls/fixtures/client.txt @@ -0,0 +1,3 @@ +client.pem and client.key were retrieved from httptester docker image: + +ansible/ansible@sha256:fa5def8c294fc50813af131c0b5737594d852abac9cbe7ba38e17bf1c8476f3f diff --git a/test/units/module_utils/urls/fixtures/multipart.txt b/test/units/module_utils/urls/fixtures/multipart.txt new file mode 100644 index 00000000..1a4a0661 --- /dev/null +++ b/test/units/module_utils/urls/fixtures/multipart.txt @@ -0,0 +1,166 @@ +--===============3996062709511591449== +Content-Type: text/plain +Content-Disposition: form-data; name="file1"; filename="fake_file1.txt" + +file_content_1 +--===============3996062709511591449== +Content-Type: text/html +Content-Disposition: form-data; name="file2"; filename="fake_file2.html" + + +--===============3996062709511591449== +Content-Type: application/json +Content-Disposition: form-data; name="file3"; filename="fake_file3.json" + +{"foo": "bar"} +--===============3996062709511591449== +Content-Transfer-Encoding: base64 +Content-Type: text/plain +Content-Disposition: form-data; name="file4"; filename="client.pem" + +Q2VydGlmaWNhdGU6CiAgICBEYXRhOgogICAgICAgIFZlcnNpb246IDMgKDB4MikKICAgICAgICBT +ZXJpYWwgTnVtYmVyOiA0MDk5ICgweDEwMDMpCiAgICBTaWduYXR1cmUgQWxnb3JpdGhtOiBzaGEy +NTZXaXRoUlNBRW5jcnlwdGlvbgogICAgICAgIElzc3VlcjogQz1VUywgU1Q9Tm9ydGggQ2Fyb2xp +bmEsIEw9RHVyaGFtLCBPPUFuc2libGUsIENOPWFuc2libGUuaHR0cC50ZXN0cwogICAgICAgIFZh +bGlkaXR5CiAgICAgICAgICAgIE5vdCBCZWZvcmU6IE1hciAyMSAxODoyMjo0NyAyMDE4IEdNVAog +ICAgICAgICAgICBOb3QgQWZ0ZXIgOiBNYXIgMTggMTg6MjI6NDcgMjAyOCBHTVQKICAgICAgICBT +dWJqZWN0OiBDPVVTLCBTVD1Ob3J0aCBDYXJvbGluYSwgTz1BbnNpYmxlLCBDTj1jbGllbnQuYW5z +aWJsZS5odHRwLnRlc3RzCiAgICAgICAgU3ViamVjdCBQdWJsaWMgS2V5IEluZm86CiAgICAgICAg +ICAgIFB1YmxpYyBLZXkgQWxnb3JpdGhtOiByc2FFbmNyeXB0aW9uCiAgICAgICAgICAgICAgICBQ +dWJsaWMtS2V5OiAoMjA0OCBiaXQpCiAgICAgICAgICAgICAgICBNb2R1bHVzOgogICAgICAgICAg +ICAgICAgICAgIDAwOmQzOmNhOjI1OjcxOmFlOmM0OmIyOjY3OmU0OjJiOjg4OmM0OmZhOmIwOgog +ICAgICAgICAgICAgICAgICAgIDU2OjAyOmE5OjBiOjY0OjJlOmE5OjQ4OjU5OmY2OmU5OjRlOjBm +OjQxOmU5OgogICAgICAgICAgICAgICAgICAgIGY2OjVjOmVmOmRkOmVlOmEwOmNjOmNiOjUwOjZh +OmI3Ojc5OmY4Ojk5OjUyOgogICAgICAgICAgICAgICAgICAgIDE4OjcxOjIzOmJmOjFkOjQzOjc4 +OjBkOjNlOjBiOjM1OmIwOmFkOmQ2Ojg1OgogICAgICAgICAgICAgICAgICAgIGUxOjY5OjkzOjU5 +OmY4OjIwOmJmOjI1Ojk3OjUzOjEyOmQ2OmUyOjRhOjkzOgogICAgICAgICAgICAgICAgICAgIDZi +OmQ4OmFiOjM2OjU1Ojg2OjM4OjFjOmJkOjBhOjVlOjU3OmQ4OjI2OmVkOgogICAgICAgICAgICAg +ICAgICAgIGJiOjc0OjlkOmQzOmYzOjFhOjNlOjYxOmUxOmY4OjdiOmE2OmJhOjBmOjllOgogICAg +ICAgICAgICAgICAgICAgIGQ0OmIyOjc4OjFiOmNjOjI5OjVhOjJhOjRkOjNlOmVkOjEyOjc2OmE5 +OjQ0OgogICAgICAgICAgICAgICAgICAgIGFjOmI3OjZhOjc2OmYwOjQ0OmY4OjEzOmI3OmIwOjc5 +OjhjOjRiOmM5OjkwOgogICAgICAgICAgICAgICAgICAgIGY2OmIzOmI4Ojg3OjNlOmEyOjgzOjJj +OjYwOmUxOmQ2OjdlOmY4OmY4OmExOgogICAgICAgICAgICAgICAgICAgIDViOjFlOmJmOjQzOjc2 +OjRhOmMzOmY0OmEzOmE3OjA3OjcyOmM0OjZlOjhjOgogICAgICAgICAgICAgICAgICAgIDUzOjIx +OmNmOmYyOjUzOjI3OjU0OjkzOjQzOjE2OjljOjY5OjViOmIyOjJhOgogICAgICAgICAgICAgICAg +ICAgIDk1OmRhOjcxOjdlOmM0OmJiOjUzOjA5OjgwOjZiOjdmOjhkOjRmOmQwOjI4OgogICAgICAg +ICAgICAgICAgICAgIGQ0OjQ0OjcxOjVmOjRiOjc2Ojc3OjhlOjM2OjBhOjk4OmY2OmQzOjZmOmQy +OgogICAgICAgICAgICAgICAgICAgIDFmOmJkOmIzOmIwOmRmOjFkOjg1OmU2OmYxOjRkOmNmOjNh +OmJmOjliOjc1OgogICAgICAgICAgICAgICAgICAgIGM1OmJhOjYzOjJlOjNiOmY3OjUyOjg2OjA5 +OmE1OjBkOmI2OjIzOjZhOjRmOgogICAgICAgICAgICAgICAgICAgIDU5OjZmOmJiOmZlOjMyOjc3 +Ojg1OjkxOjkxOjc2OmM1OjE2OjZlOjYxOjI1OgogICAgICAgICAgICAgICAgICAgIDQ1OjI5CiAg +ICAgICAgICAgICAgICBFeHBvbmVudDogNjU1MzcgKDB4MTAwMDEpCiAgICAgICAgWDUwOXYzIGV4 
+dGVuc2lvbnM6CiAgICAgICAgICAgIFg1MDl2MyBCYXNpYyBDb25zdHJhaW50czogCiAgICAgICAg +ICAgICAgICBDQTpGQUxTRQogICAgICAgICAgICBOZXRzY2FwZSBDb21tZW50OiAKICAgICAgICAg +ICAgICAgIE9wZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlCiAgICAgICAgICAgIFg1MDl2MyBT +dWJqZWN0IEtleSBJZGVudGlmaWVyOiAKICAgICAgICAgICAgICAgIEFGOkYzOkU1OjJBOkVCOkNG +OkM3OjdFOkE0OkQ2OjQ5OjkyOkY5OjI5OkVFOjZBOjFCOjY4OkFCOjBGCiAgICAgICAgICAgIFg1 +MDl2MyBBdXRob3JpdHkgS2V5IElkZW50aWZpZXI6IAogICAgICAgICAgICAgICAga2V5aWQ6MTM6 +MkU6MzA6RjA6MDQ6RUE6NDE6NUY6Qjc6MDg6QkQ6MzQ6MzE6RDc6MTE6RUE6NTY6QTY6OTk6RjAK +CiAgICBTaWduYXR1cmUgQWxnb3JpdGhtOiBzaGEyNTZXaXRoUlNBRW5jcnlwdGlvbgogICAgICAg +ICAyOTo2MjozOToyNTo3OTo1ODplYjphNDpiMzowYzplYTphYToxZDoyYjo5Njo3Yzo2ZToxMDoK +ICAgICAgICAgY2U6MTY6MDc6Yjc6NzA6N2Y6MTY6ZGE6ZmQ6MjA6ZTY6YTI6ZDk6YjQ6ODg6ZTA6 +Zjk6ODQ6CiAgICAgICAgIDg3OmY4OmIwOjBkOjc3OjhiOmFlOjI3OmY1OmVlOmU2OjRmOjg2OmEx +OjJkOjc0OjA3OjdjOgogICAgICAgICBjNzo1ZDpjMjpiZDplNDo3MDplNzo0MjplNDoxNDplZTpi +OTpiNzo2MzpiODo4Yzo2ZDoyMToKICAgICAgICAgNjE6NTY6MGI6OTY6ZjY6MTU6YmE6N2E6YWU6 +ODA6OTg6YWM6NTc6OTk6Nzk6M2Q6N2E6YTk6CiAgICAgICAgIGQ4OjI2OjkzOjMwOjE3OjUzOjdj +OjJkOjAyOjRiOjY0OjQ5OjI1OjY1OmU3OjY5OjVhOjA4OgogICAgICAgICBjZjo4NDo5NDo4ZTo2 +YTo0MjphNzpkMTo0ZjpiYTozOTo0Yjo3YzoxMTo2NzozMTpmNzoxYjoKICAgICAgICAgMmI6Y2Q6 +Nzk6YzI6Mjg6NGQ6ZDk6ODg6NjY6ZDY6N2Y6NTY6NGM6NGI6Mzc6ZDE6M2Q6YTg6CiAgICAgICAg +IGQ5OjRhOjZiOjQ1OjFkOjRkOmE3OjEyOjlmOjI5Ojc3OjZhOjU1OmMxOmI1OjFkOjBlOmE1Ogog +ICAgICAgICBiOTo0ZjozODoxNjozYzo3ZDo4NTphZTpmZjoyMzozNDpjNzoyYzpmNjoxNDowZjo1 +NTplZjoKICAgICAgICAgYjg6MDA6ODk6ZjE6YjI6OGE6NzU6MTU6NDE6ODE6NzI6ZDA6NDM6YTY6 +ODY6ZDE6MDY6ZTY6CiAgICAgICAgIGNlOjgxOjdlOjVmOjMzOmU2OmY0OjE5OmQ2OjcwOjAwOmJh +OjQ4OjZlOjA1OmZkOjRjOjNjOgogICAgICAgICBjMzo1MToxYjpiZDo0MzoxYToyNDpjNTo3OTpl +YTo3YTpmMDo4NTphNTo0MDoxMDo4NTplOToKICAgICAgICAgMjM6MDk6MDk6ODA6Mzg6OWQ6YmM6 +ODE6NWU6NTk6OGM6NWE6NGQ6NTg6NTY6Yjk6NzE6YzI6CiAgICAgICAgIDc4OmNkOmYzOmIwCi0t +LS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlEdVRDQ0FxR2dBd0lCQWdJQ0VBTXdEUVlKS29a +SWh2Y05BUUVMQlFBd1pqRUxNQWtHQTFVRUJoTUNWVk14CkZ6QVZCZ05WQkFnTURrNXZjblJvSUVO +aGNtOXNhVzVoTVE4d0RRWURWUVFIREFaRWRYSm9ZVzB4RURBT0JnTlYKQkFvTUIwRnVjMmxpYkdV +eEd6QVpCZ05WQkFNTUVtRnVjMmxpYkdVdWFIUjBjQzUwWlhOMGN6QWVGdzB4T0RBegpNakV4T0RJ +eU5EZGFGdzB5T0RBek1UZ3hPREl5TkRkYU1Gd3hDekFKQmdOVkJBWVRBbFZUTVJjd0ZRWURWUVFJ +CkRBNU9iM0owYUNCRFlYSnZiR2x1WVRFUU1BNEdBMVVFQ2d3SFFXNXphV0pzWlRFaU1DQUdBMVVF +QXd3WlkyeHAKWlc1MExtRnVjMmxpYkdVdWFIUjBjQzUwWlhOMGN6Q0NBU0l3RFFZSktvWklodmNO +QVFFQkJRQURnZ0VQQURDQwpBUW9DZ2dFQkFOUEtKWEd1eExKbjVDdUl4UHF3VmdLcEMyUXVxVWha +OXVsT0QwSHA5bHp2M2U2Z3pNdFFhcmQ1CitKbFNHSEVqdngxRGVBMCtDeld3cmRhRjRXbVRXZmdn +dnlXWFV4TFc0a3FUYTlpck5sV0dPQnk5Q2w1WDJDYnQKdTNTZDAvTWFQbUhoK0h1bXVnK2UxTEo0 +Rzh3cFdpcE5QdTBTZHFsRXJMZHFkdkJFK0JPM3NIbU1TOG1ROXJPNApoejZpZ3l4ZzRkWisrUGlo +V3g2L1EzWkt3L1NqcHdkeXhHNk1VeUhQOGxNblZKTkRGcHhwVzdJcWxkcHhmc1M3ClV3bUFhMytO +VDlBbzFFUnhYMHQyZDQ0MkNwajIwMi9TSDcyenNOOGRoZWJ4VGM4NnY1dDF4YnBqTGp2M1VvWUoK +cFEyMkkycFBXVys3L2pKM2haR1Jkc1VXYm1FbFJTa0NBd0VBQWFON01Ia3dDUVlEVlIwVEJBSXdB +REFzQmdsZwpoa2dCaHZoQ0FRMEVIeFlkVDNCbGJsTlRUQ0JIWlc1bGNtRjBaV1FnUTJWeWRHbG1h +V05oZEdVd0hRWURWUjBPCkJCWUVGSy96NVNycno4ZCtwTlpKa3ZrcDdtb2JhS3NQTUI4R0ExVWRJ +d1FZTUJhQUZCTXVNUEFFNmtGZnR3aTkKTkRIWEVlcFdwcG53TUEwR0NTcUdTSWIzRFFFQkN3VUFB +NElCQVFBcFlqa2xlVmpycExNTTZxb2RLNVo4YmhETwpGZ2UzY0g4VzJ2MGc1cUxadElqZytZU0gr +TEFOZDR1dUovWHU1aytHb1MxMEIzekhYY0s5NUhEblF1UVU3cm0zClk3aU1iU0ZoVmd1VzloVzZl +cTZBbUt4WG1YazllcW5ZSnBNd0YxTjhMUUpMWkVrbFplZHBXZ2pQaEpTT2FrS24KMFUrNk9VdDhF +V2N4OXhzcnpYbkNLRTNaaUdiV2YxWk1TemZSUGFqWlNtdEZIVTJuRXA4cGQycFZ3YlVkRHFXNQpU 
+emdXUEgyRnJ2OGpOTWNzOWhRUFZlKzRBSW54c29wMUZVR0JjdEJEcG9iUkJ1Yk9nWDVmTStiMEdk +WndBTHBJCmJnWDlURHpEVVJ1OVF4b2t4WG5xZXZDRnBVQVFoZWtqQ1FtQU9KMjhnVjVaakZwTldG +YTVjY0o0emZPdwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +--===============3996062709511591449== +Content-Transfer-Encoding: base64 +Content-Type: application/pgp-keys +Content-Disposition: form-data; name="file5"; filename="client.key" + +LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZB +QVNDQktjd2dnU2pBZ0VBQW9JQkFRRFR5aVZ4cnNTeVorUXIKaU1UNnNGWUNxUXRrTHFsSVdmYnBU +ZzlCNmZaYzc5M3VvTXpMVUdxM2VmaVpVaGh4STc4ZFEzZ05QZ3Mxc0szVwpoZUZwazFuNElMOGxs +MU1TMXVKS2sydllxelpWaGpnY3ZRcGVWOWdtN2J0MG5kUHpHajVoNGZoN3Byb1BudFN5CmVCdk1L +Vm9xVFQ3dEVuYXBSS3kzYW5id1JQZ1R0N0I1akV2SmtQYXp1SWMrb29Nc1lPSFdmdmo0b1ZzZXYw +TjIKU3NQMG82Y0hjc1J1akZNaHovSlRKMVNUUXhhY2FWdXlLcFhhY1g3RXUxTUpnR3QvalUvUUtO +UkVjVjlMZG5lTwpOZ3FZOXROdjBoKzlzN0RmSFlYbThVM1BPcitiZGNXNll5NDc5MUtHQ2FVTnRp +TnFUMWx2dS80eWQ0V1JrWGJGCkZtNWhKVVVwQWdNQkFBRUNnZ0VCQUpZT2FjMU1TSzBuRXZFTmJK +TTZFUmE5Y3dhK1VNNmtmMTc2SWJGUDlYQVAKdTZ6eFhXaklSM1JNQlNtTWt5akdiUWhzMzBoeXB6 +cVpQZkg2MWFVWjgrcnNPTUtIbnlLQUFjRlpCbFp6cUlHYwpJWEdyTndkMU1mOFMvWGc0d3cxQmtP +V0ZWNnMwakN1NUczWi94eUkyUWw0cWNPVkQ2Yk13cHpjbFJiUWpDYW5kCmR2cXlDZE1EMHNSRHll +T0lLNWhCaFVZNjBKbldiTUN1NnBCVStxUG9SdWtiUmllYWVETElOMWNsd0VxSVFWNzgKTExudjRu +OWZ1R296SDBKZEhIZnlYRnl0Q2dJSnZFc3BaVWphLzVSNG9yQURocjNaQjAxMFJMell2czJuZEUz +Qgo0Y0Y5Umd4c3BKWmVKL1ArUGdsVmladXpqMzdwWHkrN0dBY0pMUjlrYTRrQ2dZRUEvbDAxWEt3 +a0N6TWdYSFc0ClVQZ2wxK29uNDJCc043VDlyM1M1dGloT2pIZjRaSldrZ1l6aXNMVlgrTmMxb1VJ +M0hRZk05UERKWlhNTU5tN0oKWlJ2RVJjb3BVMjZ3V3FyNkNGUGJsR3Y4b3FYSHFjcGV0YThpM3ha +S29QQVNzVFc2c3N1UENFYWppTFpiUTFySApIL0hQK09aSVZMTS9XQ1BnQTJCY2tUVTlKbnNDZ1lF +QTFTYlhsbFhubHdHcW1qaXRtWTFaMDdyVXhRM2FoL2ZCCmljY2JiZzNFNG9ub250WVhJbEk1elFt +czN1K3FCZGkwWnV3YURtNVk0QmV0T3EwYTNVeXhBc3VncVZGbnpUYmEKMXcvc0ZiM2Z3OUtlUS9p +bDRDWGticTg3bnpKZkRtRXlxSEdDQ1lYYmlqSEJ4bnE5OVBrcXdWcGFBaEhIRVcwbQp2V3lNVXZQ +Ulk2c0NnWUFidFVXUjBjS2ZZYk5kdndrVDhPUVdjQkJtU1dPZ2Nkdk1tQmQreTBjN0wvcGo0cFVu +Cjg1UGlFZThDVVZjck9NNU9JRUpvVUM1d0dhY3o2citQZndYVFlHRStFR212aHI1ejE4YXNsVkxR +Mk9RMkQ3QmYKZERPRlA2VmpnS05Zb0hTMDgwMmlaaWQ4UmZrTkRqOXdzR09xUmxPTXZuWGhBUTl1 +N3JsR3JCajhMd0tCZ0ZmbwpwaDk5bkg4ZUU5TjVMcmZXb1VaK2xvUVMyNThhSW5zRllCMjZsZ25z +WU1FcGdPOEp4SWI0eDVCR2ZmUGRWVUhoCmZEbVpieFExRDUvVWh2RGdVVnpheUk4c1lNZzFLSHBz +T2EwWjJ6Q3pLOHpTdnU2OEVnTklTQ20zSjVjUnBVZnQKVUhsRytLMTlLZk1HNmxNZmRHKzhLTVVU +dWV0SS9pSS9vM3dPekx2ekFvR0FJck9oMzBySHQ4d2l0N0VMQVJ5eAp3UGtwMkFSWVhyS2ZYM05F +UzRjNjd6U0FpKzNkQ2p4UnF5d3FUSTBnTGljeU1sajh6RXU5WUU5SXgvcmw4bFJaCm5ROUxabXF2 +N1FIemhMVFVDUEdnWlluZW12QnpvN3IwZVc4T2FnNTJkYmNKTzZGQnN6ZldyeHNrbS9mWDI1UmIK +V1B4aWgydmRSeTgxNGROUFcyNXJnZHc9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + +--===============3996062709511591449== +Content-Transfer-Encoding: base64 +Content-Type: text/plain +Content-Disposition: form-data; name="file6"; filename="client.txt" + +Y2xpZW50LnBlbSBhbmQgY2xpZW50LmtleSB3ZXJlIHJldHJpZXZlZCBmcm9tIGh0dHB0ZXN0ZXIg +ZG9ja2VyIGltYWdlOgoKYW5zaWJsZS9hbnNpYmxlQHNoYTI1NjpmYTVkZWY4YzI5NGZjNTA4MTNh +ZjEzMWMwYjU3Mzc1OTRkODUyYWJhYzljYmU3YmEzOGUxN2JmMWM4NDc2ZjNmCg== + +--===============3996062709511591449== +Content-Type: text/plain +Content-Disposition: form-data; name="form_field_1" + +form_value_1 +--===============3996062709511591449== +Content-Type: application/octet-stream +Content-Disposition: form-data; name="form_field_2" + +form_value_2 +--===============3996062709511591449== +Content-Type: text/html +Content-Disposition: form-data; name="form_field_3" + + 
+--===============3996062709511591449== +Content-Type: application/json +Content-Disposition: form-data; name="form_field_4" + +{"foo": "bar"} +--===============3996062709511591449==-- diff --git a/test/units/module_utils/urls/fixtures/netrc b/test/units/module_utils/urls/fixtures/netrc new file mode 100644 index 00000000..8f127170 --- /dev/null +++ b/test/units/module_utils/urls/fixtures/netrc @@ -0,0 +1,3 @@ +machine ansible.com +login user +password passwd diff --git a/test/units/module_utils/urls/test_RedirectHandlerFactory.py b/test/units/module_utils/urls/test_RedirectHandlerFactory.py new file mode 100644 index 00000000..aa3500a1 --- /dev/null +++ b/test/units/module_utils/urls/test_RedirectHandlerFactory.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.module_utils.urls import HAS_SSLCONTEXT, RedirectHandlerFactory, urllib_request, urllib_error +from ansible.module_utils.six import StringIO + +import pytest + + +@pytest.fixture +def urllib_req(): + req = urllib_request.Request( + 'https://ansible.com/' + ) + return req + + +@pytest.fixture +def request_body(): + return StringIO('TESTS') + + +def test_no_redirs(urllib_req, request_body): + handler = RedirectHandlerFactory('none', False) + inst = handler() + with pytest.raises(urllib_error.HTTPError): + inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + + +def test_urllib2_redir(urllib_req, request_body, mocker): + redir_request_mock = mocker.patch('ansible.module_utils.urls.urllib_request.HTTPRedirectHandler.redirect_request') + + handler = RedirectHandlerFactory('urllib2', False) + inst = handler() + inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + + redir_request_mock.assert_called_once_with(inst, urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + + +def test_all_redir(urllib_req, request_body, mocker): + req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod') + handler = RedirectHandlerFactory('all', False) + inst = handler() + inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True) + + +def test_all_redir_post(request_body, mocker): + handler = RedirectHandlerFactory('all', False) + inst = handler() + + req = urllib_request.Request( + 'https://ansible.com/', + 'POST' + ) + + req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod') + inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True) + + +def test_redir_headers_removal(urllib_req, request_body, mocker): + req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod') + handler = RedirectHandlerFactory('all', False) + inst = handler() + + urllib_req.headers = { + 'Content-Type': 'application/json', + 'Content-Length': 100, + 'Foo': 'bar', + } + + inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/') + 
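+# Note on the API exercised below (inferred from these tests, not a spec):
+# RedirectHandlerFactory(follow_redirects, validate_certs) returns a handler
+# *class* closed over those two settings, which is why each test first
+# instantiates it with handler() and then drives redirect_request() directly.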
+def test_no_redirs(urllib_req, request_body):
+    handler = RedirectHandlerFactory('none', False)
+    inst = handler()
+    with pytest.raises(urllib_error.HTTPError):
+        inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_urllib2_redir(urllib_req, request_body, mocker):
+    redir_request_mock = mocker.patch('ansible.module_utils.urls.urllib_request.HTTPRedirectHandler.redirect_request')
+
+    handler = RedirectHandlerFactory('urllib2', False)
+    inst = handler()
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+    redir_request_mock.assert_called_once_with(inst, urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_all_redir(urllib_req, request_body, mocker):
+    req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+    handler = RedirectHandlerFactory('all', False)
+    inst = handler()
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+    req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_all_redir_post(request_body, mocker):
+    handler = RedirectHandlerFactory('all', False)
+    inst = handler()
+
+    req = urllib_request.Request(
+        'https://ansible.com/',
+        'POST'
+    )
+
+    req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+    inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+    req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_redir_headers_removal(urllib_req, request_body, mocker):
+    req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+    handler = RedirectHandlerFactory('all', False)
+    inst = handler()
+
+    urllib_req.headers = {
+        'Content-Type': 'application/json',
+        'Content-Length': 100,
+        'Foo': 'bar',
+    }
+
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+    req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={'Foo': 'bar'}, method='GET', origin_req_host='ansible.com',
+                                     unverifiable=True)
+
+
+def test_redir_url_spaces(urllib_req, request_body, mocker):
+    req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+    handler = RedirectHandlerFactory('all', False)
+    inst = handler()
+
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/foo bar')
+
+    req_mock.assert_called_once_with('https://docs.ansible.com/foo%20bar', data=None, headers={}, method='GET', origin_req_host='ansible.com',
+                                     unverifiable=True)
+
+
+def test_redir_safe(urllib_req, request_body, mocker):
+    req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+    handler = RedirectHandlerFactory('safe', False)
+    inst = handler()
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+    req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_redir_safe_not_safe(request_body):
+    handler = RedirectHandlerFactory('safe', False)
+    inst = handler()
+
+    req = urllib_request.Request(
+        'https://ansible.com/',
+        'POST'
+    )
+
+    with pytest.raises(urllib_error.HTTPError):
+        inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_redir_no_error_on_invalid(urllib_req, request_body):
+    handler = RedirectHandlerFactory('invalid', False)
+    inst = handler()
+
+    with pytest.raises(urllib_error.HTTPError):
+        inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_redir_validate_certs(urllib_req, request_body, mocker):
+    opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request._opener')
+    handler = RedirectHandlerFactory('all', True)
+    inst = handler()
+    inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+    assert opener_mock.add_handler.call_count == int(not HAS_SSLCONTEXT)
+
+
+def test_redir_http_error_308_urllib2(urllib_req, request_body):
+    handler = RedirectHandlerFactory('urllib2', False)
+    inst = handler()
+
+    with pytest.raises(urllib_error.HTTPError):
+        inst.redirect_request(urllib_req, request_body, 308, '308 Permanent Redirect', {}, 'https://docs.ansible.com/')
diff --git a/test/units/module_utils/urls/test_Request.py b/test/units/module_utils/urls/test_Request.py
new file mode 100644
index 00000000..ebb6de56
--- /dev/null
+++ b/test/units/module_utils/urls/test_Request.py
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import os
+
+from ansible.module_utils.urls import (Request, open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, RequestWithMethod,
+                                       UnixHTTPHandler, UnixHTTPSConnection, httplib)
+from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
+
+import pytest
+from mock import call
+
+
+if HAS_SSLCONTEXT:
+    import ssl
+
+
+@pytest.fixture
+def urlopen_mock(mocker):
+    return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
+
+
+@pytest.fixture
+def install_opener_mock(mocker):
+    return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
+
+
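+# A sketch of the fallback behaviour this test relies on: Request() stores
+# per-instance defaults, and Request.open() resolves each keyword through
+# self._fallback(per_call_value, instance_default), so a per-call value of
+# None falls back to the constructor value. Headers are merged instead of
+# going through _fallback, hence the call_count of 14 asserted below.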
+def test_Request_fallback(urlopen_mock, install_opener_mock, mocker):
+    cookies = cookiejar.CookieJar()
+    request = Request(
+        headers={'foo': 'bar'},
+        use_proxy=False,
+        force=True,
+        timeout=100,
+        validate_certs=False,
+        url_username='user',
+        url_password='passwd',
+        http_agent='ansible-tests',
+        force_basic_auth=True,
+        follow_redirects='all',
+        client_cert='/tmp/client.pem',
+        client_key='/tmp/client.key',
+        cookies=cookies,
+        unix_socket='/foo/bar/baz.sock',
+        ca_path='/foo/bar/baz.pem',
+    )
+    fallback_mock = mocker.spy(request, '_fallback')
+
+    r = request.open('GET', 'https://ansible.com')
+
+    calls = [
+        call(None, False),  # use_proxy
+        call(None, True),  # force
+        call(None, 100),  # timeout
+        call(None, False),  # validate_certs
+        call(None, 'user'),  # url_username
+        call(None, 'passwd'),  # url_password
+        call(None, 'ansible-tests'),  # http_agent
+        call(None, True),  # force_basic_auth
+        call(None, 'all'),  # follow_redirects
+        call(None, '/tmp/client.pem'),  # client_cert
+        call(None, '/tmp/client.key'),  # client_key
+        call(None, cookies),  # cookies
+        call(None, '/foo/bar/baz.sock'),  # unix_socket
+        call(None, '/foo/bar/baz.pem'),  # ca_path
+    ]
+    fallback_mock.assert_has_calls(calls)
+
+    assert fallback_mock.call_count == 14  # All but headers use fallback
+
+    args = urlopen_mock.call_args[0]
+    assert args[1] is None  # data, this is handled in the Request not urlopen
+    assert args[2] == 100  # timeout
+
+    req = args[0]
+    assert req.headers == {
+        'Authorization': b'Basic dXNlcjpwYXNzd2Q=',
+        'Cache-control': 'no-cache',
+        'Foo': 'bar',
+        'User-agent': 'ansible-tests'
+    }
+    assert req.data is None
+    assert req.get_method() == 'GET'
+
+
+def test_Request_open(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/')
+    args = urlopen_mock.call_args[0]
+    assert args[1] is None  # data, this is handled in the Request not urlopen
+    assert args[2] == 10  # timeout
+
+    req = args[0]
+    assert req.headers == {}
+    assert req.data is None
+    assert req.get_method() == 'GET'
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    if not HAS_SSLCONTEXT:
+        expected_handlers = (
+            SSLValidationHandler,
+            RedirectHandlerFactory(),  # factory, get handler
+        )
+    else:
+        expected_handlers = (
+            RedirectHandlerFactory(),  # factory, get handler
+        )
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == len(expected_handlers)
+
+
+def test_Request_open_http(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://ansible.com/')
+    args = urlopen_mock.call_args[0]
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, SSLValidationHandler):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 0
+
+
+def test_Request_open_unix_socket(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://ansible.com/', unix_socket='/foo/bar/baz.sock')
+    args = urlopen_mock.call_args[0]
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, UnixHTTPHandler):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 1
+
+
+def test_Request_open_https_unix_socket(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/', unix_socket='/foo/bar/baz.sock')
+    args = urlopen_mock.call_args[0]
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, HTTPSClientAuthHandler):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 1
+
+    inst = found_handlers[0]._build_https_connection('foo')
+    assert isinstance(inst, UnixHTTPSConnection)
+
+
+def test_Request_open_ftp(urlopen_mock, install_opener_mock, mocker):
+    mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
+
+    # Using the ftp scheme should prevent the AssertionError side effect from firing
+    r = Request().open('GET', 'ftp://foo@ansible.com/')
+
+
+def test_Request_open_headers(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://ansible.com/', headers={'Foo': 'bar'})
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert req.headers == {'Foo': 'bar'}
+
+
+def test_Request_open_username(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://ansible.com/', url_username='user')
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    expected_handlers = (
+        urllib_request.HTTPBasicAuthHandler,
+        urllib_request.HTTPDigestAuthHandler,
+    )
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, expected_handlers):
+            found_handlers.append(handler)
+    assert len(found_handlers) == 2
+    assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
+
+
+def test_Request_open_username_in_url(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://user2@ansible.com/')
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    expected_handlers = (
+        urllib_request.HTTPBasicAuthHandler,
+        urllib_request.HTTPDigestAuthHandler,
+    )
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, expected_handlers):
+            found_handlers.append(handler)
+    assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
+
+
+def test_Request_open_username_force_basic(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    expected_handlers = (
+        urllib_request.HTTPBasicAuthHandler,
+        urllib_request.HTTPDigestAuthHandler,
+    )
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, expected_handlers):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 0
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
+
+
+def test_Request_open_auth_in_netloc(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'http://user:passwd@ansible.com/')
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert req.get_full_url() == 'http://ansible.com/'
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    expected_handlers = (
+        urllib_request.HTTPBasicAuthHandler,
+        urllib_request.HTTPDigestAuthHandler,
+    )
+
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, expected_handlers):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 2
+
+
+def test_Request_open_netrc(urlopen_mock, install_opener_mock, monkeypatch):
+    here = os.path.dirname(__file__)
+
+    monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc'))
+    r = Request().open('GET', 'http://ansible.com/')
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
+
+    r = Request().open('GET', 'http://foo.ansible.com/')
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert 'Authorization' not in req.headers
+
+    monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc.nonexistant'))
+    r = Request().open('GET', 'http://ansible.com/')
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+    assert 'Authorization' not in req.headers
+
+
+def test_Request_open_no_proxy(urlopen_mock, install_opener_mock, mocker):
+    build_opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request.build_opener')
+
+    r = Request().open('GET', 'http://ansible.com/', use_proxy=False)
+
+    handlers = build_opener_mock.call_args[0]
+    found_handlers = []
+    for handler in handlers:
+        if isinstance(handler, urllib_request.ProxyHandler):
+            found_handlers.append(handler)
+
+    assert len(found_handlers) == 1
+
+
+@pytest.mark.skipif(not HAS_SSLCONTEXT, reason="requires SSLContext")
+def test_Request_open_no_validate_certs(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/', validate_certs=False)
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    ssl_handler = None
+    for handler in handlers:
+        if isinstance(handler, HTTPSClientAuthHandler):
+            ssl_handler = handler
+            break
+
+    assert ssl_handler is not None
+
+    inst = ssl_handler._build_https_connection('foo')
+    assert isinstance(inst, httplib.HTTPSConnection)
+
+    context = ssl_handler._context
+    assert context.protocol == ssl.PROTOCOL_SSLv23
+    if ssl.OP_NO_SSLv2:
+        assert context.options & ssl.OP_NO_SSLv2
+    assert context.options & ssl.OP_NO_SSLv3
+    assert context.verify_mode == ssl.CERT_NONE
+    assert context.check_hostname is False
+
+
+def test_Request_open_client_cert(urlopen_mock, install_opener_mock):
+    here = os.path.dirname(__file__)
+
+    client_cert = os.path.join(here, 'fixtures/client.pem')
+    client_key = os.path.join(here, 'fixtures/client.key')
+
+    r = Request().open('GET', 'https://ansible.com/', client_cert=client_cert, client_key=client_key)
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    ssl_handler = None
+    for handler in handlers:
+        if isinstance(handler, HTTPSClientAuthHandler):
+            ssl_handler = handler
+            break
+
+    assert ssl_handler is not None
+
+    assert ssl_handler.client_cert == client_cert
+    assert ssl_handler.client_key == client_key
+
+    https_connection = ssl_handler._build_https_connection('ansible.com')
+
+    assert https_connection.key_file == client_key
+    assert https_connection.cert_file == client_cert
+
+
+def test_Request_open_cookies(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/', cookies=cookiejar.CookieJar())
+
+    opener = install_opener_mock.call_args[0][0]
+    handlers = opener.handlers
+
+    cookies_handler = None
+    for handler in handlers:
+        if isinstance(handler, urllib_request.HTTPCookieProcessor):
+            cookies_handler = handler
+            break
+
+    assert cookies_handler is not None
+
+
+def test_Request_open_invalid_method(urlopen_mock, install_opener_mock):
+    r = Request().open('UNKNOWN', 'https://ansible.com/')
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+
+    assert req.data is None
+    assert req.get_method() == 'UNKNOWN'
+    # assert r.status == 504
+
+
+def test_Request_open_custom_method(urlopen_mock, install_opener_mock):
+    r = Request().open('DELETE', 'https://ansible.com/')
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+
+    assert isinstance(req, RequestWithMethod)
+
+
+def test_Request_open_user_agent(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/', http_agent='ansible-tests')
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+
+    assert req.headers.get('User-agent') == 'ansible-tests'
+
+
+def test_Request_open_force(urlopen_mock, install_opener_mock):
+    r = Request().open('GET', 'https://ansible.com/', force=True, last_mod_time=datetime.datetime.now())
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+
+    assert req.headers.get('Cache-control') == 'no-cache'
+    assert 'If-modified-since' not in req.headers
+
+
+def test_Request_open_last_mod(urlopen_mock, install_opener_mock):
+    now = datetime.datetime.now()
+    r = Request().open('GET', 'https://ansible.com/', last_mod_time=now)
+
+    args = urlopen_mock.call_args[0]
+    req = args[0]
+
+    assert req.headers.get('If-modified-since') == now.strftime('%a, %d %b %Y %H:%M:%S GMT')
+
+
+def test_Request_open_headers_not_dict(urlopen_mock, install_opener_mock):
+    with pytest.raises(ValueError):
+        Request().open('GET', 'https://ansible.com/', headers=['bob'])
+
+
+def test_Request_init_headers_not_dict(urlopen_mock, install_opener_mock):
+    with pytest.raises(ValueError):
+        Request(headers=['bob'])
+
+
+@pytest.mark.parametrize('method,kwargs', [
+    ('get', {}),
+    ('options', {}),
+    ('head', {}),
+    ('post', {'data': None}),
+    ('put', {'data': None}),
+    ('patch', {'data': None}),
+    ('delete', {}),
+])
+def test_methods(method, kwargs, mocker):
+    expected = method.upper()
+    open_mock = mocker.patch('ansible.module_utils.urls.Request.open')
+    request = Request()
+    getattr(request, method)('https://ansible.com')
+    open_mock.assert_called_once_with(expected, 'https://ansible.com', **kwargs)
+
+
+def test_open_url(urlopen_mock, install_opener_mock, mocker):
+    req_mock = mocker.patch('ansible.module_utils.urls.Request.open')
+    open_url('https://ansible.com/')
+    req_mock.assert_called_once_with('GET', 'https://ansible.com/', data=None, headers=None, use_proxy=True,
+                                     force=False, last_mod_time=None, timeout=10, validate_certs=True,
+                                     url_username=None, url_password=None, http_agent=None,
+                                     force_basic_auth=False, follow_redirects='urllib2',
+                                     client_cert=None, client_key=None, cookies=None, use_gssapi=False,
+                                     unix_socket=None, ca_path=None, unredirected_headers=None)
diff --git a/test/units/module_utils/urls/test_RequestWithMethod.py b/test/units/module_utils/urls/test_RequestWithMethod.py
new file mode 100644
index 00000000..05105190
--- /dev/null
+++ b/test/units/module_utils/urls/test_RequestWithMethod.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.urls import RequestWithMethod
+
+
+def test_RequestWithMethod():
+    get = RequestWithMethod('https://ansible.com/', 'GET')
+    assert get.get_method() == 'GET'
+
+    post = RequestWithMethod('https://ansible.com/', 'POST', data='foo', headers={'Bar': 'baz'})
+    assert post.get_method() == 'POST'
+    assert post.get_full_url() == 'https://ansible.com/'
+    assert post.data == 'foo'
+    assert post.headers == {'Bar': 'baz'}
+
+    none = RequestWithMethod('https://ansible.com/', '')
+    assert none.get_method() == 'GET'
diff --git a/test/units/module_utils/urls/test_fetch_url.py b/test/units/module_utils/urls/test_fetch_url.py
new file mode 100644
index 00000000..9cac2a35
--- /dev/null
+++ b/test/units/module_utils/urls/test_fetch_url.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import socket
+
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.six.moves.http_cookiejar import Cookie
+from ansible.module_utils.six.moves.http_client import HTTPMessage
+from ansible.module_utils.urls import fetch_url, urllib_error, ConnectionError, NoSSLError, httplib
+
+import pytest
+from mock import MagicMock
+
+
+class AnsibleModuleExit(Exception):
+    def __init__(self, *args, **kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+
+class ExitJson(AnsibleModuleExit):
+    pass
+
+
+class FailJson(AnsibleModuleExit):
+    pass
+
+
+@pytest.fixture
+def open_url_mock(mocker):
+    return mocker.patch('ansible.module_utils.urls.open_url')
+
+
+@pytest.fixture
+def fake_ansible_module():
+    return FakeAnsibleModule()
+
+
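+# A real AnsibleModule terminates the process from exit_json()/fail_json();
+# this fake raises ExitJson/FailJson instead, so tests can catch the outcome
+# with pytest.raises() and inspect the kwargs that were passed.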
+class FakeAnsibleModule:
+    def __init__(self):
+        self.params = {}
+        self.tmpdir = None
+
+    def exit_json(self, *args, **kwargs):
+        raise ExitJson(*args, **kwargs)
+
+    def fail_json(self, *args, **kwargs):
+        raise FailJson(*args, **kwargs)
+
+
+def test_fetch_url_no_urlparse(mocker, fake_ansible_module):
+    mocker.patch('ansible.module_utils.urls.HAS_URLPARSE', new=False)
+
+    with pytest.raises(FailJson):
+        fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+
+def test_fetch_url(open_url_mock, fake_ansible_module):
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    dummy, kwargs = open_url_mock.call_args
+
+    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
+                                          follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
+                                          http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
+                                          use_proxy=True, validate_certs=True, use_gssapi=False, unix_socket=None, ca_path=None)
+
+
+def test_fetch_url_params(open_url_mock, fake_ansible_module):
+    fake_ansible_module.params = {
+        'validate_certs': False,
+        'url_username': 'user',
+        'url_password': 'passwd',
+        'http_agent': 'ansible-test',
+        'force_basic_auth': True,
+        'follow_redirects': 'all',
+        'client_cert': 'client.pem',
+        'client_key': 'client.key',
+    }
+
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    dummy, kwargs = open_url_mock.call_args
+
+    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
+                                          follow_redirects='all', force=False, force_basic_auth=True, headers=None,
+                                          http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
+                                          use_proxy=True, validate_certs=False, use_gssapi=False, unix_socket=None, ca_path=None)
+
+
+def test_fetch_url_cookies(mocker, fake_ansible_module):
+    def make_cookies(*args, **kwargs):
+        cookies = kwargs['cookies']
+        r = MagicMock()
+        try:
+            r.headers = HTTPMessage()
+            add_header = r.headers.add_header
+        except TypeError:
+            # PY2
+            r.headers = HTTPMessage(StringIO())
+            add_header = r.headers.addheader
+        r.info.return_value = r.headers
+        for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
+            cookie = Cookie(
+                version=0,
+                name=name,
+                value=value,
+                port=None,
+                port_specified=False,
+                domain="ansible.com",
+                domain_specified=True,
+                domain_initial_dot=False,
+                path="/",
+                path_specified=True,
+                secure=False,
+                expires=None,
+                discard=False,
+                comment=None,
+                comment_url=None,
+                rest=None
+            )
+            cookies.set_cookie(cookie)
+            add_header('Set-Cookie', '%s=%s' % (name, value))
+
+        return r
+
+    mocker = mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)
+
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
+    # Python sorts cookies in order of most specific (ie. longest) path first;
+    # items with the same path are reversed from response order
+    assert info['cookies_string'] == 'Baz=qux; Foo=bar'
+    # The key here has a `-` as opposed to what we see in the `uri` module that converts to `_`
+    # Note: this is response order, which differs from cookies_string
+    assert info['set-cookie'] == 'Foo=bar, Baz=qux'
+
+
+def test_fetch_url_nossl(open_url_mock, fake_ansible_module, mocker):
+    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='notredhat')
+
+    open_url_mock.side_effect = NoSSLError
+    with pytest.raises(FailJson) as excinfo:
+        fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert 'python-ssl' not in excinfo.value.kwargs['msg']
+
+    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='redhat')
+
+    open_url_mock.side_effect = NoSSLError
+    with pytest.raises(FailJson) as excinfo:
+        fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert 'python-ssl' in excinfo.value.kwargs['msg']
+    assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+    assert excinfo.value.kwargs['status'] == -1
+
+
+def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = ConnectionError('TESTS')
+    with pytest.raises(FailJson) as excinfo:
+        fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert excinfo.value.kwargs['msg'] == 'TESTS'
+    assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+    assert excinfo.value.kwargs['status'] == -1
+
+    open_url_mock.side_effect = ValueError('TESTS')
+    with pytest.raises(FailJson) as excinfo:
+        fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert excinfo.value.kwargs['msg'] == 'TESTS'
+    assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+    assert excinfo.value.kwargs['status'] == -1
+
+
+def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = urllib_error.HTTPError(
+        'http://ansible.com/',
+        500,
+        'Internal Server Error',
+        {'Content-Type': 'application/json'},
+        StringIO('TESTS')
+    )
+
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+    assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
+                    'status': 500, 'url': 'http://ansible.com/', 'content-type': 'application/json'}
+
+
+def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = urllib_error.URLError('TESTS')
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+    assert info == {'msg': 'Request failed: <urlopen error TESTS>', 'status': -1, 'url': 'http://ansible.com/'}
+
+
+def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = socket.error('TESTS')
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+    assert info == {'msg': 'Connection failure: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
+
+
+def test_fetch_url_exception(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = Exception('TESTS')
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+    exception = info.pop('exception')
+    assert info == {'msg': 'An unknown error occurred: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
+    assert "Exception: TESTS" in exception
+
+
+def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
+    open_url_mock.side_effect = httplib.BadStatusLine('TESTS')
+    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+    assert info == {'msg': 'Connection failure: connection was closed before a valid response was received: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
diff --git a/test/units/module_utils/urls/test_generic_urlparse.py b/test/units/module_utils/urls/test_generic_urlparse.py
new file mode 100644
index 00000000..77537268
--- /dev/null
+++ b/test/units/module_utils/urls/test_generic_urlparse.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.urls import generic_urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+
+
+def test_generic_urlparse():
+    url = 'https://ansible.com/blog'
+    parts = urlparse(url)
+    generic_parts = generic_urlparse(parts)
+    assert generic_parts.as_list() == list(parts)
+
+    assert urlunparse(generic_parts.as_list()) == url
+
+
+def test_generic_urlparse_netloc():
+    url = 'https://ansible.com:443/blog'
+    parts = urlparse(url)
+    generic_parts = generic_urlparse(parts)
+    assert generic_parts.hostname == parts.hostname
+    assert generic_parts.hostname == 'ansible.com'
+    assert generic_parts.port == 443
+    assert urlunparse(generic_parts.as_list()) == url
+
+
+def test_generic_urlparse_no_netloc():
+    url = 'https://user:passwd@ansible.com:443/blog'
+    parts = list(urlparse(url))
+    generic_parts = generic_urlparse(parts)
+    assert generic_parts.hostname == 'ansible.com'
+    assert generic_parts.port == 443
+    assert generic_parts.username == 'user'
+    assert generic_parts.password == 'passwd'
+    assert urlunparse(generic_parts.as_list()) == url
+
+
+def test_generic_urlparse_no_netloc_no_auth():
+    url = 'https://ansible.com:443/blog'
+    parts = list(urlparse(url))
+    generic_parts = generic_urlparse(parts)
+    assert generic_parts.username is None
+    assert generic_parts.password is None
+
+
+def test_generic_urlparse_no_netloc_no_host():
+    url = '/blog'
+    parts = list(urlparse(url))
+    generic_parts = generic_urlparse(parts)
+    assert generic_parts.username is None
+    assert generic_parts.password is None
+    assert generic_parts.port is None
+    assert generic_parts.hostname == ''
diff --git a/test/units/module_utils/urls/test_prepare_multipart.py b/test/units/module_utils/urls/test_prepare_multipart.py
new file mode 100644
index 00000000..e96aa454
--- /dev/null
+++ b/test/units/module_utils/urls/test_prepare_multipart.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+
+from io import StringIO
+
+from email.message import Message
+
+import pytest
+
+from ansible.module_utils.urls import prepare_multipart
+
+
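+# Field specs accepted by prepare_multipart, as exercised below: a bare string
+# value, or a dict with optional 'content', 'filename' and 'mime_type' keys.
+# A dict carrying only 'filename' is read from disk, and the MIME type is
+# guessed from the extension when one is not given (behaviour inferred from
+# these tests).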
+def test_prepare_multipart():
+    fixture_boundary = b'===============3996062709511591449=='
+
+    here = os.path.dirname(__file__)
+    multipart = os.path.join(here, 'fixtures/multipart.txt')
+
+    client_cert = os.path.join(here, 'fixtures/client.pem')
+    client_key = os.path.join(here, 'fixtures/client.key')
+    client_txt = os.path.join(here, 'fixtures/client.txt')
+    fields = {
+        'form_field_1': 'form_value_1',
+        'form_field_2': {
+            'content': 'form_value_2',
+        },
+        'form_field_3': {
+            'content': '',
+            'mime_type': 'text/html',
+        },
+        'form_field_4': {
+            'content': '{"foo": "bar"}',
+            'mime_type': 'application/json',
+        },
+        'file1': {
+            'content': 'file_content_1',
+            'filename': 'fake_file1.txt',
+        },
+        'file2': {
+            'content': '',
+            'mime_type': 'text/html',
+            'filename': 'fake_file2.html',
+        },
+        'file3': {
+            'content': '{"foo": "bar"}',
+            'mime_type': 'application/json',
+            'filename': 'fake_file3.json',
+        },
+        'file4': {
+            'filename': client_cert,
+            'mime_type': 'text/plain',
+        },
+        'file5': {
+            'filename': client_key,
+        },
+        'file6': {
+            'filename': client_txt,
+        },
+    }
+
+    content_type, b_data = prepare_multipart(fields)
+
+    headers = Message()
+    headers['Content-Type'] = content_type
+    assert headers.get_content_type() == 'multipart/form-data'
+    boundary = headers.get_boundary()
+    assert boundary is not None
+
+    with open(multipart, 'rb') as f:
+        b_expected = f.read().replace(fixture_boundary, boundary.encode())
+
+    # Depending on Python version, there may or may not be a trailing newline
+    assert b_data.rstrip(b'\r\n') == b_expected.rstrip(b'\r\n')
+
+
+def test_wrong_type():
+    pytest.raises(TypeError, prepare_multipart, 'foo')
+    pytest.raises(TypeError, prepare_multipart, {'foo': None})
+
+
+def test_empty():
+    pytest.raises(ValueError, prepare_multipart, {'foo': {}})
+
+
+def test_unknown_mime(mocker):
+    fields = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
+    mocker.patch('mimetypes.guess_type', return_value=(None, None))
+    content_type, b_data = prepare_multipart(fields)
+    assert b'Content-Type: application/octet-stream' in b_data
+
+
+def test_bad_mime(mocker):
+    fields = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
+    mocker.patch('mimetypes.guess_type', side_effect=TypeError)
+    content_type, b_data = prepare_multipart(fields)
+    assert b'Content-Type: application/octet-stream' in b_data
diff --git a/test/units/module_utils/urls/test_urls.py b/test/units/module_utils/urls/test_urls.py
new file mode 100644
index 00000000..69c1b824
--- /dev/null
+++ b/test/units/module_utils/urls/test_urls.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils import urls
+from ansible.module_utils._text import to_native
+
+import pytest
+
+
+def test_build_ssl_validation_error(mocker):
+    mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
+    mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=False)
+    mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=False)
+    with pytest.raises(urls.SSLValidationError) as excinfo:
+        urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+    assert 'python >= 2.7.9' in to_native(excinfo.value)
+    assert 'the python executable used' in to_native(excinfo.value)
+    assert 'urllib3' in to_native(excinfo.value)
+    assert 'python >= 2.6' in to_native(excinfo.value)
+    assert 'validate_certs=False' in to_native(excinfo.value)
+
+    mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
+    with pytest.raises(urls.SSLValidationError) as excinfo:
+        urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+    assert 'validate_certs=False' in to_native(excinfo.value)
+
+    mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
+    mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=True)
+    mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=True)
+
+    mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
+    with pytest.raises(urls.SSLValidationError) as excinfo:
+        urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+    assert 'urllib3' not in to_native(excinfo.value)
+
+    with pytest.raises(urls.SSLValidationError) as excinfo:
+        urls.build_ssl_validation_error('hostname', 'port', 'paths', exc='BOOM')
+
+    assert 'BOOM' in to_native(excinfo.value)
+
+
+def test_maybe_add_ssl_handler(mocker):
+    mocker.patch.object(urls, 'HAS_SSL', new=False)
+    with pytest.raises(urls.NoSSLError):
+        urls.maybe_add_ssl_handler('https://ansible.com/', True)
+
+    mocker.patch.object(urls, 'HAS_SSL', new=True)
+    url = 'https://user:passwd@ansible.com/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == 'ansible.com'
+    assert handler.port == 443
+
+    url = 'https://ansible.com:4433/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == 'ansible.com'
+    assert handler.port == 4433
+
+    url = 'https://user:passwd@ansible.com:4433/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == 'ansible.com'
+    assert handler.port == 4433
+
+    url = 'https://ansible.com/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == 'ansible.com'
+    assert handler.port == 443
+
+    url = 'http://ansible.com/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler is None
+
+    url = 'https://[2a00:16d8:0:7::205]:4443/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == '2a00:16d8:0:7::205'
+    assert handler.port == 4443
+
+    url = 'https://[2a00:16d8:0:7::205]/'
+    handler = urls.maybe_add_ssl_handler(url, True)
+    assert handler.hostname == '2a00:16d8:0:7::205'
+    assert handler.port == 443
+
+
+def test_basic_auth_header():
+    header = urls.basic_auth_header('user', 'passwd')
+    assert header == b'Basic dXNlcjpwYXNzd2Q='
+
+
+def test_ParseResultDottedDict():
+    url = 'https://ansible.com/blog'
+    parts = urls.urlparse(url)
+    dotted_parts = urls.ParseResultDottedDict(parts._asdict())
+    assert parts[0] == dotted_parts.scheme
+
+    assert dotted_parts.as_list() == list(parts)
+
+
+def test_unix_socket_patch_httpconnection_connect(mocker):
+    unix_conn = mocker.patch.object(urls.UnixHTTPConnection, 'connect')
+    conn = urls.httplib.HTTPConnection('ansible.com')
+    with urls.unix_socket_patch_httpconnection_connect():
+        conn.connect()
+    assert unix_conn.call_count == 1
diff --git a/test/units/modules/__init__.py b/test/units/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/units/modules/conftest.py b/test/units/modules/conftest.py
new file mode 100644
index 00000000..a7d1e047
--- /dev/null
+++ b/test/units/modules/conftest.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
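+# AnsibleModule reads its arguments from the JSON byte string stored in
+# ansible.module_utils.basic._ANSIBLE_ARGS; the fixture below serializes
+# request.param into that global so the module under test parses it exactly
+# as if it had been invoked by Ansible.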
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+    if isinstance(request.param, string_types):
+        args = request.param
+    elif isinstance(request.param, MutableMapping):
+        if 'ANSIBLE_MODULE_ARGS' not in request.param:
+            request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+        if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+            request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+        if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+            request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+        args = json.dumps(request.param)
+    else:
+        raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+    mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
diff --git a/test/units/modules/test_apt.py b/test/units/modules/test_apt.py
new file mode 100644
index 00000000..3daf3c11
--- /dev/null
+++ b/test/units/modules/test_apt.py
@@ -0,0 +1,53 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import sys
+
+from units.compat import mock
+from units.compat import unittest
+
+try:
+    from ansible.modules.apt import (
+        expand_pkgspec_from_fnmatches,
+    )
+except Exception:
+    # Need some more module_utils work (porting urls.py) before we can test
+    # modules. So don't error out in this case.
+    if sys.version_info[0] >= 3:
+        pass
+
+
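+# expand_pkgspec_from_fnmatches() expands fnmatch wildcards in the package
+# name against the cache; a spec whose name part contains no wildcard is
+# returned unchanged even if its version part has one (behaviour inferred
+# from the cases below).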
+class AptExpandPkgspecTestCase(unittest.TestCase):
+
+    def setUp(self):
+        FakePackage = collections.namedtuple("Package", ("name",))
+        self.fake_cache = [
+            FakePackage("apt"),
+            FakePackage("apt-utils"),
+            FakePackage("not-selected"),
+        ]
+
+    def test_trivial(self):
+        foo = ["apt"]
+        self.assertEqual(
+            expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
+
+    def test_version_wildcard(self):
+        foo = ["apt=1.0*"]
+        self.assertEqual(
+            expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
+
+    def test_pkgname_wildcard_version_wildcard(self):
+        foo = ["apt*=1.0*"]
+        m_mock = mock.Mock()
+        self.assertEqual(
+            expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
+            ['apt', 'apt-utils'])
+
+    def test_pkgname_expands(self):
+        foo = ["apt*"]
+        m_mock = mock.Mock()
+        self.assertEqual(
+            expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
+            ["apt", "apt-utils"])
diff --git a/test/units/modules/test_async_wrapper.py b/test/units/modules/test_async_wrapper.py
new file mode 100644
index 00000000..762fc2fb
--- /dev/null
+++ b/test/units/modules/test_async_wrapper.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import json
+import shutil
+import tempfile
+
+import pytest
+
+from units.compat.mock import patch, MagicMock
+from ansible.modules import async_wrapper
+
+from pprint import pprint
+
+
+class TestAsyncWrapper:
+
+    def test_run_module(self, monkeypatch):
+
+        def mock_get_interpreter(module_path):
+            return ['/usr/bin/python']
+
+        module_result = {'rc': 0}
+        module_lines = [
+            '#!/usr/bin/python',
+            'import sys',
+            'sys.stderr.write("stderr stuff")',
+            "print('%s')" % json.dumps(module_result)
+        ]
+        module_data = '\n'.join(module_lines) + '\n'
+        module_data = module_data.encode('utf-8')
+
+        workdir = tempfile.mkdtemp()
+        fh, fn = tempfile.mkstemp(dir=workdir)
+
+        with open(fn, 'wb') as f:
+            f.write(module_data)
+
+        command = fn
+        jobid = 0
+        jobpath = os.path.join(os.path.dirname(command), 'job')
+
+        monkeypatch.setattr(async_wrapper, '_get_interpreter', mock_get_interpreter)
+
+        res = async_wrapper._run_module(command, jobid, jobpath)
+
+        with open(os.path.join(workdir, 'job'), 'r') as f:
+            jres = json.loads(f.read())
+
+        shutil.rmtree(workdir)
+
+        assert jres.get('rc') == 0
+        assert jres.get('stderr') == 'stderr stuff'
diff --git a/test/units/modules/test_copy.py b/test/units/modules/test_copy.py
new file mode 100644
index 00000000..20c309b6
--- /dev/null
+++ b/test/units/modules/test_copy.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright:
+#   (c) 2018 Ansible Project
+# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.modules.copy import AnsibleModuleError, split_pre_existing_dir
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+THREE_DIRS_DATA = (('/dir1/dir2',
+                    # 0 existing dirs: error (because / should always exist)
+                    None,
+                    # 1 existing dir:
+                    ('/', ['dir1', 'dir2']),
+                    # 2 existing dirs:
+                    ('/dir1', ['dir2']),
+                    # 3 existing dirs:
+                    ('/dir1/dir2', [])
+                    ),
+                   ('/dir1/dir2/',
+                    # 0 existing dirs: error (because / should always exist)
+                    None,
+                    # 1 existing dir:
+                    ('/', ['dir1', 'dir2']),
+                    # 2 existing dirs:
+                    ('/dir1', ['dir2']),
+                    # 3 existing dirs:
+                    ('/dir1/dir2', [])
+                    ),
+                   )
+
+
+TWO_DIRS_DATA = (('dir1/dir2',
+                  # 0 existing dirs:
+                  ('.', ['dir1', 'dir2']),
+                  # 1 existing dir:
+                  ('dir1', ['dir2']),
+                  # 2 existing dirs:
+                  ('dir1/dir2', []),
+                  # 3 existing dirs: Same as 2 because we never get to the third
+                  ),
+                 ('dir1/dir2/',
+                  # 0 existing dirs:
+                  ('.', ['dir1', 'dir2']),
+                  # 1 existing dir:
+                  ('dir1', ['dir2']),
+                  # 2 existing dirs:
+                  ('dir1/dir2', []),
+                  # 3 existing dirs: Same as 2 because we never get to the third
+                  ),
+                 ('/dir1',
+                  # 0 existing dirs: error (because / should always exist)
+                  None,
+                  # 1 existing dir:
+                  ('/', ['dir1']),
+                  # 2 existing dirs:
+                  ('/dir1', []),
+                  # 3 existing dirs: Same as 2 because we never get to the third
+                  ),
+                 ('/dir1/',
+                  # 0 existing dirs: error (because / should always exist)
+                  None,
+                  # 1 existing dir:
+                  ('/', ['dir1']),
+                  # 2 existing dirs:
+                  ('/dir1', []),
+                  # 3 existing dirs: Same as 2 because we never get to the third
+                  ),
+                 ) + THREE_DIRS_DATA
+
+
+ONE_DIR_DATA = (('dir1',
+                 # 0 existing dirs:
+                 ('.', ['dir1']),
+                 # 1 existing dir:
+                 ('dir1', []),
+                 # 2 existing dirs: Same as 1 because we never get to the third
+                 ),
+                ('dir1/',
+                 # 0 existing dirs:
+                 ('.', ['dir1']),
+                 # 1 existing dir:
+                 ('dir1', []),
+                 # 2 existing dirs: Same as 1 because we never get to the third
+                 ),
+                ) + TWO_DIRS_DATA
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[4]) for d in THREE_DIRS_DATA))
+def test_split_pre_existing_dir_three_levels_exist(directory, expected, mocker):
+    mocker.patch('os.path.exists', side_effect=[True, True, True])
+    assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[3]) for d in TWO_DIRS_DATA))
+def test_split_pre_existing_dir_two_levels_exist(directory, expected, mocker):
+    mocker.patch('os.path.exists', side_effect=[True, True, False])
+    assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[2]) for d in ONE_DIR_DATA))
+def test_split_pre_existing_dir_one_level_exists(directory, expected, mocker):
+    mocker.patch('os.path.exists', side_effect=[True, False, False])
+    assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory', (d[0] for d in ONE_DIR_DATA if d[1] is None))
+def test_split_pre_existing_dir_root_does_not_exist(directory, mocker):
+    mocker.patch('os.path.exists', return_value=False)
+    with pytest.raises(AnsibleModuleError) as excinfo:
+        split_pre_existing_dir(directory)
+    assert excinfo.value.results['msg'].startswith("The '/' directory doesn't exist on this machine.")
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[1]) for d in ONE_DIR_DATA if not d[0].startswith('/')))
+def test_split_pre_existing_dir_working_dir_exists(directory, expected, mocker):
+    mocker.patch('os.path.exists', return_value=False)
+    assert split_pre_existing_dir(directory) == expected
+
+
+#
+# Info helpful for making new test cases:
+#
+# base_mode = {'dir no perms': 0o040000,
+#              'file no perms': 0o100000,
+#              'dir all perms': 0o040000 | 0o777,
+#              'file all perms': 0o100000 | 0o777}
+#
+# perm_bits = {'x': 0b001,
+#              'w': 0b010,
+#              'r': 0b100}
+#
+# role_shift = {'u': 6,
+#               'g': 3,
+#               'o': 0}
+
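+# Worked example (illustrative): starting from a bare directory, 0o040000,
+# u+rwx sets rwx (0b111 == 0o7) shifted by role_shift['u'] == 6 bits:
+# 0o7 << 6 == 0o700. Granting all three roles ORs 0o700 | 0o070 | 0o007
+# == 0o777, which matches the first rows of DATA below.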
+def test_split_pre_existing_dir_one_level_exists(directory, expected, mocker):
+    mocker.patch('os.path.exists', side_effect=[True, False, False])
+    assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory', (d[0] for d in ONE_DIR_DATA if d[1] is None))
+def test_split_pre_existing_dir_root_does_not_exist(directory, mocker):
+    mocker.patch('os.path.exists', return_value=False)
+    with pytest.raises(AnsibleModuleError) as excinfo:
+        split_pre_existing_dir(directory)
+    assert excinfo.value.results['msg'].startswith("The '/' directory doesn't exist on this machine.")
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[1]) for d in ONE_DIR_DATA if not d[0].startswith('/')))
+def test_split_pre_existing_dir_working_dir_exists(directory, expected, mocker):
+    mocker.patch('os.path.exists', return_value=False)
+    assert split_pre_existing_dir(directory) == expected
+
+
+#
+# Info helpful for making new test cases:
+#
+# base_mode = {'dir no perms': 0o040000,
+#              'file no perms': 0o100000,
+#              'dir all perms': 0o040000 | 0o777,
+#              'file all perms': 0o100000 | 0o777}
+#
+# perm_bits = {'x': 0b001,
+#              'w': 0b010,
+#              'r': 0b100}
+#
+# role_shift = {'u': 6,
+#               'g': 3,
+#               'o': 0}
+
+DATA = (  # Going from no permissions to setting all for user, group, and/or other
+    (0o040000, u'a+rwx', 0o0777),
+    (0o040000, u'u+rwx,g+rwx,o+rwx', 0o0777),
+    (0o040000, u'o+rwx', 0o0007),
+    (0o040000, u'g+rwx', 0o0070),
+    (0o040000, u'u+rwx', 0o0700),
+
+    # Going from all permissions to none for user, group, and/or other
+    (0o040777, u'a-rwx', 0o0000),
+    (0o040777, u'u-rwx,g-rwx,o-rwx', 0o0000),
+    (0o040777, u'o-rwx', 0o0770),
+    (0o040777, u'g-rwx', 0o0707),
+    (0o040777, u'u-rwx', 0o0077),
+
+    # now using absolute assignment from None to a set of perms
+    (0o040000, u'a=rwx', 0o0777),
+    (0o040000, u'u=rwx,g=rwx,o=rwx', 0o0777),
+    (0o040000, u'o=rwx', 0o0007),
+    (0o040000, u'g=rwx', 0o0070),
+    (0o040000, u'u=rwx', 0o0700),
+
+    # X effect on files and dirs
+    (0o040000, u'a+X', 0o0111),
+    (0o100000, u'a+X', 0),
+    (0o040000, u'a=X', 0o0111),
+    (0o100000, u'a=X', 0),
+    (0o040777, u'a-X', 0o0666),
+    # Same as chmod, but is it a bug?
+    # chmod a-X statfile  <== removes execute from statfile
+    (0o100777, u'a-X', 0o0666),
+
+    # Multiple permissions
+    (0o040000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0755),
+    (0o100000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0644),
+)
+
+UMASK_DATA = (
+    (0o100000, '+rwx', 0o770),
+    (0o100777, '-rwx', 0o007),
+)
+
+INVALID_DATA = (
+    (0o040000, u'a=foo', "bad symbolic permission for mode: a=foo"),
+    (0o040000, u'f=rwx', "bad symbolic permission for mode: f=rwx"),
+)
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', DATA)
+def test_good_symbolic_modes(mocker, stat_info, mode_string, expected):
+    mock_stat = mocker.MagicMock()
+    mock_stat.st_mode = stat_info
+    assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', UMASK_DATA)
+def test_umask_with_symbolic_modes(mocker, stat_info, mode_string, expected):
+    mock_umask = mocker.patch('os.umask')
+    mock_umask.return_value = 0o7
+
+    mock_stat = mocker.MagicMock()
+    mock_stat.st_mode = stat_info
+
+    assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', INVALID_DATA)
+def test_invalid_symbolic_modes(mocker, stat_info, mode_string, expected):
+    mock_stat = mocker.MagicMock()
+    mock_stat.st_mode = stat_info
+    with pytest.raises(ValueError) as exc:
+        assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == 'blah'
+    assert exc.match(expected)
diff --git a/test/units/modules/test_iptables.py b/test/units/modules/test_iptables.py
new file mode 100644
index 00000000..25a157e5
--- /dev/null
+++ b/test/units/modules/test_iptables.py
@@ -0,0 +1,919 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.modules import iptables
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+def get_bin_path(*args, **kwargs):
+    return "/sbin/iptables"
+
+
+def get_iptables_version(iptables_path, module):
+    return "1.8.2"
+
+
+class TestIptables(ModuleTestCase):
+
+    def setUp(self):
+        super(TestIptables, self).setUp()
+        self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+        self.mock_get_bin_path.start()
+        self.addCleanup(self.mock_get_bin_path.stop)  # ensure that the patching is 'undone'
+        self.mock_get_iptables_version = patch.object(iptables, 'get_iptables_version', get_iptables_version)
+        self.mock_get_iptables_version.start()
+        self.addCleanup(self.mock_get_iptables_version.stop)  # ensure that the patching is 'undone'
+
+    def test_without_required_parameters(self):
+        """Failure must occur when all parameters are missing"""
+        with self.assertRaises(AnsibleFailJson):
+            set_module_args({})
+            iptables.main()
+
+    def test_flush_table_without_chain(self):
+        """Test that flush without a chain flushes the whole table"""
+        set_module_args({
+            'flush': True,
+        })
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.return_value = 0, '', ''  # successful execution, no output
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args[0][0][0], '/sbin/iptables')
+        self.assertEqual(run_command.call_args[0][0][1], '-t')
+        self.assertEqual(run_command.call_args[0][0][2], 'filter')
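+        # (call_args[0] is the positional-args tuple of the mock call, so
+        # call_args[0][0] is the argv list handed to run_command; these four
+        # checks spell out ['/sbin/iptables', '-t', 'filter', '-F'])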
+        self.assertEqual(run_command.call_args[0][0][3], '-F')
+
+    def test_flush_table_check_true(self):
+        """Test flush in check mode: no command must be run"""
+        set_module_args({
+            'flush': True,
+            '_ansible_check_mode': True,
+        })
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.return_value = 0, '', ''  # successful execution, no output
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 0)
+
+# TODO ADD test flush table nat
+# TODO ADD test flush with chain
+# TODO ADD test flush with chain and table nat
+
+    def test_policy_table(self):
+        """Test changing the policy of a chain"""
+        set_module_args({
+            'policy': 'ACCEPT',
+            'chain': 'INPUT',
+        })
+        commands_results = [
+            (0, 'Chain INPUT (policy DROP)\n', ''),
+            (0, '', '')
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 2)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-L',
+            'INPUT',
+        ])
+        self.assertEqual(run_command.call_args_list[1][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-P',
+            'INPUT',
+            'ACCEPT',
+        ])
+
+    def test_policy_table_no_change(self):
+        """Test that the policy of a chain is left alone when it is already correct"""
+        set_module_args({
+            'policy': 'ACCEPT',
+            'chain': 'INPUT',
+        })
+        commands_results = [
+            (0, 'Chain INPUT (policy ACCEPT)\n', ''),
+            (0, '', '')
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertFalse(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-L',
+            'INPUT',
+        ])
+
+    def test_policy_table_changed_false(self):
+        """Test a policy change in check mode: reported as changed, but no change command run"""
+        set_module_args({
+            'policy': 'ACCEPT',
+            'chain': 'INPUT',
+            '_ansible_check_mode': True,
+        })
+        commands_results = [
+            (0, 'Chain INPUT (policy DROP)\n', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-L',
+            'INPUT',
+        ])
+
+# TODO ADD test policy without chain fails
+# TODO ADD test policy with a chain that doesn't exist
+# TODO ADD test policy with wrong choice fails
+
+    def test_insert_rule_change_false(self):
+        """Test inserting a rule in check mode"""
+        set_module_args({
+            'chain': 'OUTPUT',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'ACCEPT',
+            'action': 'insert',
+            '_ansible_check_mode': True,
+        })
+
+        commands_results = [
+            (1, '', ''),
+            (0, '', '')
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
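+            # exit_json() is patched in these tests (see units.modules.utils)
+            # to raise AnsibleExitJson with its kwargs, so the module's result
+            # dict is available as result.exception.args[0].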
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'OUTPUT',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'ACCEPT'
+        ])
+
+    def test_insert_rule(self):
+        """Test inserting a rule"""
+        set_module_args({
+            'chain': 'OUTPUT',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'ACCEPT',
+            'action': 'insert'
+        })
+
+        commands_results = [
+            (1, '', ''),
+            (0, '', '')
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 2)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'OUTPUT',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'ACCEPT'
+        ])
+        self.assertEqual(run_command.call_args_list[1][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-I',
+            'OUTPUT',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'ACCEPT'
+        ])
+
+    def test_append_rule_check_mode(self):
+        """Test appending a redirection rule in check mode"""
+        set_module_args({
+            'chain': 'PREROUTING',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'REDIRECT',
+            'table': 'nat',
+            'to_destination': '5.5.5.5/32',
+            'protocol': 'udp',
+            'destination_port': '22',
+            'to_ports': '8600',
+            '_ansible_check_mode': True,
+        })
+
+        commands_results = [
+            (1, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-C',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'REDIRECT',
+            '--to-destination',
+            '5.5.5.5/32',
+            '--destination-port',
+            '22',
+            '--to-ports',
+            '8600'
+        ])
+
+    def test_append_rule(self):
+        """Test appending a redirection rule"""
+        set_module_args({
+            'chain': 'PREROUTING',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'REDIRECT',
+            'table': 'nat',
+            'to_destination': '5.5.5.5/32',
+            'protocol': 'udp',
+            'destination_port': '22',
+            'to_ports': '8600'
+        })
+
+        commands_results = [
+            (1, '', ''),
+            (0, '', '')
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 2)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-C',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'REDIRECT',
+            '--to-destination',
+            '5.5.5.5/32',
+            '--destination-port',
+            '22',
+            '--to-ports',
+            '8600'
+        ])
+        self.assertEqual(run_command.call_args_list[1][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-A',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'REDIRECT',
+            '--to-destination',
+            '5.5.5.5/32',
+            '--destination-port',
+            '22',
+            '--to-ports',
+            '8600'
+        ])
+
+    def test_remove_rule(self):
+        """Test removing a rule"""
+        set_module_args({
+            'chain': 'PREROUTING',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'SNAT',
+            'table': 'nat',
+            'to_source': '5.5.5.5/32',
+            'protocol': 'udp',
+            'source_port': '22',
+            'to_ports': '8600',
+            'state': 'absent',
+            'in_interface': 'eth0',
+            'out_interface': 'eth1',
+            'comment': 'this is a comment'
+        })
+
+        commands_results = [
+            (0, '', ''),
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 2)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-C',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'SNAT',
+            '--to-source',
+            '5.5.5.5/32',
+            '-i',
+            'eth0',
+            '-o',
+            'eth1',
+            '--source-port',
+            '22',
+            '--to-ports',
+            '8600',
+            '-m',
+            'comment',
+            '--comment',
+            'this is a comment'
+        ])
+        self.assertEqual(run_command.call_args_list[1][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-D',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'SNAT',
+            '--to-source',
+            '5.5.5.5/32',
+            '-i',
+            'eth0',
+            '-o',
+            'eth1',
+            '--source-port',
+            '22',
+            '--to-ports',
+            '8600',
+            '-m',
+            'comment',
+            '--comment',
+            'this is a comment'
+        ])
+
+    def test_remove_rule_check_mode(self):
+        """Test removing a rule in check mode"""
+        set_module_args({
+            'chain': 'PREROUTING',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'SNAT',
+            'table': 'nat',
+            'to_source': '5.5.5.5/32',
+            'protocol': 'udp',
+            'source_port': '22',
+            'to_ports': '8600',
+            'state': 'absent',
+            'in_interface': 'eth0',
+            'out_interface': 'eth1',
+            'comment': 'this is a comment',
+            '_ansible_check_mode': True,
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'nat',
+            '-C',
+            'PREROUTING',
+            '-p',
+            'udp',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'SNAT',
+            '--to-source',
+            '5.5.5.5/32',
+            '-i',
+            'eth0',
+            '-o',
+            'eth1',
+            '--source-port',
+            '22',
+            '--to-ports',
+            '8600',
+            '-m',
+            'comment',
+            '--comment',
+            'this is a comment'
+        ])
+
+    def test_insert_with_reject(self):
+        """ Using reject_with alone must still yield a single '-j REJECT --reject-with' (#18988) """
+        set_module_args({
+            'chain': 'INPUT',
+            'protocol': 'tcp',
+            'reject_with': 'tcp-reset',
+            'ip_version': 'ipv4',
+        })
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'INPUT',
+            '-p',
+            'tcp',
'-j', + 'REJECT', + '--reject-with', + 'tcp-reset', + ]) + + def test_insert_jump_reject_with_reject(self): + """ Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """ + set_module_args({ + 'chain': 'INPUT', + 'protocol': 'tcp', + 'jump': 'REJECT', + 'reject_with': 'tcp-reset', + 'ip_version': 'ipv4', + }) + commands_results = [ + (0, '', ''), + ] + + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.side_effect = commands_results + with self.assertRaises(AnsibleExitJson) as result: + iptables.main() + self.assertTrue(result.exception.args[0]['changed']) + + self.assertEqual(run_command.call_count, 1) + self.assertEqual(run_command.call_args_list[0][0][0], [ + '/sbin/iptables', + '-t', + 'filter', + '-C', + 'INPUT', + '-p', + 'tcp', + '-j', + 'REJECT', + '--reject-with', + 'tcp-reset', + ]) + + def test_jump_tee_gateway_negative(self): + """ Missing gateway when JUMP is set to TEE """ + set_module_args({ + 'table': 'mangle', + 'chain': 'PREROUTING', + 'in_interface': 'eth0', + 'protocol': 'udp', + 'match': 'state', + 'jump': 'TEE', + 'ctstate': ['NEW'], + 'destination_port': '9521', + 'destination': '127.0.0.1' + }) + + with self.assertRaises(AnsibleFailJson) as e: + iptables.main() + self.assertTrue(e.exception.args[0]['failed']) + self.assertEqual(e.exception.args[0]['msg'], 'jump is TEE but all of the following are missing: gateway') + + def test_jump_tee_gateway(self): + """ Using gateway when JUMP is set to TEE """ + set_module_args({ + 'table': 'mangle', + 'chain': 'PREROUTING', + 'in_interface': 'eth0', + 'protocol': 'udp', + 'match': 'state', + 'jump': 'TEE', + 'ctstate': ['NEW'], + 'destination_port': '9521', + 'gateway': '192.168.10.1', + 'destination': '127.0.0.1' + }) + commands_results = [ + (0, '', ''), + ] + + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.side_effect = commands_results + with self.assertRaises(AnsibleExitJson) as result: + iptables.main() + self.assertTrue(result.exception.args[0]['changed']) + + self.assertEqual(run_command.call_count, 1) + self.assertEqual(run_command.call_args_list[0][0][0], [ + '/sbin/iptables', + '-t', 'mangle', + '-C', 'PREROUTING', + '-p', 'udp', + '-d', '127.0.0.1', + '-m', 'state', + '-j', 'TEE', + '--gateway', '192.168.10.1', + '-i', 'eth0', + '--destination-port', '9521', + '--state', 'NEW' + ]) + + def test_tcp_flags(self): + """ Test various ways of inputting tcp_flags """ + args = [ + { + 'chain': 'OUTPUT', + 'protocol': 'tcp', + 'jump': 'DROP', + 'tcp_flags': 'flags=ALL flags_set="ACK,RST,SYN,FIN"' + }, + { + 'chain': 'OUTPUT', + 'protocol': 'tcp', + 'jump': 'DROP', + 'tcp_flags': { + 'flags': 'ALL', + 'flags_set': 'ACK,RST,SYN,FIN' + } + }, + { + 'chain': 'OUTPUT', + 'protocol': 'tcp', + 'jump': 'DROP', + 'tcp_flags': { + 'flags': ['ALL'], + 'flags_set': ['ACK', 'RST', 'SYN', 'FIN'] + } + }, + + ] + + for item in args: + set_module_args(item) + + commands_results = [ + (0, '', ''), + ] + + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.side_effect = commands_results + with self.assertRaises(AnsibleExitJson) as result: + iptables.main() + self.assertTrue(result.exception.args[0]['changed']) + + self.assertEqual(run_command.call_count, 1) + self.assertEqual(run_command.call_args_list[0][0][0], [ + '/sbin/iptables', + '-t', + 'filter', + '-C', + 'OUTPUT', + '-p', + 'tcp', + '--tcp-flags', + 'ALL', + 'ACK,RST,SYN,FIN', + '-j', + 'DROP' + ]) + + def test_log_level(self): + """ 
Test various ways of specifying the log level flag """
+
+        log_levels = ['0', '1', '2', '3', '4', '5', '6', '7',
+                      'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug']
+
+        for log_lvl in log_levels:
+            set_module_args({
+                'chain': 'INPUT',
+                'jump': 'LOG',
+                'log_level': log_lvl,
+                'source': '1.2.3.4/32',
+                'log_prefix': '** DROP-this_ip **'
+            })
+            commands_results = [
+                (0, '', ''),
+            ]
+
+            with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+                run_command.side_effect = commands_results
+                with self.assertRaises(AnsibleExitJson) as result:
+                    iptables.main()
+                self.assertTrue(result.exception.args[0]['changed'])
+
+                self.assertEqual(run_command.call_count, 1)
+                self.assertEqual(run_command.call_args_list[0][0][0], [
+                    '/sbin/iptables',
+                    '-t', 'filter',
+                    '-C', 'INPUT',
+                    '-s', '1.2.3.4/32',
+                    '-j', 'LOG',
+                    '--log-prefix', '** DROP-this_ip **',
+                    '--log-level', log_lvl
+                ])
+
+    def test_iprange(self):
+        """ Test iprange module with its flags src_range and dst_range """
+        set_module_args({
+            'chain': 'INPUT',
+            'match': ['iprange'],
+            'src_range': '192.168.1.100-192.168.1.199',
+            'jump': 'ACCEPT'
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'INPUT',
+            '-m',
+            'iprange',
+            '-j',
+            'ACCEPT',
+            '--src-range',
+            '192.168.1.100-192.168.1.199',
+        ])
+
+        set_module_args({
+            'chain': 'INPUT',
+            'src_range': '192.168.1.100-192.168.1.199',
+            'dst_range': '10.0.0.50-10.0.0.100',
+            'jump': 'ACCEPT'
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'INPUT',
+            '-j',
+            'ACCEPT',
+            '-m',
+            'iprange',
+            '--src-range',
+            '192.168.1.100-192.168.1.199',
+            '--dst-range',
+            '10.0.0.50-10.0.0.100'
+        ])
+
+        set_module_args({
+            'chain': 'INPUT',
+            'dst_range': '10.0.0.50-10.0.0.100',
+            'jump': 'ACCEPT'
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'INPUT',
+            '-j',
+            'ACCEPT',
+            '-m',
+            'iprange',
+            '--dst-range',
+            '10.0.0.50-10.0.0.100'
+        ])
+
+    def test_insert_rule_with_wait(self):
+        """Test inserting a rule with the wait option set"""
+        set_module_args({
+            'chain': 'OUTPUT',
+            'source': '1.2.3.4/32',
+            'destination': '7.8.9.10/42',
+            'jump': 'ACCEPT',
+            'action': 'insert',
+            'wait': '10'
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
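+            # With wait set, the module passes '-w' plus the seconds value;
+            # it appears right after the chain in the argv checked below.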
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'OUTPUT',
+            '-w',
+            '10',
+            '-s',
+            '1.2.3.4/32',
+            '-d',
+            '7.8.9.10/42',
+            '-j',
+            'ACCEPT'
+        ])
+
+    def test_comment_position_at_end(self):
+        """Test that the comment match is emitted at the end of the argv"""
+        set_module_args({
+            'chain': 'INPUT',
+            'jump': 'ACCEPT',
+            'action': 'insert',
+            'ctstate': ['NEW'],
+            'comment': 'this is a comment',
+            '_ansible_check_mode': True,
+        })
+
+        commands_results = [
+            (0, '', ''),
+        ]
+
+        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+            run_command.side_effect = commands_results
+            with self.assertRaises(AnsibleExitJson) as result:
+                iptables.main()
+            self.assertTrue(result.exception.args[0]['changed'])
+
+        self.assertEqual(run_command.call_count, 1)
+        self.assertEqual(run_command.call_args_list[0][0][0], [
+            '/sbin/iptables',
+            '-t',
+            'filter',
+            '-C',
+            'INPUT',
+            '-j',
+            'ACCEPT',
+            '-m',
+            'conntrack',
+            '--ctstate',
+            'NEW',
+            '-m',
+            'comment',
+            '--comment',
+            'this is a comment'
+        ])
+        self.assertEqual(run_command.call_args[0][0][14], 'this is a comment')
diff --git a/test/units/modules/test_known_hosts.py b/test/units/modules/test_known_hosts.py
new file mode 100644
index 00000000..3b6dfd86
--- /dev/null
+++ b/test/units/modules/test_known_hosts.py
@@ -0,0 +1,110 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+from ansible.module_utils import basic
+
+from units.compat import unittest
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.modules.known_hosts import compute_diff, sanity_check
+
+
+class KnownHostsDiffTestCase(unittest.TestCase):
+
+    def _create_file(self, content):
+        tmp_file = tempfile.NamedTemporaryFile(prefix='ansible-test-', suffix='-known_hosts', delete=False)
+        tmp_file.write(to_bytes(content))
+        tmp_file.close()
+        self.addCleanup(os.unlink, tmp_file.name)
+        return tmp_file.name
+
+    def test_no_existing_file(self):
+        path = tempfile.mktemp(prefix='ansible-test-', suffix='-known_hosts')
+        key = 'example.com ssh-rsa AAAAetc\n'
+        diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
+        self.assertEqual(diff, {
+            'before_header': '/dev/null',
+            'after_header': path,
+            'before': '',
+            'after': 'example.com ssh-rsa AAAAetc\n',
+        })
+
+    def test_key_addition(self):
+        path = self._create_file(
+            'two.example.com ssh-rsa BBBBetc\n'
+        )
+        key = 'one.example.com ssh-rsa AAAAetc\n'
+        diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
+        self.assertEqual(diff, {
+            'before_header': path,
+            'after_header': path,
+            'before': 'two.example.com ssh-rsa BBBBetc\n',
+            'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
+        })
+
+    def test_no_change(self):
+        path = self._create_file(
+            'one.example.com ssh-rsa AAAAetc\n'
+            'two.example.com ssh-rsa BBBBetc\n'
+        )
+        key = 'one.example.com ssh-rsa AAAAetc\n'
+        diff = compute_diff(path, found_line=1, replace_or_add=False, state='present', key=key)
+        self.assertEqual(diff, {
+            'before_header': path,
+            'after_header': path,
+            'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
+            'after': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
+        })
+
+    def test_key_change(self):
+        path = self._create_file(
'one.example.com ssh-rsa AAAaetc\n' + 'two.example.com ssh-rsa BBBBetc\n' + ) + key = 'one.example.com ssh-rsa AAAAetc\n' + diff = compute_diff(path, found_line=1, replace_or_add=True, state='present', key=key) + self.assertEqual(diff, { + 'before_header': path, + 'after_header': path, + 'before': 'one.example.com ssh-rsa AAAaetc\ntwo.example.com ssh-rsa BBBBetc\n', + 'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n', + }) + + def test_key_removal(self): + path = self._create_file( + 'one.example.com ssh-rsa AAAAetc\n' + 'two.example.com ssh-rsa BBBBetc\n' + ) + key = 'one.example.com ssh-rsa AAAAetc\n' + diff = compute_diff(path, found_line=1, replace_or_add=False, state='absent', key=key) + self.assertEqual(diff, { + 'before_header': path, + 'after_header': path, + 'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n', + 'after': 'two.example.com ssh-rsa BBBBetc\n', + }) + + def test_key_removal_no_change(self): + path = self._create_file( + 'two.example.com ssh-rsa BBBBetc\n' + ) + key = 'one.example.com ssh-rsa AAAAetc\n' + diff = compute_diff(path, found_line=None, replace_or_add=False, state='absent', key=key) + self.assertEqual(diff, { + 'before_header': path, + 'after_header': path, + 'before': 'two.example.com ssh-rsa BBBBetc\n', + 'after': 'two.example.com ssh-rsa BBBBetc\n', + }) + + def test_sanity_check(self): + basic._load_params = lambda: {} + # Module used internally to execute ssh-keygen system executable + module = AnsibleModule(argument_spec={}) + host = '10.0.0.1' + key = '%s ssh-rsa ASDF foo@bar' % (host,) + keygen = module.get_bin_path('ssh-keygen') + sanity_check(module, host, key, keygen) diff --git a/test/units/modules/test_pip.py b/test/units/modules/test_pip.py new file mode 100644 index 00000000..7f0f8b07 --- /dev/null +++ b/test/units/modules/test_pip.py @@ -0,0 +1,38 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +import pytest + +from ansible.modules import pip + + +pytestmark = pytest.mark.usefixtures('patch_ansible_module') + + +@pytest.mark.parametrize('patch_ansible_module', [{'name': 'six'}], indirect=['patch_ansible_module']) +def test_failure_when_pip_absent(mocker, capfd): + get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + get_bin_path.return_value = None + + with pytest.raises(SystemExit): + pip.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert results['failed'] + assert 'pip needs to be installed' in results['msg'] + + +@pytest.mark.parametrize('patch_ansible_module, test_input, expected', [ + [None, ['django>1.11.1', '<1.11.2', 'ipaddress', 'simpleproject<2.0.0', '>1.1.0'], + ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']], + [None, ['django>1.11.1,<1.11.2,ipaddress', 'simpleproject<2.0.0,>1.1.0'], + ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']], + [None, ['django>1.11.1', '<1.11.2', 'ipaddress,simpleproject<2.0.0,>1.1.0'], + ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']]]) +def test_recover_package_name(test_input, expected): + assert pip._recover_package_name(test_input) == expected diff --git a/test/units/modules/test_systemd.py b/test/units/modules/test_systemd.py new file mode 100644 index 00000000..52c212a0 --- /dev/null +++ b/test/units/modules/test_systemd.py @@ 
-0,0 +1,52 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.modules.systemd import parse_systemctl_show + + +class ParseSystemctlShowTestCase(unittest.TestCase): + + def test_simple(self): + lines = [ + 'Type=simple', + 'Restart=no', + 'Requires=system.slice sysinit.target', + 'Description=Blah blah blah', + ] + parsed = parse_systemctl_show(lines) + self.assertEqual(parsed, { + 'Type': 'simple', + 'Restart': 'no', + 'Requires': 'system.slice sysinit.target', + 'Description': 'Blah blah blah', + }) + + def test_multiline_exec(self): + # This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar" + lines = [ + 'Type=simple', + 'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo', + 'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }', + 'Description=blah', + ] + parsed = parse_systemctl_show(lines) + self.assertEqual(parsed, { + 'Type': 'simple', + 'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n' + 'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }', + 'Description': 'blah', + }) + + def test_single_line_with_brace(self): + lines = [ + 'Type=simple', + 'Description={ this is confusing', + 'Restart=no', + ] + parsed = parse_systemctl_show(lines) + self.assertEqual(parsed, { + 'Type': 'simple', + 'Description': '{ this is confusing', + 'Restart': 'no', + }) diff --git a/test/units/modules/test_yum.py b/test/units/modules/test_yum.py new file mode 100644 index 00000000..e5d601a6 --- /dev/null +++ b/test/units/modules/test_yum.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest + +from ansible.modules.yum import YumModule + + +yum_plugin_load_error = """ +Plugin "product-id" can't be imported +Plugin "search-disabled-repos" can't be imported +Plugin "subscription-manager" can't be imported +Plugin "product-id" can't be imported +Plugin "search-disabled-repos" can't be imported +Plugin "subscription-manager" can't be imported +""" + +# from https://github.com/ansible/ansible/issues/20608#issuecomment-276106505 +wrapped_output_1 = """ +Загружены модули: fastestmirror +Loading mirror speeds from cached hostfile + * base: mirror.h1host.ru + * extras: mirror.h1host.ru + * updates: mirror.h1host.ru + +vms-agent.x86_64 0.0-9 dev +""" + +# from https://github.com/ansible/ansible/issues/20608#issuecomment-276971275 +wrapped_output_2 = """ +Загружены модули: fastestmirror +Loading mirror speeds from cached hostfile + * base: mirror.corbina.net + * extras: mirror.corbina.net + * updates: mirror.corbina.net + +empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty.x86_64 + 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1-0 + addons +libtiff.x86_64 4.0.3-27.el7_3 updates +""" + +# From https://github.com/ansible/ansible/issues/20608#issuecomment-276698431 +wrapped_output_3 = """ +Loaded plugins: fastestmirror, langpacks +Loading mirror speeds from cached hostfile + +ceph.x86_64 1:11.2.0-0.el7 ceph +ceph-base.x86_64 1:11.2.0-0.el7 ceph +ceph-common.x86_64 1:11.2.0-0.el7 ceph +ceph-mds.x86_64 1:11.2.0-0.el7 ceph +ceph-mon.x86_64 1:11.2.0-0.el7 ceph +ceph-osd.x86_64 1:11.2.0-0.el7 ceph +ceph-selinux.x86_64 1:11.2.0-0.el7 ceph +libcephfs1.x86_64 1:11.0.2-0.el7 ceph +librados2.x86_64 
1:11.2.0-0.el7 ceph +libradosstriper1.x86_64 1:11.2.0-0.el7 ceph +librbd1.x86_64 1:11.2.0-0.el7 ceph +librgw2.x86_64 1:11.2.0-0.el7 ceph +python-cephfs.x86_64 1:11.2.0-0.el7 ceph +python-rados.x86_64 1:11.2.0-0.el7 ceph +python-rbd.x86_64 1:11.2.0-0.el7 ceph +""" + +# from https://github.com/ansible/ansible-modules-core/issues/4318#issuecomment-251416661 +wrapped_output_4 = """ +ipxe-roms-qemu.noarch 20160127-1.git6366fa7a.el7 + rhelosp-9.0-director-puddle +quota.x86_64 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z +quota-nls.noarch 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z +rdma.noarch 7.2_4.1_rc6-2.el7 rhelosp-rhel-7.2-z +screen.x86_64 4.1.0-0.23.20120314git3c2946.el7_2 + rhelosp-rhel-7.2-z +sos.noarch 3.2-36.el7ost.2 rhelosp-9.0-puddle +sssd-client.x86_64 1.13.0-40.el7_2.12 rhelosp-rhel-7.2-z +""" + + +# A 'normal-ish' yum check-update output, without any wrapped lines +unwrapped_output_rhel7 = """ + +Loaded plugins: etckeeper, product-id, search-disabled-repos, subscription- + : manager +This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register. + +NetworkManager-openvpn.x86_64 1:1.2.6-1.el7 epel +NetworkManager-openvpn-gnome.x86_64 1:1.2.6-1.el7 epel +cabal-install.x86_64 1.16.1.0-2.el7 epel +cgit.x86_64 1.1-1.el7 epel +python34-libs.x86_64 3.4.5-3.el7 epel +python34-test.x86_64 3.4.5-3.el7 epel +python34-tkinter.x86_64 3.4.5-3.el7 epel +python34-tools.x86_64 3.4.5-3.el7 epel +qgit.x86_64 2.6-4.el7 epel +rdiff-backup.x86_64 1.2.8-12.el7 epel +stoken-libs.x86_64 0.91-1.el7 epel +xlockmore.x86_64 5.49-2.el7 epel +""" + +# Some wrapped obsoletes for prepending to output for testing both +wrapped_output_rhel7_obsoletes_postfix = """ +Obsoleting Packages +ddashboard.x86_64 0.2.0.1-1.el7_3 mhlavink-developerdashboard + developerdashboard.x86_64 0.1.12.2-1.el7_2 @mhlavink-developerdashboard +python-bugzilla.noarch 1.2.2-3.el7_2.1 mhlavink-developerdashboard + python-bugzilla-develdashboardfixes.noarch + 1.2.2-3.el7 @mhlavink-developerdashboard +python2-futures.noarch 3.0.5-1.el7 epel + python-futures.noarch 3.0.3-1.el7 @epel +python2-pip.noarch 8.1.2-5.el7 epel + python-pip.noarch 7.1.0-1.el7 @epel +python2-pyxdg.noarch 0.25-6.el7 epel + pyxdg.noarch 0.25-5.el7 @epel +python2-simplejson.x86_64 3.10.0-1.el7 epel + python-simplejson.x86_64 3.3.3-1.el7 @epel +Security: kernel-3.10.0-327.28.2.el7.x86_64 is an installed security update +Security: kernel-3.10.0-327.22.2.el7.x86_64 is the currently running version +""" + +longname = """ +Loaded plugins: fastestmirror, priorities, rhnplugin +This system is receiving updates from RHN Classic or Red Hat Satellite. 
+Loading mirror speeds from cached hostfile + +xxxxxxxxxxxxxxxxxxxxxxxxxx.noarch + 1.16-1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +glibc.x86_64 2.17-157.el7_3.1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""" + + +unwrapped_output_rhel7_obsoletes = unwrapped_output_rhel7 + wrapped_output_rhel7_obsoletes_postfix +unwrapped_output_rhel7_expected_new_obsoletes_pkgs = [ + "ddashboard", "python-bugzilla", "python2-futures", "python2-pip", + "python2-pyxdg", "python2-simplejson" +] +unwrapped_output_rhel7_expected_old_obsoletes_pkgs = [ + "developerdashboard", "python-bugzilla-develdashboardfixes", + "python-futures", "python-pip", "pyxdg", "python-simplejson" +] +unwrapped_output_rhel7_expected_updated_pkgs = [ + "NetworkManager-openvpn", "NetworkManager-openvpn-gnome", "cabal-install", + "cgit", "python34-libs", "python34-test", "python34-tkinter", + "python34-tools", "qgit", "rdiff-backup", "stoken-libs", "xlockmore" +] + + +class TestYumUpdateCheckParse(unittest.TestCase): + def _assert_expected(self, expected_pkgs, result): + + for expected_pkg in expected_pkgs: + self.assertIn(expected_pkg, result) + self.assertEqual(len(result), len(expected_pkgs)) + self.assertIsInstance(result, dict) + + def test_empty_output(self): + res, obs = YumModule.parse_check_update("") + expected_pkgs = [] + self._assert_expected(expected_pkgs, res) + + def test_longname(self): + res, obs = YumModule.parse_check_update(longname) + expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc'] + self._assert_expected(expected_pkgs, res) + + def test_plugin_load_error(self): + res, obs = YumModule.parse_check_update(yum_plugin_load_error) + expected_pkgs = [] + self._assert_expected(expected_pkgs, res) + + def test_wrapped_output_1(self): + res, obs = YumModule.parse_check_update(wrapped_output_1) + expected_pkgs = ["vms-agent"] + self._assert_expected(expected_pkgs, res) + + def test_wrapped_output_2(self): + res, obs = YumModule.parse_check_update(wrapped_output_2) + expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty", + "libtiff"] + + self._assert_expected(expected_pkgs, res) + + def test_wrapped_output_3(self): + res, obs = YumModule.parse_check_update(wrapped_output_3) + expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds", + "ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1", + "librados2", "libradosstriper1", "librbd1", "librgw2", + "python-cephfs", "python-rados", "python-rbd"] + self._assert_expected(expected_pkgs, res) + + def test_wrapped_output_4(self): + res, obs = YumModule.parse_check_update(wrapped_output_4) + + expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen", + "sos", "sssd-client"] + self._assert_expected(expected_pkgs, res) + + def test_wrapped_output_rhel7(self): + res, obs = YumModule.parse_check_update(unwrapped_output_rhel7) + self._assert_expected(unwrapped_output_rhel7_expected_updated_pkgs, res) + + def test_wrapped_output_rhel7_obsoletes(self): + res, obs = YumModule.parse_check_update(unwrapped_output_rhel7_obsoletes) + self._assert_expected( + unwrapped_output_rhel7_expected_updated_pkgs + unwrapped_output_rhel7_expected_new_obsoletes_pkgs, + res + ) + self._assert_expected(unwrapped_output_rhel7_expected_old_obsoletes_pkgs, obs) diff --git a/test/units/modules/utils.py b/test/units/modules/utils.py new file mode 100644 index 00000000..6d169e36 --- /dev/null +++ b/test/units/modules/utils.py @@ -0,0 +1,50 @@ +from __future__ import (absolute_import, division, print_function) 
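+# Shared scaffolding for the module unit tests in this package:
+# set_module_args() hands parameters to AnsibleModule via basic._ANSIBLE_ARGS,
+# and ModuleTestCase patches exit_json/fail_json to raise AnsibleExitJson /
+# AnsibleFailJson, so a test can catch a module's exit instead of exiting.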
+__metaclass__ = type + +import json + +from units.compat import unittest +from units.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes + + +def set_module_args(args): + if '_ansible_remote_tmp' not in args: + args['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args: + args['_ansible_keep_remote_files'] = False + + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + pass + + +class AnsibleFailJson(Exception): + pass + + +def exit_json(*args, **kwargs): + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class ModuleTestCase(unittest.TestCase): + + def setUp(self): + self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module.start() + self.mock_sleep = patch('time.sleep') + self.mock_sleep.start() + set_module_args({}) + self.addCleanup(self.mock_module.stop) + self.addCleanup(self.mock_sleep.stop) diff --git a/test/units/parsing/__init__.py b/test/units/parsing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/parsing/fixtures/ajson.json b/test/units/parsing/fixtures/ajson.json new file mode 100644 index 00000000..dafec0b3 --- /dev/null +++ b/test/units/parsing/fixtures/ajson.json @@ -0,0 +1,19 @@ +{ + "password": { + "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3339\n" + }, + "bar": { + "baz": [ + { + "password": { + "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3338\n" + } + } + ] + }, + "foo": { + "password": { + "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3339\n" + } + } +} diff --git a/test/units/parsing/fixtures/vault.yml b/test/units/parsing/fixtures/vault.yml new file mode 100644 index 00000000..ca33ab25 --- /dev/null +++ b/test/units/parsing/fixtures/vault.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +33343734386261666161626433386662623039356366656637303939306563376130623138626165 +6436333766346533353463636566313332623130383662340a393835656134633665333861393331 +37666233346464636263636530626332623035633135363732623332313534306438393366323966 +3135306561356164310a343937653834643433343734653137383339323330626437313562306630 +3035 diff --git a/test/units/parsing/test_ajson.py b/test/units/parsing/test_ajson.py new file mode 100644 index 00000000..c38f43ea --- /dev/null +++ b/test/units/parsing/test_ajson.py @@ -0,0 +1,187 @@ 
+# Copyright 2018, Matt Martz +# Copyright 2019, Andrew Klychkov @Andersson007 +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import json + +import pytest + +from datetime import date, datetime +from pytz import timezone as tz + +from ansible.module_utils.common._collections_compat import Mapping +from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder +from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.utils.unsafe_proxy import AnsibleUnsafeText + + +def test_AnsibleJSONDecoder_vault(): + with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f: + data = json.load(f, cls=AnsibleJSONDecoder) + + assert isinstance(data['password'], AnsibleVaultEncryptedUnicode) + assert isinstance(data['bar']['baz'][0]['password'], AnsibleVaultEncryptedUnicode) + assert isinstance(data['foo']['password'], AnsibleVaultEncryptedUnicode) + + +def test_encode_decode_unsafe(): + data = { + 'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}'), + 'list': [AnsibleUnsafeText(u'{#NOTACOMMENT#}')], + 'list_dict': [{'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}')}]} + json_expected = ( + '{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}, ' + '"list": [{"__ansible_unsafe": "{#NOTACOMMENT#}"}], ' + '"list_dict": [{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}}]}' + ) + assert json.dumps(data, cls=AnsibleJSONEncoder, preprocess_unsafe=True, sort_keys=True) == json_expected + assert json.loads(json_expected, cls=AnsibleJSONDecoder) == data + + +def vault_data(): + """ + Prepare AnsibleVaultEncryptedUnicode test data for AnsibleJSONEncoder.default(). + + Return a list of tuples (input, expected). + """ + + with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f: + data = json.load(f, cls=AnsibleJSONDecoder) + + data_0 = data['password'] + data_1 = data['bar']['baz'][0]['password'] + + expected_0 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316' + '562356435376162633631326264383934326565333633366238\n3863' + '373264326461623132613931346165636465346337310a32643431383' + '0316337393263616439\n646539373134633963666338613632666334' + '65663730303633323534363331316164623237363831\n35363335613' + '93238370a313330316263373938326162386433313336613532653538' + '376662306435\n3339\n') + + expected_1 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316' + '562356435376162633631326264383934326565333633366238\n3863' + '373264326461623132613931346165636465346337310a32643431383' + '0316337393263616439\n646539373134633963666338613632666334' + '65663730303633323534363331316164623237363831\n35363335613' + '93238370a313330316263373938326162386433313336613532653538' + '376662306435\n3338\n') + + return [ + (data_0, expected_0), + (data_1, expected_1), + ] + + +class TestAnsibleJSONEncoder: + + """ + Namespace for testing AnsibleJSONEncoder. + """ + + @pytest.fixture(scope='class') + def mapping(self, request): + """ + Returns object of Mapping mock class. + + The object is used for testing handling of Mapping objects + in AnsibleJSONEncoder.default(). + Using a plain dictionary instead is not suitable because + it is handled by default encoder of the superclass (json.JSONEncoder). 
+ """ + + class M(Mapping): + + """Mock mapping class.""" + + def __init__(self, *args, **kwargs): + self.__dict__.update(*args, **kwargs) + + def __getitem__(self, key): + return self.__dict__[key] + + def __iter__(self): + return iter(self.__dict__) + + def __len__(self): + return len(self.__dict__) + + return M(request.param) + + @pytest.fixture + def ansible_json_encoder(self): + """Return AnsibleJSONEncoder object.""" + return AnsibleJSONEncoder() + + ############### + # Test methods: + + @pytest.mark.parametrize( + 'test_input,expected', + [ + (datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'), + (datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'), + (date(2019, 5, 14), '2019-05-14'), + (date(2020, 5, 14), '2020-05-14'), + (datetime(2019, 6, 15, 14, 45, tzinfo=tz('UTC')), '2019-06-15T14:45:00+00:00'), + (datetime(2019, 6, 15, 14, 45, tzinfo=tz('Europe/Helsinki')), '2019-06-15T14:45:00+01:40'), + ] + ) + def test_date_datetime(self, ansible_json_encoder, test_input, expected): + """ + Test for passing datetime.date or datetime.datetime objects to AnsibleJSONEncoder.default(). + """ + assert ansible_json_encoder.default(test_input) == expected + + @pytest.mark.parametrize( + 'mapping,expected', + [ + ({1: 1}, {1: 1}), + ({2: 2}, {2: 2}), + ({1: 2}, {1: 2}), + ({2: 1}, {2: 1}), + ], indirect=['mapping'], + ) + def test_mapping(self, ansible_json_encoder, mapping, expected): + """ + Test for passing Mapping object to AnsibleJSONEncoder.default(). + """ + assert ansible_json_encoder.default(mapping) == expected + + @pytest.mark.parametrize('test_input,expected', vault_data()) + def test_ansible_json_decoder_vault(self, ansible_json_encoder, test_input, expected): + """ + Test for passing AnsibleVaultEncryptedUnicode to AnsibleJSONEncoder.default(). + """ + assert ansible_json_encoder.default(test_input) == {'__ansible_vault': expected} + assert json.dumps(test_input, cls=AnsibleJSONEncoder, preprocess_unsafe=True) == '{"__ansible_vault": "%s"}' % expected.replace('\n', '\\n') + + @pytest.mark.parametrize( + 'test_input,expected', + [ + ({1: 'first'}, {1: 'first'}), + ({2: 'second'}, {2: 'second'}), + ] + ) + def test_default_encoder(self, ansible_json_encoder, test_input, expected): + """ + Test for the default encoder of AnsibleJSONEncoder.default(). + + If objects of different classes that are not tested above were passed, + AnsibleJSONEncoder.default() invokes 'default()' method of json.JSONEncoder superclass. + """ + assert ansible_json_encoder.default(test_input) == expected + + @pytest.mark.parametrize('test_input', [1, 1.1, 'string', [1, 2], set('set'), True, None]) + def test_default_encoder_unserializable(self, ansible_json_encoder, test_input): + """ + Test for the default encoder of AnsibleJSONEncoder.default(), not serializable objects. + + It must fail with TypeError 'object is not serializable'. + """ + with pytest.raises(TypeError): + ansible_json_encoder.default(test_input) diff --git a/test/units/parsing/test_dataloader.py b/test/units/parsing/test_dataloader.py new file mode 100644 index 00000000..3cc8d451 --- /dev/null +++ b/test/units/parsing/test_dataloader.py @@ -0,0 +1,239 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from units.compat import unittest +from units.compat.mock import patch, mock_open +from ansible.errors import AnsibleParserError, yaml_strings, AnsibleFileNotFound +from ansible.parsing.vault import AnsibleVaultError +from ansible.module_utils._text import to_text +from ansible.module_utils.six import PY3 + +from units.mock.vault_helper import TextVaultSecret +from ansible.parsing.dataloader import DataLoader + +from units.mock.path import mock_unfrackpath_noop + + +class TestDataLoader(unittest.TestCase): + + def setUp(self): + self._loader = DataLoader() + + @patch('os.path.exists') + def test__is_role(self, p_exists): + p_exists.side_effect = lambda p: p == b'test_path/tasks/main.yml' + self.assertTrue(self._loader._is_role('test_path/tasks')) + self.assertTrue(self._loader._is_role('test_path/')) + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_json_from_file(self, mock_def): + mock_def.return_value = (b"""{"a": 1, "b": 2, "c": 3}""", True) + output = self._loader.load_from_file('dummy_json.txt') + self.assertEqual(output, dict(a=1, b=2, c=3)) + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_yaml_from_file(self, mock_def): + mock_def.return_value = (b""" + a: 1 + b: 2 + c: 3 + """, True) + output = self._loader.load_from_file('dummy_yaml.txt') + self.assertEqual(output, dict(a=1, b=2, c=3)) + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_fail_from_file(self, mock_def): + mock_def.return_value = (b""" + TEXT: + *** + NOT VALID + """, True) + self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt') + + @patch('ansible.errors.AnsibleError._get_error_lines_from_file') + @patch.object(DataLoader, '_get_file_contents') + def test_tab_error(self, mock_def, mock_get_error_lines): + mock_def.return_value = (u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""", True) + mock_get_error_lines.return_value = ('''\tblip: baz''', '''..foo: bar''') + with self.assertRaises(AnsibleParserError) as cm: + self._loader.load_from_file('dummy_yaml_text.txt') + self.assertIn(yaml_strings.YAML_COMMON_LEADING_TAB_ERROR, str(cm.exception)) + self.assertIn('foo: bar', str(cm.exception)) + + @patch('ansible.parsing.dataloader.unfrackpath', mock_unfrackpath_noop) + @patch.object(DataLoader, '_is_role') + def test_path_dwim_relative(self, mock_is_role): + """ + simulate a nested dynamic include: + + playbook.yml: + - hosts: localhost + roles: + - { role: 'testrole' } + + testrole/tasks/main.yml: + - include: "include1.yml" + static: no + + testrole/tasks/include1.yml: + - include: include2.yml + static: no + + testrole/tasks/include2.yml: + - debug: msg="blah" + """ + mock_is_role.return_value = False + with patch('os.path.exists') as mock_os_path_exists: + mock_os_path_exists.return_value = False + self._loader.path_dwim_relative('/tmp/roles/testrole/tasks', 'tasks', 'included2.yml') + + # Fetch first args for every call + # mock_os_path_exists.assert_any_call isn't used because os.path.normpath must be used in order to compare paths + 
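+            # (each entry in call_args_list is a mock.call; call[0][0] is the
+            # first positional argument, i.e. the path os.path.exists received)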
called_args = [os.path.normpath(to_text(call[0][0])) for call in mock_os_path_exists.call_args_list] + + # 'path_dwim_relative' docstrings say 'with or without explicitly named dirname subdirs': + self.assertIn('/tmp/roles/testrole/tasks/included2.yml', called_args) + self.assertIn('/tmp/roles/testrole/tasks/tasks/included2.yml', called_args) + + # relative directories below are taken in account too: + self.assertIn('tasks/included2.yml', called_args) + self.assertIn('included2.yml', called_args) + + def test_path_dwim_root(self): + self.assertEqual(self._loader.path_dwim('/'), '/') + + def test_path_dwim_home(self): + self.assertEqual(self._loader.path_dwim('~'), os.path.expanduser('~')) + + def test_path_dwim_tilde_slash(self): + self.assertEqual(self._loader.path_dwim('~/'), os.path.expanduser('~')) + + def test_get_real_file(self): + self.assertEqual(self._loader.get_real_file(__file__), __file__) + + def test_is_file(self): + self.assertTrue(self._loader.is_file(__file__)) + + def test_is_directory_positive(self): + self.assertTrue(self._loader.is_directory(os.path.dirname(__file__))) + + def test_get_file_contents_none_path(self): + self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename', + self._loader._get_file_contents, None) + + def test_get_file_contents_non_existent_path(self): + self.assertRaises(AnsibleFileNotFound, self._loader._get_file_contents, '/non_existent_file') + + +class TestPathDwimRelativeDataLoader(unittest.TestCase): + + def setUp(self): + self._loader = DataLoader() + + def test_all_slash(self): + self.assertEqual(self._loader.path_dwim_relative('/', '/', '/'), '/') + + def test_path_endswith_role(self): + self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='/'), '/') + + def test_path_endswith_role_main_yml(self): + self.assertIn('main.yml', self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='main.yml')) + + def test_path_endswith_role_source_tilde(self): + self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='~/'), os.path.expanduser('~')) + + +class TestPathDwimRelativeStackDataLoader(unittest.TestCase): + + def setUp(self): + self._loader = DataLoader() + + def test_none(self): + self.assertRaisesRegexp(AnsibleFileNotFound, 'on the Ansible Controller', self._loader.path_dwim_relative_stack, None, None, None) + + def test_empty_strings(self): + self.assertEqual(self._loader.path_dwim_relative_stack('', '', ''), './') + + def test_empty_lists(self): + self.assertEqual(self._loader.path_dwim_relative_stack([], '', '~/'), os.path.expanduser('~')) + + def test_all_slash(self): + self.assertEqual(self._loader.path_dwim_relative_stack('/', '/', '/'), '/') + + def test_path_endswith_role(self): + self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='/'), '/') + + def test_path_endswith_role_source_tilde(self): + self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='~/'), os.path.expanduser('~')) + + def test_path_endswith_role_source_main_yml(self): + self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, ['foo/bar/tasks/'], '/', 'main.yml') + + def test_path_endswith_role_source_main_yml_source_in_dirname(self): + self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, 'foo/bar/tasks/', 'tasks', 'tasks/main.yml') + + +class TestDataLoaderWithVault(unittest.TestCase): + + def setUp(self): + self._loader = DataLoader() 
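+        # 'ansible' matches the passphrase of the fixtures/vault.yml test
+        # vault; the wrong-password test below swaps in a different secret to
+        # prove that decryption then fails.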
+ vault_secrets = [('default', TextVaultSecret('ansible'))] + self._loader.set_vault_secrets(vault_secrets) + self.test_vault_data_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'vault.yml') + + def tearDown(self): + pass + + def test_get_real_file_vault(self): + real_file_path = self._loader.get_real_file(self.test_vault_data_path) + self.assertTrue(os.path.exists(real_file_path)) + + def test_get_real_file_vault_no_vault(self): + self._loader.set_vault_secrets(None) + self.assertRaises(AnsibleParserError, self._loader.get_real_file, self.test_vault_data_path) + + def test_get_real_file_vault_wrong_password(self): + wrong_vault = [('default', TextVaultSecret('wrong_password'))] + self._loader.set_vault_secrets(wrong_vault) + self.assertRaises(AnsibleVaultError, self._loader.get_real_file, self.test_vault_data_path) + + def test_get_real_file_not_a_path(self): + self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename', self._loader.get_real_file, None) + + @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True) + def test_parse_from_vault_1_1_file(self): + vaulted_data = """$ANSIBLE_VAULT;1.1;AES256 +33343734386261666161626433386662623039356366656637303939306563376130623138626165 +6436333766346533353463636566313332623130383662340a393835656134633665333861393331 +37666233346464636263636530626332623035633135363732623332313534306438393366323966 +3135306561356164310a343937653834643433343734653137383339323330626437313562306630 +3035 +""" + if PY3: + builtins_name = 'builtins' + else: + builtins_name = '__builtin__' + + with patch(builtins_name + '.open', mock_open(read_data=vaulted_data.encode('utf-8'))): + output = self._loader.load_from_file('dummy_vault.txt') + self.assertEqual(output, dict(foo='bar')) diff --git a/test/units/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py new file mode 100644 index 00000000..50c3b331 --- /dev/null +++ b/test/units/parsing/test_mod_args.py @@ -0,0 +1,137 @@ +# (c) 2012-2014, Michael DeHaan +# Copyright 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest +import re + +from ansible.errors import AnsibleParserError +from ansible.parsing.mod_args import ModuleArgsParser +from ansible.utils.sentinel import Sentinel + + +class TestModArgsDwim: + + # TODO: add tests that construct ModuleArgsParser with a task reference + # TODO: verify the AnsibleError raised on failure knows the task + # and the task knows the line numbers + + INVALID_MULTIPLE_ACTIONS = ( + ({'action': 'shell echo hi', 'local_action': 'shell echo hi'}, "action and local_action are mutually exclusive"), + ({'action': 'shell echo hi', 'shell': 'echo hi'}, "conflicting action statements: shell, shell"), + ({'local_action': 'shell echo hi', 'shell': 'echo hi'}, "conflicting action statements: shell, shell"), + ) + + def _debug(self, mod, args, to): + print("RETURNED module = {0}".format(mod)) + print(" args = {0}".format(args)) + print(" to = {0}".format(to)) + + def test_basic_shell(self): + m = ModuleArgsParser(dict(shell='echo hi')) + mod, args, to = m.parse() + self._debug(mod, args, to) + + assert mod == 'shell' + assert args == dict( + _raw_params='echo hi', + ) + assert to is Sentinel + + def test_basic_command(self): + m = ModuleArgsParser(dict(command='echo hi')) + mod, args, to = m.parse() + self._debug(mod, args, to) + + assert mod == 'command' + assert args == 
dict(
+            _raw_params='echo hi',
+        )
+        assert to is Sentinel
+
+    def test_shell_with_modifiers(self):
+        m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep'))
+        mod, args, to = m.parse()
+        self._debug(mod, args, to)
+
+        assert mod == 'shell'
+        assert args == dict(
+            creates='/tmp/baz',
+            removes='/tmp/bleep',
+            _raw_params='/bin/foo',
+        )
+        assert to is Sentinel
+
+    def test_normal_usage(self):
+        m = ModuleArgsParser(dict(copy='src=a dest=b'))
+        mod, args, to = m.parse()
+        self._debug(mod, args, to)
+
+        assert mod == 'copy'
+        assert args == dict(src='a', dest='b')
+        assert to is Sentinel
+
+    def test_complex_args(self):
+        m = ModuleArgsParser(dict(copy=dict(src='a', dest='b')))
+        mod, args, to = m.parse()
+        self._debug(mod, args, to)
+
+        assert mod == 'copy'
+        assert args == dict(src='a', dest='b')
+        assert to is Sentinel
+
+    def test_action_with_complex(self):
+        m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b')))
+        mod, args, to = m.parse()
+        self._debug(mod, args, to)
+
+        assert mod == 'copy'
+        assert args == dict(src='a', dest='b')
+        assert to is Sentinel
+
+    def test_action_with_complex_and_complex_args(self):
+        m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b'))))
+        mod, args, to = m.parse()
+        self._debug(mod, args, to)
+
+        assert mod == 'copy'
+        assert args == dict(src='a', dest='b')
+        assert to is Sentinel
+
+    def test_local_action_string(self):
+        m = ModuleArgsParser(dict(local_action='copy src=a dest=b'))
+        mod, args, delegate_to = m.parse()
+        self._debug(mod, args, delegate_to)
+
+        assert mod == 'copy'
+        assert args == dict(src='a', dest='b')
+        assert delegate_to == 'localhost'
+
+    @pytest.mark.parametrize("args_dict, msg", INVALID_MULTIPLE_ACTIONS)
+    def test_multiple_actions(self, args_dict, msg):
+        m = ModuleArgsParser(args_dict)
+        with pytest.raises(AnsibleParserError) as err:
+            m.parse()
+
+        assert err.value.args[0] == msg
+
+    def test_multiple_actions_ping_shell(self):
+        args_dict = {'ping': 'data=hi', 'shell': 'echo hi'}
+        m = ModuleArgsParser(args_dict)
+        with pytest.raises(AnsibleParserError) as err:
+            m.parse()
+
+        assert err.value.args[0].startswith("conflicting action statements: ")
+        actions = set(re.search(r'(\w+), (\w+)', err.value.args[0]).groups())
+        assert actions == set(['ping', 'shell'])
+
+    def test_bogus_action(self):
+        args_dict = {'bogusaction': {}}
+        m = ModuleArgsParser(args_dict)
+        with pytest.raises(AnsibleParserError) as err:
+            m.parse()
+
+        assert err.value.args[0].startswith("couldn't resolve module/action 'bogusaction'")
diff --git a/test/units/parsing/test_splitter.py b/test/units/parsing/test_splitter.py
new file mode 100644
index 00000000..a37de0f9
--- /dev/null
+++ b/test/units/parsing/test_splitter.py
@@ -0,0 +1,110 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
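+
+# A quick orientation (illustrative; the pairs are taken verbatim from the
+# SPLIT_DATA table below): split_args() tokenizes an option string
+# shell-style, keeping quoting and embedded newlines intact, while parse_kv()
+# reduces the same string to a dict, collecting bare words that are not
+# key=value pairs under '_raw_params', e.g.
+#
+#   split_args(u'a=b c="foo bar"')  ->  [u'a=b', u'c="foo bar"']
+#   parse_kv(u'a=b c="foo bar"')    ->  {u'a': u'b', u'c': u'foo bar'}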
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.parsing.splitter import split_args, parse_kv + +import pytest + +SPLIT_DATA = ( + (u'a', + [u'a'], + {u'_raw_params': u'a'}), + (u'a=b', + [u'a=b'], + {u'a': u'b'}), + (u'a="foo bar"', + [u'a="foo bar"'], + {u'a': u'foo bar'}), + (u'"foo bar baz"', + [u'"foo bar baz"'], + {u'_raw_params': '"foo bar baz"'}), + (u'foo bar baz', + [u'foo', u'bar', u'baz'], + {u'_raw_params': u'foo bar baz'}), + (u'a=b c="foo bar"', + [u'a=b', u'c="foo bar"'], + {u'a': u'b', u'c': u'foo bar'}), + (u'a="echo \\"hello world\\"" b=bar', + [u'a="echo \\"hello world\\""', u'b=bar'], + {u'a': u'echo "hello world"', u'b': u'bar'}), + (u'a="multi\nline"', + [u'a="multi\nline"'], + {u'a': u'multi\nline'}), + (u'a="blank\n\nline"', + [u'a="blank\n\nline"'], + {u'a': u'blank\n\nline'}), + (u'a="blank\n\n\nlines"', + [u'a="blank\n\n\nlines"'], + {u'a': u'blank\n\n\nlines'}), + (u'a="a long\nmessage\\\nabout a thing\n"', + [u'a="a long\nmessage\\\nabout a thing\n"'], + {u'a': u'a long\nmessage\\\nabout a thing\n'}), + (u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"', + [u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'], + {u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}), + (u'a={{jinja}}', + [u'a={{jinja}}'], + {u'a': u'{{jinja}}'}), + (u'a={{ jinja }}', + [u'a={{ jinja }}'], + {u'a': u'{{ jinja }}'}), + (u'a="{{jinja}}"', + [u'a="{{jinja}}"'], + {u'a': u'{{jinja}}'}), + (u'a={{ jinja }}{{jinja2}}', + [u'a={{ jinja }}{{jinja2}}'], + {u'a': u'{{ jinja }}{{jinja2}}'}), + (u'a="{{ jinja }}{{jinja2}}"', + [u'a="{{ jinja }}{{jinja2}}"'], + {u'a': u'{{ jinja }}{{jinja2}}'}), + (u'a={{jinja}} b={{jinja2}}', + [u'a={{jinja}}', u'b={{jinja2}}'], + {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}), + (u'a="{{jinja}}\n" b="{{jinja2}}\n"', + [u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'], + {u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}), + (u'a="café eñyei"', + [u'a="café eñyei"'], + {u'a': u'café eñyei'}), + (u'a=café b=eñyei', + [u'a=café', u'b=eñyei'], + {u'a': u'café', u'b': u'eñyei'}), + (u'a={{ foo | some_filter(\' \', " ") }} b=bar', + [u'a={{ foo | some_filter(\' \', " ") }}', u'b=bar'], + {u'a': u'{{ foo | some_filter(\' \', " ") }}', u'b': u'bar'}), + (u'One\n Two\n Three\n', + [u'One\n ', u'Two\n ', u'Three\n'], + {u'_raw_params': u'One\n Two\n Three\n'}), +) + +SPLIT_ARGS = ((test[0], test[1]) for test in SPLIT_DATA) +PARSE_KV = ((test[0], test[2]) for test in SPLIT_DATA) + + +@pytest.mark.parametrize("args, expected", SPLIT_ARGS) +def test_split_args(args, expected): + assert split_args(args) == expected + + +@pytest.mark.parametrize("args, expected", PARSE_KV) +def test_parse_kv(args, expected): + assert parse_kv(args) == expected diff --git a/test/units/parsing/test_unquote.py b/test/units/parsing/test_unquote.py new file mode 100644 index 00000000..4b4260e7 --- /dev/null +++ b/test/units/parsing/test_unquote.py @@ -0,0 +1,51 @@ +# coding: utf-8 +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.parsing.quoting import unquote + +import pytest + +UNQUOTE_DATA = ( + (u'1', u'1'), + (u'\'1\'', u'1'), + (u'"1"', u'1'), + (u'"1 \'2\'"', u'1 \'2\''), + (u'\'1 "2"\'', u'1 "2"'), + (u'\'1 \'2\'\'', u'1 \'2\''), + (u'"1\\"', u'"1\\"'), + (u'\'1\\\'', u'\'1\\\''), + (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'), + (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'), + (u'"', u'"'), + (u'\'', u'\''), + # Not entirely sure these are good but they match the current + # behaviour + (u'"1""2"', u'1""2'), + (u'\'1\'\'2\'', u'1\'\'2'), + (u'"1" 2 "3"', u'1" 2 "3'), + (u'"1"\'2\'"3"', u'1"\'2\'"3'), +) + + +@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA) +def test_unquote(quoted, expected): + assert unquote(quoted) == expected diff --git a/test/units/parsing/utils/__init__.py b/test/units/parsing/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/parsing/utils/test_addresses.py b/test/units/parsing/utils/test_addresses.py new file mode 100644 index 00000000..4f7304f5 --- /dev/null +++ b/test/units/parsing/utils/test_addresses.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import unittest + +from ansible.parsing.utils.addresses import parse_address + + +class TestParseAddress(unittest.TestCase): + + tests = { + # IPv4 addresses + '192.0.2.3': ['192.0.2.3', None], + '192.0.2.3:23': ['192.0.2.3', 23], + + # IPv6 addresses + '::': ['::', None], + '::1': ['::1', None], + '[::1]:442': ['::1', 442], + 'abcd:ef98:7654:3210:abcd:ef98:7654:3210': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', None], + '[abcd:ef98:7654:3210:abcd:ef98:7654:3210]:42': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', 42], + '1234:5678:9abc:def0:1234:5678:9abc:def0': ['1234:5678:9abc:def0:1234:5678:9abc:def0', None], + '1234::9abc:def0:1234:5678:9abc:def0': ['1234::9abc:def0:1234:5678:9abc:def0', None], + '1234:5678::def0:1234:5678:9abc:def0': ['1234:5678::def0:1234:5678:9abc:def0', None], + '1234:5678:9abc::1234:5678:9abc:def0': ['1234:5678:9abc::1234:5678:9abc:def0', None], + '1234:5678:9abc:def0::5678:9abc:def0': ['1234:5678:9abc:def0::5678:9abc:def0', None], + '1234:5678:9abc:def0:1234::9abc:def0': ['1234:5678:9abc:def0:1234::9abc:def0', None], + '1234:5678:9abc:def0:1234:5678::def0': ['1234:5678:9abc:def0:1234:5678::def0', None], + '1234:5678:9abc:def0:1234:5678::': ['1234:5678:9abc:def0:1234:5678::', None], + '::9abc:def0:1234:5678:9abc:def0': ['::9abc:def0:1234:5678:9abc:def0', None], + '0:0:0:0:0:ffff:1.2.3.4': ['0:0:0:0:0:ffff:1.2.3.4', None], + '0:0:0:0:0:0:1.2.3.4': ['0:0:0:0:0:0:1.2.3.4', None], + '::ffff:1.2.3.4': ['::ffff:1.2.3.4', None], + '::1.2.3.4': ['::1.2.3.4', None], + '1234::': ['1234::', None], + + # Hostnames + 'some-host': ['some-host', None], + 'some-host:80': ['some-host', 80], + 'some.host.com:492': ['some.host.com', 492], + '[some.host.com]:493': ['some.host.com', 493], + 'a-b.3foo_bar.com:23': ['a-b.3foo_bar.com', 23], + u'fóöbär': [u'fóöbär', None], + u'fóöbär:32': [u'fóöbär', 32], + u'fóöbär.éxàmplê.com:632': [u'fóöbär.éxàmplê.com', 632], + + # Various errors + '': [None, None], + 'some..host': [None, None], + 'some.': [None, None], + '[example.com]': [None, None], + 'some-': [None, None], 
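+        # a leading or trailing hyphen inside any dotted label is invalid too: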
+ 'some-.foo.com': [None, None], + 'some.-foo.com': [None, None], + } + + range_tests = { + '192.0.2.[3:10]': ['192.0.2.[3:10]', None], + '192.0.2.[3:10]:23': ['192.0.2.[3:10]', 23], + 'abcd:ef98::7654:[1:9]': ['abcd:ef98::7654:[1:9]', None], + '[abcd:ef98::7654:[6:32]]:2222': ['abcd:ef98::7654:[6:32]', 2222], + '[abcd:ef98::7654:[9ab3:fcb7]]:2222': ['abcd:ef98::7654:[9ab3:fcb7]', 2222], + u'fóöb[a:c]r.éxàmplê.com:632': [u'fóöb[a:c]r.éxàmplê.com', 632], + '[a:b]foo.com': ['[a:b]foo.com', None], + 'foo[a:b].com': ['foo[a:b].com', None], + 'foo[a:b]:42': ['foo[a:b]', 42], + 'foo[a-b]-.com': [None, None], + 'foo[a-b]:32': [None, None], + 'foo[x-y]': [None, None], + } + + def test_without_ranges(self): + for t in self.tests: + test = self.tests[t] + + try: + (host, port) = parse_address(t) + except Exception: + host = None + port = None + + assert host == test[0] + assert port == test[1] + + def test_with_ranges(self): + for t in self.range_tests: + test = self.range_tests[t] + + try: + (host, port) = parse_address(t, allow_ranges=True) + except Exception: + host = None + port = None + + assert host == test[0] + assert port == test[1] diff --git a/test/units/parsing/utils/test_jsonify.py b/test/units/parsing/utils/test_jsonify.py new file mode 100644 index 00000000..37be7824 --- /dev/null +++ b/test/units/parsing/utils/test_jsonify.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# (c) 2016, James Cammarata +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.parsing.utils.jsonify import jsonify + + +class TestJsonify(unittest.TestCase): + def test_jsonify_simple(self): + self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}') + + def test_jsonify_simple_format(self): + res = jsonify(dict(a=1, b=2, c=3), format=True) + cleaned = "".join([x.strip() for x in res.splitlines()]) + self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}') + + def test_jsonify_unicode(self): + self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}') + + def test_jsonify_empty(self): + self.assertEqual(jsonify(None), '{}') diff --git a/test/units/parsing/utils/test_yaml.py b/test/units/parsing/utils/test_yaml.py new file mode 100644 index 00000000..27b2905a --- /dev/null +++ b/test/units/parsing/utils/test_yaml.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# (c) 2017, Ansible Project +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.errors import AnsibleParserError +from ansible.parsing.utils.yaml import from_yaml + + +def test_from_yaml_simple(): + assert from_yaml(u'---\n- test: 1\n test2: "2"\n- caf\xe9: "caf\xe9"') == [{u'test': 1, u'test2': u"2"}, {u"caf\xe9": u"caf\xe9"}] + + +def test_bad_yaml(): + with pytest.raises(AnsibleParserError): + from_yaml(u'foo: bar: baz') diff --git a/test/units/parsing/vault/__init__.py b/test/units/parsing/vault/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py new file mode 100644 index 00000000..a9c4fc9e --- /dev/null +++ b/test/units/parsing/vault/test_vault.py @@ -0,0 +1,941 @@ +# -*- coding: utf-8 -*- +# (c) 2012-2014, Michael DeHaan +# (c) 2016, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import binascii +import io +import os +import tempfile + +from binascii import hexlify +import pytest + +from units.compat import unittest +from units.compat.mock import patch, MagicMock + +from ansible import errors +from ansible.module_utils import six +from ansible.module_utils._text import to_bytes, to_text +from ansible.parsing import vault + +from units.mock.loader import DictDataLoader +from units.mock.vault_helper import TextVaultSecret + + +class TestUnhexlify(unittest.TestCase): + def test(self): + b_plain_data = b'some text to hexlify' + b_data = hexlify(b_plain_data) + res = vault._unhexlify(b_data) + self.assertEqual(res, b_plain_data) + + def test_odd_length(self): + b_data = b'123456789abcdefghijklmnopqrstuvwxyz' + + self.assertRaisesRegexp(vault.AnsibleVaultFormatError, + '.*Vault format unhexlify error.*', + vault._unhexlify, + b_data) + + def test_nonhex(self): + b_data = b'6z36316566653264333665333637623064303639353237620a636366633565663263336335656532' + + self.assertRaisesRegexp(vault.AnsibleVaultFormatError, + '.*Vault format unhexlify error.*Non-hexadecimal digit found', + vault._unhexlify, + b_data) + + +class TestParseVaulttext(unittest.TestCase): + def test(self): + vaulttext_envelope = u'''$ANSIBLE_VAULT;1.1;AES256 +33363965326261303234626463623963633531343539616138316433353830356566396130353436 +3562643163366231316662386565383735653432386435610a306664636137376132643732393835 +63383038383730306639353234326630666539346233376330303938323639306661313032396437 +6233623062366136310a633866373936313238333730653739323461656662303864663666653563 +3138''' + + b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8') + b_vaulttext, 
b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope) + res = vault.parse_vaulttext(b_vaulttext) + self.assertIsInstance(res[0], bytes) + self.assertIsInstance(res[1], bytes) + self.assertIsInstance(res[2], bytes) + + def test_non_hex(self): + vaulttext_envelope = u'''$ANSIBLE_VAULT;1.1;AES256 +3336396J326261303234626463623963633531343539616138316433353830356566396130353436 +3562643163366231316662386565383735653432386435610a306664636137376132643732393835 +63383038383730306639353234326630666539346233376330303938323639306661313032396437 +6233623062366136310a633866373936313238333730653739323461656662303864663666653563 +3138''' + + b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8') + b_vaulttext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope) + self.assertRaisesRegexp(vault.AnsibleVaultFormatError, + '.*Vault format unhexlify error.*Non-hexadecimal digit found', + vault.parse_vaulttext, + b_vaulttext_envelope) + + +class TestVaultSecret(unittest.TestCase): + def test(self): + secret = vault.VaultSecret() + secret.load() + self.assertIsNone(secret._bytes) + + def test_bytes(self): + some_text = u'私はガラスを食べられます。それは私を傷つけません。' + _bytes = to_bytes(some_text) + secret = vault.VaultSecret(_bytes) + secret.load() + self.assertEqual(secret.bytes, _bytes) + + +class TestPromptVaultSecret(unittest.TestCase): + def test_empty_prompt_formats(self): + secret = vault.PromptVaultSecret(vault_id='test_id', prompt_formats=[]) + secret.load() + self.assertIsNone(secret._bytes) + + @patch('ansible.parsing.vault.display.prompt', return_value='the_password') + def test_prompt_formats_none(self, mock_display_prompt): + secret = vault.PromptVaultSecret(vault_id='test_id') + secret.load() + self.assertEqual(secret._bytes, b'the_password') + + @patch('ansible.parsing.vault.display.prompt', return_value='the_password') + def test_custom_prompt(self, mock_display_prompt): + secret = vault.PromptVaultSecret(vault_id='test_id', + prompt_formats=['The cow flies at midnight: ']) + secret.load() + self.assertEqual(secret._bytes, b'the_password') + + @patch('ansible.parsing.vault.display.prompt', side_effect=EOFError) + def test_prompt_eoferror(self, mock_display_prompt): + secret = vault.PromptVaultSecret(vault_id='test_id') + self.assertRaisesRegexp(vault.AnsibleVaultError, + 'EOFError.*test_id', + secret.load) + + @patch('ansible.parsing.vault.display.prompt', side_effect=['first_password', 'second_password']) + def test_prompt_passwords_dont_match(self, mock_display_prompt): + secret = vault.PromptVaultSecret(vault_id='test_id', + prompt_formats=['Vault password: ', + 'Confirm Vault password: ']) + self.assertRaisesRegexp(errors.AnsibleError, + 'Passwords do not match', + secret.load) + + +class TestFileVaultSecret(unittest.TestCase): + def setUp(self): + self.vault_password = "test-vault-password" + text_secret = TextVaultSecret(self.vault_password) + self.vault_secrets = [('foo', text_secret)] + + def test(self): + secret = vault.FileVaultSecret() + self.assertIsNone(secret._bytes) + self.assertIsNone(secret._text) + + def test_repr_empty(self): + secret = vault.FileVaultSecret() + self.assertEqual(repr(secret), "FileVaultSecret()") + + def test_repr(self): + tmp_file = tempfile.NamedTemporaryFile(delete=False) + fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'}) + + secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name) + filename = tmp_file.name + tmp_file.close() + 
self.assertEqual(repr(secret), "FileVaultSecret(filename='%s')" % filename)
+
+    def test_empty_bytes(self):
+        secret = vault.FileVaultSecret()
+        self.assertIsNone(secret.bytes)
+
+    def test_file(self):
+        password = 'some password'
+
+        tmp_file = tempfile.NamedTemporaryFile(delete=False)
+        tmp_file.write(to_bytes(password))
+        tmp_file.close()
+
+        fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+
+        secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+        secret.load()
+
+        os.unlink(tmp_file.name)
+
+        self.assertEqual(secret.bytes, to_bytes(password))
+
+    def test_file_empty(self):
+
+        tmp_file = tempfile.NamedTemporaryFile(delete=False)
+        tmp_file.write(to_bytes(''))
+        tmp_file.close()
+
+        fake_loader = DictDataLoader({tmp_file.name: ''})
+
+        secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+        self.assertRaisesRegexp(vault.AnsibleVaultPasswordError,
+                                'Invalid vault password was provided from file.*%s' % tmp_file.name,
+                                secret.load)
+
+        os.unlink(tmp_file.name)
+
+    def test_file_encrypted(self):
+        vault_password = "test-vault-password"
+        text_secret = TextVaultSecret(vault_password)
+        vault_secrets = [('foo', text_secret)]
+
+        password = 'some password'
+        # 'some password' encrypted with 'test-vault-password'
+
+        password_file_content = '''$ANSIBLE_VAULT;1.1;AES256
+61393863643638653437313566313632306462383837303132346434616433313438353634613762
+3334363431623364386164616163326537366333353663650a663634306232363432626162353665
+39623061353266373631636331643761306665343731376633623439313138396330346237653930
+6432643864346136640a653364386634666461306231353765636662316335613235383565306437
+3737
+'''
+
+        tmp_file = tempfile.NamedTemporaryFile(delete=False)
+        tmp_file.write(to_bytes(password_file_content))
+        tmp_file.close()
+
+        fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+        fake_loader._vault.secrets = vault_secrets
+
+        secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+        secret.load()
+
+        os.unlink(tmp_file.name)
+
+        self.assertEqual(secret.bytes, to_bytes(password))
+
+    def test_file_not_a_directory(self):
+        filename = '/dev/null/foobar'
+        fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+        secret = vault.FileVaultSecret(loader=fake_loader, filename=filename)
+        self.assertRaisesRegexp(errors.AnsibleError,
+                                '.*Could not read vault password file.*/dev/null/foobar.*Not a directory',
+                                secret.load)
+
+    def test_file_not_found(self):
+        tmp_file = tempfile.NamedTemporaryFile()
+        filename = os.path.realpath(tmp_file.name)
+        tmp_file.close()
+
+        fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+        secret = vault.FileVaultSecret(loader=fake_loader, filename=filename)
+        self.assertRaisesRegexp(errors.AnsibleError,
+                                '.*Could not read vault password file.*%s.*' % filename,
+                                secret.load)
+
+
+class TestScriptVaultSecret(unittest.TestCase):
+    def test(self):
+        secret = vault.ScriptVaultSecret()
+        self.assertIsNone(secret._bytes)
+        self.assertIsNone(secret._text)
+
+    def _mock_popen(self, mock_popen, return_code=0, stdout=b'', stderr=b''):
+        def communicate():
+            return stdout, stderr
+        mock_popen.return_value = MagicMock(returncode=return_code)
+        mock_popen_instance = mock_popen.return_value
+        mock_popen_instance.communicate = communicate
+
+    @patch('ansible.parsing.vault.subprocess.Popen')
+    def test_read_file(self, mock_popen):
+        self._mock_popen(mock_popen, stdout=b'some_password')
+        secret = vault.ScriptVaultSecret()
+        with patch.object(secret, 'loader') as mock_loader:
+            mock_loader.is_executable =
MagicMock(return_value=True) + secret.load() + + @patch('ansible.parsing.vault.subprocess.Popen') + def test_read_file_empty(self, mock_popen): + self._mock_popen(mock_popen, stdout=b'') + secret = vault.ScriptVaultSecret() + with patch.object(secret, 'loader') as mock_loader: + mock_loader.is_executable = MagicMock(return_value=True) + self.assertRaisesRegexp(vault.AnsibleVaultPasswordError, + 'Invalid vault password was provided from script', + secret.load) + + @patch('ansible.parsing.vault.subprocess.Popen') + def test_read_file_os_error(self, mock_popen): + self._mock_popen(mock_popen) + mock_popen.side_effect = OSError('That is not an executable') + secret = vault.ScriptVaultSecret() + with patch.object(secret, 'loader') as mock_loader: + mock_loader.is_executable = MagicMock(return_value=True) + self.assertRaisesRegexp(errors.AnsibleError, + 'Problem running vault password script.*', + secret.load) + + @patch('ansible.parsing.vault.subprocess.Popen') + def test_read_file_not_executable(self, mock_popen): + self._mock_popen(mock_popen) + secret = vault.ScriptVaultSecret() + with patch.object(secret, 'loader') as mock_loader: + mock_loader.is_executable = MagicMock(return_value=False) + self.assertRaisesRegexp(vault.AnsibleVaultError, + 'The vault password script .* was not executable', + secret.load) + + @patch('ansible.parsing.vault.subprocess.Popen') + def test_read_file_non_zero_return_code(self, mock_popen): + stderr = b'That did not work for a random reason' + rc = 37 + + self._mock_popen(mock_popen, return_code=rc, stderr=stderr) + secret = vault.ScriptVaultSecret(filename='/dev/null/some_vault_secret') + with patch.object(secret, 'loader') as mock_loader: + mock_loader.is_executable = MagicMock(return_value=True) + self.assertRaisesRegexp(errors.AnsibleError, + r'Vault password script.*returned non-zero \(%s\): %s' % (rc, stderr), + secret.load) + + +class TestScriptIsClient(unittest.TestCase): + def test_randomname(self): + filename = 'randomname' + res = vault.script_is_client(filename) + self.assertFalse(res) + + def test_something_dash_client(self): + filename = 'something-client' + res = vault.script_is_client(filename) + self.assertTrue(res) + + def test_something_dash_client_somethingelse(self): + filename = 'something-client-somethingelse' + res = vault.script_is_client(filename) + self.assertFalse(res) + + def test_something_dash_client_py(self): + filename = 'something-client.py' + res = vault.script_is_client(filename) + self.assertTrue(res) + + def test_full_path_something_dash_client_py(self): + filename = '/foo/bar/something-client.py' + res = vault.script_is_client(filename) + self.assertTrue(res) + + def test_full_path_something_dash_client(self): + filename = '/foo/bar/something-client' + res = vault.script_is_client(filename) + self.assertTrue(res) + + def test_full_path_something_dash_client_in_dir(self): + filename = '/foo/bar/something-client/but/not/filename' + res = vault.script_is_client(filename) + self.assertFalse(res) + + +class TestGetFileVaultSecret(unittest.TestCase): + def test_file(self): + password = 'some password' + + tmp_file = tempfile.NamedTemporaryFile(delete=False) + tmp_file.write(to_bytes(password)) + tmp_file.close() + + fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'}) + + secret = vault.get_file_vault_secret(filename=tmp_file.name, loader=fake_loader) + secret.load() + + os.unlink(tmp_file.name) + + self.assertEqual(secret.bytes, to_bytes(password)) + + def test_file_not_a_directory(self): + filename = '/dev/null/foobar' + 
fake_loader = DictDataLoader({filename: 'sdfadf'}) + + self.assertRaisesRegexp(errors.AnsibleError, + '.*The vault password file %s was not found.*' % filename, + vault.get_file_vault_secret, + filename=filename, + loader=fake_loader) + + def test_file_not_found(self): + tmp_file = tempfile.NamedTemporaryFile() + filename = os.path.realpath(tmp_file.name) + tmp_file.close() + + fake_loader = DictDataLoader({filename: 'sdfadf'}) + + self.assertRaisesRegexp(errors.AnsibleError, + '.*The vault password file %s was not found.*' % filename, + vault.get_file_vault_secret, + filename=filename, + loader=fake_loader) + + +class TestVaultIsEncrypted(unittest.TestCase): + def test_bytes_not_encrypted(self): + b_data = b"foobar" + self.assertFalse(vault.is_encrypted(b_data)) + + def test_bytes_encrypted(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") + self.assertTrue(vault.is_encrypted(b_data)) + + def test_text_not_encrypted(self): + b_data = to_text(b"foobar") + self.assertFalse(vault.is_encrypted(b_data)) + + def test_text_encrypted(self): + b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")) + self.assertTrue(vault.is_encrypted(b_data)) + + def test_invalid_text_not_ascii(self): + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " + self.assertFalse(vault.is_encrypted(data)) + + def test_invalid_bytes_not_ascii(self): + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " + b_data = to_bytes(data, encoding='utf-8') + self.assertFalse(vault.is_encrypted(b_data)) + + +class TestVaultIsEncryptedFile(unittest.TestCase): + def test_binary_file_handle_not_encrypted(self): + b_data = b"foobar" + b_data_fo = io.BytesIO(b_data) + self.assertFalse(vault.is_encrypted_file(b_data_fo)) + + def test_text_file_handle_not_encrypted(self): + data = u"foobar" + data_fo = io.StringIO(data) + self.assertFalse(vault.is_encrypted_file(data_fo)) + + def test_binary_file_handle_encrypted(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") + b_data_fo = io.BytesIO(b_data) + self.assertTrue(vault.is_encrypted_file(b_data_fo)) + + def test_text_file_handle_encrypted(self): + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible")) + data_fo = io.StringIO(data) + self.assertTrue(vault.is_encrypted_file(data_fo)) + + def test_binary_file_handle_invalid(self): + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " + b_data = to_bytes(data) + b_data_fo = io.BytesIO(b_data) + self.assertFalse(vault.is_encrypted_file(b_data_fo)) + + def test_text_file_handle_invalid(self): + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " + data_fo = io.StringIO(data) + self.assertFalse(vault.is_encrypted_file(data_fo)) + + def test_file_already_read_from_finds_header(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") + b_data_fo = io.BytesIO(b_data) + b_data_fo.read(42) # Arbitrary number + self.assertTrue(vault.is_encrypted_file(b_data_fo)) + + def test_file_already_read_from_saves_file_pos(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") + b_data_fo = io.BytesIO(b_data) + b_data_fo.read(69) # Arbitrary number + vault.is_encrypted_file(b_data_fo) + self.assertEqual(b_data_fo.tell(), 69) + + def test_file_with_offset(self): + b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") + b_data_fo = io.BytesIO(b_data) + 
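# the vault header is preceded by four junk bytes here, so the caller
+        # must tell is_encrypted_file() where the envelope actually starts
+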
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4)) + + def test_file_with_count(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") + vault_length = len(b_data) + b_data = b_data + u'ァ ア'.encode('utf-8') + b_data_fo = io.BytesIO(b_data) + self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length)) + + def test_file_with_offset_and_count(self): + b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") + vault_length = len(b_data) + b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8') + b_data_fo = io.BytesIO(b_data) + self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length)) + + +@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY, + reason="Skipping cryptography tests because cryptography is not installed") +class TestVaultCipherAes256(unittest.TestCase): + def setUp(self): + self.vault_cipher = vault.VaultAES256() + + def test(self): + self.assertIsInstance(self.vault_cipher, vault.VaultAES256) + + # TODO: tag these as slow tests + def test_create_key_cryptography(self): + b_password = b'hunter42' + b_salt = os.urandom(32) + b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_cryptography, six.binary_type) + + @pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not testing pycrypto key as pycrypto is not installed') + def test_create_key_pycrypto(self): + b_password = b'hunter42' + b_salt = os.urandom(32) + + b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_pycrypto, six.binary_type) + + @pytest.mark.skipif(not vault.HAS_PYCRYPTO, + reason='Not comparing cryptography key to pycrypto key as pycrypto is not installed') + def test_compare_new_keys(self): + b_password = b'hunter42' + b_salt = os.urandom(32) + b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) + + b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertEqual(b_key_cryptography, b_key_pycrypto) + + def test_create_key_known_cryptography(self): + b_password = b'hunter42' + + # A fixed salt + b_salt = b'q' * 32 # q is the most random letter. 
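+        # with password, salt and key/IV lengths all fixed, derivation must be
+        # deterministic: repeated calls (and the pycrypto path below) have to
+        # return byte-identical keys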
+ b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_1, six.binary_type) + + # verify we get the same answer + # we could potentially run a few iterations of this and time it to see if it's roughly constant time + # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI + b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_2, six.binary_type) + self.assertEqual(b_key_1, b_key_2) + + # And again with pycrypto + b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_3, six.binary_type) + + # verify we get the same answer + # we could potentially run a few iterations of this and time it to see if it's roughly constant time + # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI + b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_4, six.binary_type) + self.assertEqual(b_key_3, b_key_4) + self.assertEqual(b_key_1, b_key_4) + + def test_create_key_known_pycrypto(self): + b_password = b'hunter42' + + # A fixed salt + b_salt = b'q' * 32 # q is the most random letter. + b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_3, six.binary_type) + + # verify we get the same answer + # we could potentially run a few iterations of this and time it to see if it's roughly constant time + # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI + b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) + self.assertIsInstance(b_key_4, six.binary_type) + self.assertEqual(b_key_3, b_key_4) + + def test_is_equal_is_equal(self): + self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz')) + + def test_is_equal_unequal_length(self): + self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y')) + + def test_is_equal_not_equal(self): + self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ')) + + def test_is_equal_empty(self): + self.assertTrue(self.vault_cipher._is_equal(b'', b'')) + + def test_is_equal_non_ascii_equal(self): + utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') + self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data)) + + def test_is_equal_non_ascii_unequal(self): + utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') + utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.') + + # Test for the len optimization path + self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2)) + # Test for the slower, char by char comparison path + self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P')) + + def test_is_equal_non_bytes(self): + """ Anything not a byte string should raise a TypeError """ + self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish") + self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish") + self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish") + self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2) + + +@pytest.mark.skipif(not 
vault.HAS_PYCRYPTO, + reason="Skipping Pycrypto tests because pycrypto is not installed") +class TestVaultCipherAes256PyCrypto(TestVaultCipherAes256): + def setUp(self): + self.has_cryptography = vault.HAS_CRYPTOGRAPHY + vault.HAS_CRYPTOGRAPHY = False + super(TestVaultCipherAes256PyCrypto, self).setUp() + + def tearDown(self): + vault.HAS_CRYPTOGRAPHY = self.has_cryptography + super(TestVaultCipherAes256PyCrypto, self).tearDown() + + +class TestMatchSecrets(unittest.TestCase): + def test_empty_tuple(self): + secrets = [tuple()] + vault_ids = ['vault_id_1'] + self.assertRaises(ValueError, + vault.match_secrets, + secrets, vault_ids) + + def test_empty_secrets(self): + matches = vault.match_secrets([], ['vault_id_1']) + self.assertEqual(matches, []) + + def test_single_match(self): + secret = TextVaultSecret('password') + matches = vault.match_secrets([('default', secret)], ['default']) + self.assertEqual(matches, [('default', secret)]) + + def test_no_matches(self): + secret = TextVaultSecret('password') + matches = vault.match_secrets([('default', secret)], ['not_default']) + self.assertEqual(matches, []) + + def test_multiple_matches(self): + secrets = [('vault_id1', TextVaultSecret('password1')), + ('vault_id2', TextVaultSecret('password2')), + ('vault_id1', TextVaultSecret('password3')), + ('vault_id4', TextVaultSecret('password4'))] + vault_ids = ['vault_id1', 'vault_id4'] + matches = vault.match_secrets(secrets, vault_ids) + + self.assertEqual(len(matches), 3) + expected = [('vault_id1', TextVaultSecret('password1')), + ('vault_id1', TextVaultSecret('password3')), + ('vault_id4', TextVaultSecret('password4'))] + self.assertEqual([x for x, y in matches], + [a for a, b in expected]) + + +@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY, + reason="Skipping cryptography tests because cryptography is not installed") +class TestVaultLib(unittest.TestCase): + def setUp(self): + self.vault_password = "test-vault-password" + text_secret = TextVaultSecret(self.vault_password) + self.vault_secrets = [('default', text_secret), + ('test_id', text_secret)] + self.v = vault.VaultLib(self.vault_secrets) + + def _vault_secrets(self, vault_id, secret): + return [(vault_id, secret)] + + def _vault_secrets_from_password(self, vault_id, password): + return [(vault_id, TextVaultSecret(password))] + + def test_encrypt(self): + plaintext = u'Some text to encrypt in a café' + b_vaulttext = self.v.encrypt(plaintext) + + self.assertIsInstance(b_vaulttext, six.binary_type) + + b_header = b'$ANSIBLE_VAULT;1.1;AES256\n' + self.assertEqual(b_vaulttext[:len(b_header)], b_header) + + def test_encrypt_vault_id(self): + plaintext = u'Some text to encrypt in a café' + b_vaulttext = self.v.encrypt(plaintext, vault_id='test_id') + + self.assertIsInstance(b_vaulttext, six.binary_type) + + b_header = b'$ANSIBLE_VAULT;1.2;AES256;test_id\n' + self.assertEqual(b_vaulttext[:len(b_header)], b_header) + + def test_encrypt_bytes(self): + + plaintext = to_bytes(u'Some text to encrypt in a café') + b_vaulttext = self.v.encrypt(plaintext) + + self.assertIsInstance(b_vaulttext, six.binary_type) + + b_header = b'$ANSIBLE_VAULT;1.1;AES256\n' + self.assertEqual(b_vaulttext[:len(b_header)], b_header) + + def test_encrypt_no_secret_empty_secrets(self): + vault_secrets = [] + v = vault.VaultLib(vault_secrets) + + plaintext = u'Some text to encrypt in a café' + self.assertRaisesRegexp(vault.AnsibleVaultError, + '.*A vault password must be specified to encrypt data.*', + v.encrypt, + plaintext) + + def test_format_vaulttext_envelope(self): + 
cipher_name = "TEST" + b_ciphertext = b"ansible" + b_vaulttext = vault.format_vaulttext_envelope(b_ciphertext, + cipher_name, + version=self.v.b_version, + vault_id='default') + b_lines = b_vaulttext.split(b'\n') + self.assertGreater(len(b_lines), 1, msg="failed to properly add header") + + b_header = b_lines[0] + # self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name") + + b_header_parts = b_header.split(b';') + self.assertEqual(len(b_header_parts), 4, msg="header has the wrong number of parts") + self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT") + self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect") + self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name") + + # And just to verify, lets parse the results and compare + b_ciphertext2, b_version2, cipher_name2, vault_id2 = \ + vault.parse_vaulttext_envelope(b_vaulttext) + self.assertEqual(b_ciphertext, b_ciphertext2) + self.assertEqual(self.v.b_version, b_version2) + self.assertEqual(cipher_name, cipher_name2) + self.assertEqual('default', vault_id2) + + def test_parse_vaulttext_envelope(self): + b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible" + b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext) + b_lines = b_ciphertext.split(b'\n') + self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header") + self.assertEqual(cipher_name, u'TEST', msg="cipher name was not properly set") + self.assertEqual(b_version, b"9.9", msg="version was not properly set") + + def test_parse_vaulttext_envelope_crlf(self): + b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\r\nansible" + b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext) + b_lines = b_ciphertext.split(b'\n') + self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header") + self.assertEqual(cipher_name, u'TEST', msg="cipher name was not properly set") + self.assertEqual(b_version, b"9.9", msg="version was not properly set") + + def test_encrypt_decrypt_aes256(self): + self.v.cipher_name = u'AES256' + plaintext = u"foobar" + b_vaulttext = self.v.encrypt(plaintext) + b_plaintext = self.v.decrypt(b_vaulttext) + self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed") + self.assertEqual(b_plaintext, b"foobar", msg="decryption failed") + + def test_encrypt_decrypt_aes256_none_secrets(self): + vault_secrets = self._vault_secrets_from_password('default', 'ansible') + v = vault.VaultLib(vault_secrets) + + plaintext = u"foobar" + b_vaulttext = v.encrypt(plaintext) + + # VaultLib will default to empty {} if secrets is None + v_none = vault.VaultLib(None) + # so set secrets None explicitly + v_none.secrets = None + self.assertRaisesRegexp(vault.AnsibleVaultError, + '.*A vault password must be specified to decrypt data.*', + v_none.decrypt, + b_vaulttext) + + def test_encrypt_decrypt_aes256_empty_secrets(self): + vault_secrets = self._vault_secrets_from_password('default', 'ansible') + v = vault.VaultLib(vault_secrets) + + plaintext = u"foobar" + b_vaulttext = v.encrypt(plaintext) + + vault_secrets_empty = [] + v_none = vault.VaultLib(vault_secrets_empty) + + self.assertRaisesRegexp(vault.AnsibleVaultError, + '.*Attempting to decrypt but no vault secrets found.*', + v_none.decrypt, + b_vaulttext) + + def test_encrypt_decrypt_aes256_multiple_secrets_all_wrong(self): + plaintext = u'Some text to encrypt in a 
café'
+        b_vaulttext = self.v.encrypt(plaintext)
+
+        vault_secrets = [('default', TextVaultSecret('another-wrong-password')),
+                         ('wrong-password', TextVaultSecret('wrong-password'))]
+
+        v_multi = vault.VaultLib(vault_secrets)
+        self.assertRaisesRegexp(errors.AnsibleError,
+                                '.*Decryption failed.*',
+                                v_multi.decrypt,
+                                b_vaulttext,
+                                filename='/dev/null/fake/filename')
+
+    def test_encrypt_decrypt_aes256_multiple_secrets_one_valid(self):
+        plaintext = u'Some text to encrypt in a café'
+        b_vaulttext = self.v.encrypt(plaintext)
+
+        correct_secret = TextVaultSecret(self.vault_password)
+        wrong_secret = TextVaultSecret('wrong-password')
+
+        vault_secrets = [('default', wrong_secret),
+                         ('correct_secret', correct_secret),
+                         ('wrong_secret', wrong_secret)]
+
+        v_multi = vault.VaultLib(vault_secrets)
+        b_plaintext = v_multi.decrypt(b_vaulttext)
+        self.assertNotEqual(b_vaulttext, to_bytes(plaintext), msg="encryption failed")
+        self.assertEqual(b_plaintext, to_bytes(plaintext), msg="decryption failed")
+
+    def test_encrypt_decrypt_aes256_existing_vault(self):
+        self.v.cipher_name = u'AES256'
+        b_orig_plaintext = b"Setec Astronomy"
+        vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
+33363965326261303234626463623963633531343539616138316433353830356566396130353436
+3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+63383038383730306639353234326630666539346233376330303938323639306661313032396437
+6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+3138'''
+
+        b_plaintext = self.v.decrypt(vaulttext)
+        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
+
+        b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
+        b_plaintext = self.v.decrypt(b_vaulttext)
+        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
+
+    # FIXME This test isn't working quite yet.
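+    # (intent: strip the envelope, tamper with the hexlified ciphertext,
+    # reassemble the payload, and expect decrypt() to reject the bad HMAC)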
+ @pytest.mark.skip(reason='This test is not ready yet') + def test_encrypt_decrypt_aes256_bad_hmac(self): + + self.v.cipher_name = 'AES256' + # plaintext = "Setec Astronomy" + enc_data = '''$ANSIBLE_VAULT;1.1;AES256 +33363965326261303234626463623963633531343539616138316433353830356566396130353436 +3562643163366231316662386565383735653432386435610a306664636137376132643732393835 +63383038383730306639353234326630666539346233376330303938323639306661313032396437 +6233623062366136310a633866373936313238333730653739323461656662303864663666653563 +3138''' + b_data = to_bytes(enc_data, errors='strict', encoding='utf-8') + b_data = self.v._split_header(b_data) + foo = binascii.unhexlify(b_data) + lines = foo.splitlines() + # line 0 is salt, line 1 is hmac, line 2+ is ciphertext + b_salt = lines[0] + b_hmac = lines[1] + b_ciphertext_data = b'\n'.join(lines[2:]) + + b_ciphertext = binascii.unhexlify(b_ciphertext_data) + # b_orig_ciphertext = b_ciphertext[:] + + # now muck with the text + # b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:] + # b_munged_ciphertext = b_ciphertext + # assert b_orig_ciphertext != b_munged_ciphertext + + b_ciphertext_data = binascii.hexlify(b_ciphertext) + b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data]) + # reformat + b_invalid_ciphertext = self.v._format_output(b_payload) + + # assert we throw an error + self.v.decrypt(b_invalid_ciphertext) + + def test_decrypt_and_get_vault_id(self): + b_expected_plaintext = to_bytes('foo bar\n') + vaulttext = '''$ANSIBLE_VAULT;1.2;AES256;ansible_devel +65616435333934613466373335363332373764363365633035303466643439313864663837393234 +3330656363343637313962633731333237313636633534630a386264363438363362326132363239 +39363166646664346264383934393935653933316263333838386362633534326664646166663736 +6462303664383765650a356637643633366663643566353036303162386237336233393065393164 +6264''' + + vault_secrets = self._vault_secrets_from_password('ansible_devel', 'ansible') + v = vault.VaultLib(vault_secrets) + + b_vaulttext = to_bytes(vaulttext) + + b_plaintext, vault_id_used, vault_secret_used = v.decrypt_and_get_vault_id(b_vaulttext) + + self.assertEqual(b_expected_plaintext, b_plaintext) + self.assertEqual(vault_id_used, 'ansible_devel') + self.assertEqual(vault_secret_used.text, 'ansible') + + def test_decrypt_non_default_1_2(self): + b_expected_plaintext = to_bytes('foo bar\n') + vaulttext = '''$ANSIBLE_VAULT;1.2;AES256;ansible_devel +65616435333934613466373335363332373764363365633035303466643439313864663837393234 +3330656363343637313962633731333237313636633534630a386264363438363362326132363239 +39363166646664346264383934393935653933316263333838386362633534326664646166663736 +6462303664383765650a356637643633366663643566353036303162386237336233393065393164 +6264''' + + vault_secrets = self._vault_secrets_from_password('default', 'ansible') + v = vault.VaultLib(vault_secrets) + + b_vaulttext = to_bytes(vaulttext) + + b_plaintext = v.decrypt(b_vaulttext) + self.assertEqual(b_expected_plaintext, b_plaintext) + + b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext) + self.assertEqual('ansible_devel', vault_id) + self.assertEqual(b'1.2', b_version) + + def test_decrypt_decrypted(self): + plaintext = u"ansible" + self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext) + + b_plaintext = b"ansible" + self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext) + + def test_cipher_not_set(self): + plaintext = u"ansible" + self.v.encrypt(plaintext) + 
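# with no cipher configured, encrypt() should fall back to the default
+        # and record it on the lib
+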
self.assertEqual(self.v.cipher_name, "AES256")
+
+
+@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
+                    reason="Skipping Pycrypto tests because pycrypto is not installed")
+class TestVaultLibPyCrypto(TestVaultLib):
+    def setUp(self):
+        self.has_cryptography = vault.HAS_CRYPTOGRAPHY
+        vault.HAS_CRYPTOGRAPHY = False
+        super(TestVaultLibPyCrypto, self).setUp()
+
+    def tearDown(self):
+        vault.HAS_CRYPTOGRAPHY = self.has_cryptography
+        super(TestVaultLibPyCrypto, self).tearDown()
diff --git a/test/units/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
new file mode 100644
index 00000000..8aa9b37c
--- /dev/null
+++ b/test/units/parsing/vault/test_vault_editor.py
@@ -0,0 +1,517 @@
+# (c) 2014, James Tanner
+# (c) 2014, James Cammarata,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+
+import pytest
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible import errors
+from ansible.parsing import vault
+from ansible.parsing.vault import VaultLib, VaultEditor, match_encrypt_secret
+
+from ansible.module_utils._text import to_bytes, to_text
+
+from units.mock.vault_helper import TextVaultSecret
+
+v11_data = """$ANSIBLE_VAULT;1.1;AES256
+62303130653266653331306264616235333735323636616539316433666463323964623162386137
+3961616263373033353631316333623566303532663065310a393036623466376263393961326530
+64336561613965383835646464623865663966323464653236343638373165343863623638316664
+3631633031323837340a396530313963373030343933616133393566366137363761373930663833
+3739"""
+
+
+@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
+                    reason="Skipping cryptography tests because cryptography is not installed")
+class TestVaultEditor(unittest.TestCase):
+
+    def setUp(self):
+        self._test_dir = None
+        self.vault_password = "test-vault-password"
+        vault_secret = TextVaultSecret(self.vault_password)
+        self.vault_secrets = [('vault_secret', vault_secret),
+                              ('default', vault_secret)]
+
+    @property
+    def vault_secret(self):
+        return match_encrypt_secret(self.vault_secrets)[1]
+
+    def tearDown(self):
+        if self._test_dir:
+            pass
+            # shutil.rmtree(self._test_dir)
+        self._test_dir = None
+
+    def _secrets(self, password):
+        vault_secret = TextVaultSecret(password)
+        vault_secrets = [('default', vault_secret)]
+        return vault_secrets
+
+    def test_methods_exist(self):
+        v = vault.VaultEditor(None)
+        slots = ['create_file',
+                 'decrypt_file',
+                 'edit_file',
+                 'encrypt_file',
+                 'rekey_file',
+                 'read_data',
+                 'write_data']
+        for slot in slots:
+            assert hasattr(v, slot), "VaultEditor is missing the %s method" % slot
+
+    def _create_test_dir(self):
+        suffix = '_ansible_unit_test_%s_' % (self.__class__.__name__)
+        return tempfile.mkdtemp(suffix=suffix)
+
+    def _create_file(self, test_dir, name, content=None, symlink=False):
+        file_path = os.path.join(test_dir, name)
+        opened_file = open(file_path, 'wb')
+        if content:
+            opened_file.write(content)
+        opened_file.close()
+        return file_path
+
+    def _vault_editor(self, vault_secrets=None):
+        if vault_secrets is None:
+            vault_secrets = self._secrets(self.vault_password)
+        return VaultEditor(VaultLib(vault_secrets))
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_helper_empty_target(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+
+        src_contents = to_bytes("some info in a file\nyup.")
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+        mock_sp_call.side_effect = self._faux_command
+        ve = self._vault_editor()
+
+        b_ciphertext = ve._edit_file_helper(src_file_path, self.vault_secret)
+
+        self.assertNotEqual(src_contents, b_ciphertext)
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_helper_call_exception(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+
+        src_contents = to_bytes("some info in a file\nyup.")
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+        error_txt = 'calling editor raised an exception'
+        mock_sp_call.side_effect = errors.AnsibleError(error_txt)
+
+        ve = self._vault_editor()
+
+        self.assertRaisesRegexp(errors.AnsibleError,
+                                error_txt,
+                                ve._edit_file_helper,
+                                src_file_path,
+                                self.vault_secret)
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_helper_symlink_target(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+
+        src_file_contents = to_bytes("some info in a file\nyup.")
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+        src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+
+        os.symlink(src_file_path, src_file_link_path)
+
+        mock_sp_call.side_effect = self._faux_command
+        ve = self._vault_editor()
+
+        b_ciphertext = ve._edit_file_helper(src_file_link_path, self.vault_secret)
+
+        self.assertNotEqual(src_file_contents, b_ciphertext,
+                            'b_ciphertext should be encrypted and not equal to src_contents')
+
+    def _faux_editor(self, editor_args, new_src_contents=None):
+        if editor_args[0] == 'shred':
+            return
+
+        tmp_path = editor_args[-1]
+
+        # simulate the tmp file being edited
+        tmp_file = open(tmp_path, 'wb')
+        if new_src_contents:
+            tmp_file.write(new_src_contents)
+        tmp_file.close()
+
+    def _faux_command(self, tmp_path):
+        pass
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_helper_no_change(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+
+        src_file_contents = to_bytes("some info in a file\nyup.")
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+        # editor invocation doesn't change anything
+        def faux_editor(editor_args):
+            self._faux_editor(editor_args, src_file_contents)
+
+        mock_sp_call.side_effect = faux_editor
+        ve = self._vault_editor()
+
+        ve._edit_file_helper(src_file_path, self.vault_secret, existing_data=src_file_contents)
+
+        new_target_file = open(src_file_path, 'rb')
+        new_target_file_contents = new_target_file.read()
+        self.assertEqual(src_file_contents, new_target_file_contents)
+
+    def _assert_file_is_encrypted(self, vault_editor, src_file_path, src_contents):
+        new_src_file = open(src_file_path, 'rb')
+        new_src_file_contents = new_src_file.read()
+
+        # TODO: assert that it is encrypted
+        self.assertTrue(vault.is_encrypted(new_src_file_contents))
+
+        src_file_plaintext =
vault_editor.vault.decrypt(new_src_file_contents) + + # the plaintext should not be encrypted + self.assertFalse(vault.is_encrypted(src_file_plaintext)) + + # and the new plaintext should match the original + self.assertEqual(src_file_plaintext, src_contents) + + def _assert_file_is_link(self, src_file_link_path, src_file_path): + self.assertTrue(os.path.islink(src_file_link_path), + 'The dest path (%s) should be a symlink to (%s) but is not' % (src_file_link_path, src_file_path)) + + def test_rekey_file(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + ve.encrypt_file(src_file_path, self.vault_secret) + + # FIXME: update to just set self._secrets or just a new vault secret id + new_password = 'password2:electricbugaloo' + new_vault_secret = TextVaultSecret(new_password) + new_vault_secrets = [('default', new_vault_secret)] + ve.rekey_file(src_file_path, vault.match_encrypt_secret(new_vault_secrets)[1]) + + # FIXME: can just update self._secrets here + new_ve = vault.VaultEditor(VaultLib(new_vault_secrets)) + self._assert_file_is_encrypted(new_ve, src_file_path, src_file_contents) + + def test_rekey_file_no_new_password(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + ve.encrypt_file(src_file_path, self.vault_secret) + + self.assertRaisesRegexp(errors.AnsibleError, + 'The value for the new_password to rekey', + ve.rekey_file, + src_file_path, + None) + + def test_rekey_file_not_encrypted(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + + new_password = 'password2:electricbugaloo' + self.assertRaisesRegexp(errors.AnsibleError, + 'input is not vault encrypted data', + ve.rekey_file, + src_file_path, new_password) + + def test_plaintext(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + ve.encrypt_file(src_file_path, self.vault_secret) + + res = ve.plaintext(src_file_path) + self.assertEqual(src_file_contents, res) + + def test_plaintext_not_encrypted(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + self.assertRaisesRegexp(errors.AnsibleError, + 'input is not vault encrypted data', + ve.plaintext, + src_file_path) + + def test_encrypt_file(self): + self._test_dir = self._create_test_dir() + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents) + + ve = self._vault_editor() + ve.encrypt_file(src_file_path, self.vault_secret) + + self._assert_file_is_encrypted(ve, src_file_path, src_file_contents) + + def test_encrypt_file_symlink(self): + self._test_dir = self._create_test_dir() + + src_file_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 
'src_file', content=src_file_contents)
+
+        src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+        os.symlink(src_file_path, src_file_link_path)
+
+        ve = self._vault_editor()
+        ve.encrypt_file(src_file_link_path, self.vault_secret)
+
+        self._assert_file_is_encrypted(ve, src_file_path, src_file_contents)
+        self._assert_file_is_encrypted(ve, src_file_link_path, src_file_contents)
+
+        self._assert_file_is_link(src_file_link_path, src_file_path)
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_no_vault_id(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+        src_contents = to_bytes("some info in a file\nyup.")
+
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+        new_src_contents = to_bytes("The info is different now.")
+
+        def faux_editor(editor_args):
+            self._faux_editor(editor_args, new_src_contents)
+
+        mock_sp_call.side_effect = faux_editor
+
+        ve = self._vault_editor()
+
+        ve.encrypt_file(src_file_path, self.vault_secret)
+        ve.edit_file(src_file_path)
+
+        new_src_file = open(src_file_path, 'rb')
+        new_src_file_contents = new_src_file.read()
+
+        self.assertTrue(b'$ANSIBLE_VAULT;1.1;AES256' in new_src_file_contents)
+
+        src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+        self.assertEqual(src_file_plaintext, new_src_contents)
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_with_vault_id(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+        src_contents = to_bytes("some info in a file\nyup.")
+
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+        new_src_contents = to_bytes("The info is different now.")
+
+        def faux_editor(editor_args):
+            self._faux_editor(editor_args, new_src_contents)
+
+        mock_sp_call.side_effect = faux_editor
+
+        ve = self._vault_editor()
+
+        ve.encrypt_file(src_file_path, self.vault_secret,
+                        vault_id='vault_secrets')
+        ve.edit_file(src_file_path)
+
+        new_src_file = open(src_file_path, 'rb')
+        new_src_file_contents = new_src_file.read()
+
+        self.assertTrue(b'$ANSIBLE_VAULT;1.2;AES256;vault_secrets' in new_src_file_contents)
+
+        src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+        self.assertEqual(src_file_plaintext, new_src_contents)
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_symlink(self, mock_sp_call):
+        self._test_dir = self._create_test_dir()
+        src_contents = to_bytes("some info in a file\nyup.")
+
+        src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+        new_src_contents = to_bytes("The info is different now.")
+
+        def faux_editor(editor_args):
+            self._faux_editor(editor_args, new_src_contents)
+
+        mock_sp_call.side_effect = faux_editor
+
+        ve = self._vault_editor()
+
+        ve.encrypt_file(src_file_path, self.vault_secret)
+
+        src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+
+        os.symlink(src_file_path, src_file_link_path)
+
+        ve.edit_file(src_file_link_path)
+
+        new_src_file = open(src_file_path, 'rb')
+        new_src_file_contents = new_src_file.read()
+
+        src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+
+        self._assert_file_is_link(src_file_link_path, src_file_path)
+
+        self.assertEqual(src_file_plaintext, new_src_contents)
+
+        # self.assertEqual(src_file_plaintext, new_src_contents,
+        #                  'The decrypted plaintext of the edited file is not the expected contents.')
+
+    @patch('ansible.parsing.vault.subprocess.call')
+    def test_edit_file_not_encrypted(self, mock_sp_call):
+        self._test_dir =
self._create_test_dir() + src_contents = to_bytes("some info in a file\nyup.") + + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents) + + new_src_contents = to_bytes("The info is different now.") + + def faux_editor(editor_args): + self._faux_editor(editor_args, new_src_contents) + + mock_sp_call.side_effect = faux_editor + + ve = self._vault_editor() + self.assertRaisesRegexp(errors.AnsibleError, + 'input is not vault encrypted data', + ve.edit_file, + src_file_path) + + def test_create_file_exists(self): + self._test_dir = self._create_test_dir() + src_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents) + + ve = self._vault_editor() + self.assertRaisesRegexp(errors.AnsibleError, + 'please use .edit. instead', + ve.create_file, + src_file_path, + self.vault_secret) + + def test_decrypt_file_exception(self): + self._test_dir = self._create_test_dir() + src_contents = to_bytes("some info in a file\nyup.") + src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents) + + ve = self._vault_editor() + self.assertRaisesRegexp(errors.AnsibleError, + 'input is not vault encrypted data', + ve.decrypt_file, + src_file_path) + + @patch.object(vault.VaultEditor, '_editor_shell_command') + def test_create_file(self, mock_editor_shell_command): + + def sc_side_effect(filename): + return ['touch', filename] + mock_editor_shell_command.side_effect = sc_side_effect + + tmp_file = tempfile.NamedTemporaryFile() + os.unlink(tmp_file.name) + + _secrets = self._secrets('ansible') + ve = self._vault_editor(_secrets) + ve.create_file(tmp_file.name, vault.match_encrypt_secret(_secrets)[1]) + + self.assertTrue(os.path.exists(tmp_file.name)) + + def test_decrypt_1_1(self): + v11_file = tempfile.NamedTemporaryFile(delete=False) + with v11_file as f: + f.write(to_bytes(v11_data)) + + ve = self._vault_editor(self._secrets("ansible")) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file(v11_file.name) + except errors.AnsibleError: + error_hit = True + + # verify decrypted content + f = open(v11_file.name, "rb") + fdata = to_text(f.read()) + f.close() + + os.unlink(v11_file.name) + + assert error_hit is False, "error decrypting 1.1 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip() + + def test_real_path_dash(self): + filename = '-' + ve = self._vault_editor() + + res = ve._real_path(filename) + self.assertEqual(res, '-') + + def test_real_path_dev_null(self): + filename = '/dev/null' + ve = self._vault_editor() + + res = ve._real_path(filename) + self.assertEqual(res, '/dev/null') + + def test_real_path_symlink(self): + self._test_dir = os.path.realpath(self._create_test_dir()) + file_path = self._create_file(self._test_dir, 'test_file', content=b'this is a test file') + file_link_path = os.path.join(self._test_dir, 'a_link_to_test_file') + + os.symlink(file_path, file_link_path) + + ve = self._vault_editor() + + res = ve._real_path(file_link_path) + self.assertEqual(res, file_path) + + +@pytest.mark.skipif(not vault.HAS_PYCRYPTO, + reason="Skipping pycrypto tests because pycrypto is not installed") +class TestVaultEditorPyCrypto(unittest.TestCase): + def setUp(self): + self.has_cryptography = vault.HAS_CRYPTOGRAPHY + vault.HAS_CRYPTOGRAPHY = False + super(TestVaultEditorPyCrypto, self).setUp() + + def tearDown(self): + vault.HAS_CRYPTOGRAPHY = self.has_cryptography + 
super(TestVaultEditorPyCrypto, self).tearDown() diff --git a/test/units/parsing/yaml/__init__.py b/test/units/parsing/yaml/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/parsing/yaml/test_dumper.py b/test/units/parsing/yaml/test_dumper.py new file mode 100644 index 00000000..8129ca3a --- /dev/null +++ b/test/units/parsing/yaml/test_dumper.py @@ -0,0 +1,103 @@ +# coding: utf-8 +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import io + +from units.compat import unittest +from ansible.parsing import vault +from ansible.parsing.yaml import dumper, objects +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.module_utils.six import PY2 +from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes + +from units.mock.yaml_helper import YamlTestUtils +from units.mock.vault_helper import TextVaultSecret + + +class TestAnsibleDumper(unittest.TestCase, YamlTestUtils): + def setUp(self): + self.vault_password = "hunter42" + vault_secret = TextVaultSecret(self.vault_password) + self.vault_secrets = [('vault_secret', vault_secret)] + self.good_vault = vault.VaultLib(self.vault_secrets) + self.vault = self.good_vault + self.stream = self._build_stream() + self.dumper = dumper.AnsibleDumper + + def _build_stream(self, yaml_text=None): + text = yaml_text or u'' + stream = io.StringIO(text) + return stream + + def _loader(self, stream): + return AnsibleLoader(stream, vault_secrets=self.vault.secrets) + + def test_ansible_vault_encrypted_unicode(self): + plaintext = 'This is a string we are going to encrypt.' + avu = objects.AnsibleVaultEncryptedUnicode.from_plaintext(plaintext, vault=self.vault, + secret=vault.match_secrets(self.vault_secrets, ['vault_secret'])[0][1]) + + yaml_out = self._dump_string(avu, dumper=self.dumper) + stream = self._build_stream(yaml_out) + loader = self._loader(stream) + + data_from_yaml = loader.get_single_data() + + self.assertEqual(plaintext, data_from_yaml.data) + + def test_bytes(self): + b_text = u'tréma'.encode('utf-8') + unsafe_object = AnsibleUnsafeBytes(b_text) + yaml_out = self._dump_string(unsafe_object, dumper=self.dumper) + + stream = self._build_stream(yaml_out) + loader = self._loader(stream) + + data_from_yaml = loader.get_single_data() + + result = b_text + if PY2: + # https://pyyaml.org/wiki/PyYAMLDocumentation#string-conversion-python-2-only + # pyyaml on Python 2 can return either unicode or bytes when given byte strings. + # We normalize that to always return unicode on Python2 as that's right most of the + # time. However, this means byte strings can round trip through yaml on Python3 but + # not on Python2. 
To make this code work the same on Python2 and Python3 (we want + # the Python3 behaviour) we need to change the methods in Ansible to: + # (1) Let byte strings pass through yaml without being converted on Python2 + # (2) Convert byte strings to text strings before being given to pyyaml (Without this, + # strings would end up as byte strings most of the time which would mostly be wrong) + # In practice, we mostly read bytes in from files and then pass that to pyyaml, for which + # the present behavior is correct. + # This is a workaround for the current behavior. + result = u'tr\xe9ma' + + self.assertEqual(result, data_from_yaml) + + def test_unicode(self): + u_text = u'nöel' + unsafe_object = AnsibleUnsafeText(u_text) + yaml_out = self._dump_string(unsafe_object, dumper=self.dumper) + + stream = self._build_stream(yaml_out) + loader = self._loader(stream) + + data_from_yaml = loader.get_single_data() + + self.assertEqual(u_text, data_from_yaml) diff --git a/test/units/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py new file mode 100644 index 00000000..d6989f44 --- /dev/null +++ b/test/units/parsing/yaml/test_loader.py @@ -0,0 +1,436 @@ +# coding: utf-8 +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
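The byte-string caveat in the comment above reduces to one rule: decode bytes to text before handing them to PyYAML, and the round trip then behaves the same on Python 2 and Python 3. Below is a minimal sketch of that rule using plain yaml.safe_dump/safe_load as stand-ins for the AnsibleDumper/AnsibleLoader pair the tests exercise:

# Minimal round-trip sketch; plain PyYAML stands in for the
# AnsibleDumper/AnsibleLoader used by the tests above.
import yaml

b_text = u'tréma'.encode('utf-8')

# Decode to text before dumping; allow_unicode keeps the accented
# character readable instead of escaping it.
yaml_out = yaml.safe_dump(b_text.decode('utf-8'), allow_unicode=True)

assert yaml.safe_load(yaml_out) == u'tréma'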
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO + +from units.compat import unittest + +from ansible import errors +from ansible.module_utils.six import text_type, binary_type +from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing import vault +from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.parsing.yaml.dumper import AnsibleDumper + +from units.mock.yaml_helper import YamlTestUtils +from units.mock.vault_helper import TextVaultSecret + +try: + from _yaml import ParserError + from _yaml import ScannerError +except ImportError: + from yaml.parser import ParserError + from yaml.scanner import ScannerError + + +class NameStringIO(StringIO): + """In py2.6, StringIO doesn't let you set name because a baseclass has it + as readonly property""" + name = None + + def __init__(self, *args, **kwargs): + super(NameStringIO, self).__init__(*args, **kwargs) + + +class TestAnsibleLoaderBasic(unittest.TestCase): + + def test_parse_number(self): + stream = StringIO(u""" + 1 + """) + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, 1) + # No line/column info saved yet + + def test_parse_string(self): + stream = StringIO(u""" + Ansible + """) + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, u'Ansible') + self.assertIsInstance(data, text_type) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) + + def test_parse_utf8_string(self): + stream = StringIO(u""" + Cafè Eñyei + """) + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, u'Cafè Eñyei') + self.assertIsInstance(data, text_type) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) + + def test_parse_dict(self): + stream = StringIO(u""" + webster: daniel + oed: oxford + """) + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'}) + self.assertEqual(len(data), 2) + self.assertIsInstance(list(data.keys())[0], text_type) + self.assertIsInstance(list(data.values())[0], text_type) + + # Beginning of the first key + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) + + self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26)) + self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22)) + + def test_parse_list(self): + stream = StringIO(u""" + - a + - b + """) + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, [u'a', u'b']) + self.assertEqual(len(data), 2) + self.assertIsInstance(data[0], text_type) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) + + self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19)) + self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19)) + + def test_parse_short_dict(self): + stream = StringIO(u"""{"foo": "bar"}""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9)) + + stream = StringIO(u"""foo: bar""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + 
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6)) + + def test_error_conditions(self): + stream = StringIO(u"""{""") + loader = AnsibleLoader(stream, 'myfile.yml') + self.assertRaises(ParserError, loader.get_single_data) + + def test_tab_error(self): + stream = StringIO(u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""") + loader = AnsibleLoader(stream, 'myfile.yml') + self.assertRaises(ScannerError, loader.get_single_data) + + def test_front_matter(self): + stream = StringIO(u"""---\nfoo: bar""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6)) + + # Initial indent (See: #6348) + stream = StringIO(u""" - foo: bar\n baz: qux""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}]) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2)) + self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4)) + self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9)) + self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9)) + + +class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils): + def setUp(self): + self.vault_password = "hunter42" + vault_secret = TextVaultSecret(self.vault_password) + self.vault_secrets = [('vault_secret', vault_secret), + ('default', vault_secret)] + self.vault = vault.VaultLib(self.vault_secrets) + + @property + def vault_secret(self): + return vault.match_encrypt_secret(self.vault_secrets)[1] + + def test_wrong_password(self): + plaintext = u"Ansible" + bob_password = "this is a different password" + + bobs_secret = TextVaultSecret(bob_password) + bobs_secrets = [('default', bobs_secret)] + + bobs_vault = vault.VaultLib(bobs_secrets) + + ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1]) + + try: + self.vault.decrypt(ciphertext) + except Exception as e: + self.assertIsInstance(e, errors.AnsibleError) + self.assertEqual(e.message, 'Decryption failed (no vault secrets were found that could decrypt)') + + def _encrypt_plaintext(self, plaintext): + # Construct a yaml repr of a vault by hand + vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret) + + # add yaml tag + vaulted_var = vaulted_var_bytes.decode() + lines = vaulted_var.splitlines() + lines2 = [] + for line in lines: + lines2.append(' %s' % line) + + vaulted_var = '\n'.join(lines2) + tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var + return tagged_vaulted_var + + def _build_stream(self, yaml_text): + stream = NameStringIO(yaml_text) + stream.name = 'my.yml' + return stream + + def _loader(self, stream): + return AnsibleLoader(stream, vault_secrets=self.vault.secrets) + + def _load_yaml(self, yaml_text, password): + stream = self._build_stream(yaml_text) + loader = self._loader(stream) + + data_from_yaml = loader.get_single_data() + + return data_from_yaml + + def test_dump_load_cycle(self): + avu = AnsibleVaultEncryptedUnicode.from_plaintext('The plaintext for test_dump_load_cycle.', self.vault, self.vault_secret) + self._dump_load_cycle(avu) + + def test_embedded_vault_from_dump(self): + avu = AnsibleVaultEncryptedUnicode.from_plaintext('setec astronomy', self.vault, self.vault_secret) + blip = {'stuff1': [{'a dict key': 24}, + {'shhh-ssh-secrets': avu, + 
'nothing to see here': 'move along'}], + 'another key': 24.1} + + blip = ['some string', 'another string', avu] + stream = NameStringIO() + + self._dump_stream(blip, stream, dumper=AnsibleDumper) + + stream.seek(0) + + stream.seek(0) + + loader = self._loader(stream) + + data_from_yaml = loader.get_data() + + stream2 = NameStringIO(u'') + # verify we can dump the object again + self._dump_stream(data_from_yaml, stream2, dumper=AnsibleDumper) + + def test_embedded_vault(self): + plaintext_var = u"""This is the plaintext string.""" + tagged_vaulted_var = self._encrypt_plaintext(plaintext_var) + another_vaulted_var = self._encrypt_plaintext(plaintext_var) + + different_var = u"""A different string that is not the same as the first one.""" + different_vaulted_var = self._encrypt_plaintext(different_var) + + yaml_text = u"""---\nwebster: daniel\noed: oxford\nthe_secret: %s\nanother_secret: %s\ndifferent_secret: %s""" % (tagged_vaulted_var, + another_vaulted_var, + different_vaulted_var) + + data_from_yaml = self._load_yaml(yaml_text, self.vault_password) + vault_string = data_from_yaml['the_secret'] + + self.assertEqual(plaintext_var, data_from_yaml['the_secret']) + + test_dict = {} + test_dict[vault_string] = 'did this work?' + + self.assertEqual(vault_string.data, vault_string) + + # This looks weird and useless, but the object in question has a custom __eq__ + self.assertEqual(vault_string, vault_string) + + another_vault_string = data_from_yaml['another_secret'] + different_vault_string = data_from_yaml['different_secret'] + + self.assertEqual(vault_string, another_vault_string) + self.assertNotEquals(vault_string, different_vault_string) + + # More testing of __eq__/__ne__ + self.assertTrue('some string' != vault_string) + self.assertNotEquals('some string', vault_string) + + # Note this is a compare of the str/unicode of these, they are different types + # so we want to test self == other, and other == self etc + self.assertEqual(plaintext_var, vault_string) + self.assertEqual(vault_string, plaintext_var) + self.assertFalse(plaintext_var != vault_string) + self.assertFalse(vault_string != plaintext_var) + + +class TestAnsibleLoaderPlay(unittest.TestCase): + + def setUp(self): + stream = NameStringIO(u""" + - hosts: localhost + vars: + number: 1 + string: Ansible + utf8_string: Cafè Eñyei + dictionary: + webster: daniel + oed: oxford + list: + - a + - b + - 1 + - 2 + tasks: + - name: Test case + ping: + data: "{{ utf8_string }}" + + - name: Test 2 + ping: + data: "Cafè Eñyei" + + - name: Test 3 + command: "printf 'Cafè Eñyei\\n'" + """) + self.play_filename = '/path/to/myplay.yml' + stream.name = self.play_filename + self.loader = AnsibleLoader(stream) + self.data = self.loader.get_single_data() + + def tearDown(self): + pass + + def test_data_complete(self): + self.assertEqual(len(self.data), 1) + self.assertIsInstance(self.data, list) + self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks'))) + + self.assertEqual(self.data[0][u'hosts'], u'localhost') + + self.assertEqual(self.data[0][u'vars'][u'number'], 1) + self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible') + self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei') + self.assertEqual(self.data[0][u'vars'][u'dictionary'], { + u'webster': u'daniel', + u'oed': u'oxford' + }) + self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2]) + + self.assertEqual(self.data[0][u'tasks'], [ + {u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}}, + {u'name': u'Test 
2', u'ping': {u'data': u'Cafè Eñyei'}}, + {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''}, + ]) + + def walk(self, data): + # Make sure there's no str in the data + self.assertNotIsInstance(data, binary_type) + + # Descend into various container types + if isinstance(data, text_type): + # strings are a sequence so we have to be explicit here + return + elif isinstance(data, (Sequence, Set)): + for element in data: + self.walk(element) + elif isinstance(data, Mapping): + for k, v in data.items(): + self.walk(k) + self.walk(v) + + # Scalars were all checked so we're good to go + return + + def test_no_str_in_data(self): + # Checks that no strings are str type + self.walk(self.data) + + def check_vars(self): + # Numbers don't have line/col information yet + # self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21)) + + self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29)) + self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34)) + + self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28)) + + self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23)) + self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25)) + self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25)) + # Numbers don't have line/col info yet + # self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25)) + # self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25)) + + def check_tasks(self): + # + # First Task + # + self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23)) + self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29)) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25)) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31)) + + # + # Second Task + # + self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23)) + self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29)) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25)) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31)) + + # + # Third Task + # + self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23)) + self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29)) + self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32)) + + def test_line_numbers(self): + # Check the line/column numbers are correct + # Note: Remember, currently dicts begin at the start of their first entry + self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19)) + self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26)) + self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21)) + + self.check_vars() + + self.assertEqual(self.data[0][u'tasks'].ansible_pos, 
(self.play_filename, 16, 21)) + + self.check_tasks() diff --git a/test/units/parsing/yaml/test_objects.py b/test/units/parsing/yaml/test_objects.py new file mode 100644 index 00000000..d4529eed --- /dev/null +++ b/test/units/parsing/yaml/test_objects.py @@ -0,0 +1,164 @@ +# This file is part of Ansible +# -*- coding: utf-8 -*- +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Copyright 2016, Adrian Likins + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest + +from ansible.errors import AnsibleError + +from ansible.module_utils._text import to_native + +from ansible.parsing import vault +from ansible.parsing.yaml.loader import AnsibleLoader + +# module under test +from ansible.parsing.yaml import objects + +from units.mock.yaml_helper import YamlTestUtils +from units.mock.vault_helper import TextVaultSecret + + +class TestAnsibleVaultUnicodeNoVault(unittest.TestCase, YamlTestUtils): + def test_empty_init(self): + self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode) + + def test_empty_string_init(self): + seq = ''.encode('utf8') + self.assert_values(seq) + + def test_empty_byte_string_init(self): + seq = b'' + self.assert_values(seq) + + def _assert_values(self, avu, seq): + self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode) + self.assertTrue(avu.vault is None) + # AnsibleVaultEncryptedUnicode without a vault should never == any string + self.assertNotEquals(avu, seq) + + def assert_values(self, seq): + avu = objects.AnsibleVaultEncryptedUnicode(seq) + self._assert_values(avu, seq) + + def test_single_char(self): + seq = 'a'.encode('utf8') + self.assert_values(seq) + + def test_string(self): + seq = 'some letters' + self.assert_values(seq) + + def test_byte_string(self): + seq = 'some letters'.encode('utf8') + self.assert_values(seq) + + +class TestAnsibleVaultEncryptedUnicode(unittest.TestCase, YamlTestUtils): + def setUp(self): + self.good_vault_password = "hunter42" + good_vault_secret = TextVaultSecret(self.good_vault_password) + self.good_vault_secrets = [('good_vault_password', good_vault_secret)] + self.good_vault = vault.VaultLib(self.good_vault_secrets) + + # TODO: make this use two vault secret identities instead of two vaultSecrets + self.wrong_vault_password = 'not-hunter42' + wrong_vault_secret = TextVaultSecret(self.wrong_vault_password) + self.wrong_vault_secrets = [('wrong_vault_password', wrong_vault_secret)] + self.wrong_vault = vault.VaultLib(self.wrong_vault_secrets) + + self.vault = self.good_vault + self.vault_secrets = self.good_vault_secrets + + def _loader(self, stream): + return AnsibleLoader(stream, vault_secrets=self.vault_secrets) + + def test_dump_load_cycle(self): + aveu = self._from_plaintext('the test string for TestAnsibleVaultEncryptedUnicode.test_dump_load_cycle') + self._dump_load_cycle(aveu) + + def assert_values(self, avu, seq): + self.assertIsInstance(avu, 
objects.AnsibleVaultEncryptedUnicode) + + self.assertEqual(avu, seq) + self.assertTrue(avu.vault is self.vault) + self.assertIsInstance(avu.vault, vault.VaultLib) + + def _from_plaintext(self, seq): + id_secret = vault.match_encrypt_secret(self.good_vault_secrets) + return objects.AnsibleVaultEncryptedUnicode.from_plaintext(seq, vault=self.vault, secret=id_secret[1]) + + def _from_ciphertext(self, ciphertext): + avu = objects.AnsibleVaultEncryptedUnicode(ciphertext) + avu.vault = self.vault + return avu + + def test_empty_init(self): + self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode) + + def test_empty_string_init_from_plaintext(self): + seq = '' + avu = self._from_plaintext(seq) + self.assert_values(avu, seq) + + def test_empty_unicode_init_from_plaintext(self): + seq = u'' + avu = self._from_plaintext(seq) + self.assert_values(avu, seq) + + def test_string_from_plaintext(self): + seq = 'some letters' + avu = self._from_plaintext(seq) + self.assert_values(avu, seq) + + def test_unicode_from_plaintext(self): + seq = u'some letters' + avu = self._from_plaintext(seq) + self.assert_values(avu, seq) + + def test_unicode_from_plaintext_encode(self): + seq = u'some text here' + avu = self._from_plaintext(seq) + b_avu = avu.encode('utf-8', 'strict') + self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode) + self.assertEqual(b_avu, seq.encode('utf-8', 'strict')) + self.assertTrue(avu.vault is self.vault) + self.assertIsInstance(avu.vault, vault.VaultLib) + + # TODO/FIXME: make sure bad password fails differently than 'thats not encrypted' + def test_empty_string_wrong_password(self): + seq = '' + self.vault = self.wrong_vault + avu = self._from_plaintext(seq) + + def compare(avu, seq): + return avu == seq + + self.assertRaises(AnsibleError, compare, avu, seq) + + def test_vaulted_utf8_value_37258(self): + seq = u"aöffü" + avu = self._from_plaintext(seq) + self.assert_values(avu, seq) + + def test_str_vaulted_utf8_value_37258(self): + seq = u"aöffü" + avu = self._from_plaintext(seq) + assert str(avu) == to_native(seq) diff --git a/test/units/playbook/__init__.py b/test/units/playbook/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/playbook/role/__init__.py b/test/units/playbook/role/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/playbook/role/test_include_role.py b/test/units/playbook/role/test_include_role.py new file mode 100644 index 00000000..93e222c4 --- /dev/null +++ b/test/units/playbook/role/test_include_role.py @@ -0,0 +1,248 @@ +# (c) 2016, Daniel Miranda +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
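Taken together, the tests above pin down the AnsibleVaultEncryptedUnicode contract: comparing against a plain string decrypts on the fly, so equality holds only while the attached vault has a matching secret. A condensed usage sketch built from the same calls those tests make (TextVaultSecret is the test-only helper they import):

from ansible.parsing import vault
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from units.mock.vault_helper import TextVaultSecret  # test-suite helper, as above

secrets = [('default', TextVaultSecret('hunter42'))]
v = vault.VaultLib(secrets)

avu = AnsibleVaultEncryptedUnicode.from_plaintext(
    u'some letters', vault=v, secret=vault.match_encrypt_secret(secrets)[1])

# __eq__ decrypts lazily, so this holds only while v can decrypt avu;
# with a wrong secret the comparison raises AnsibleError instead.
assert avu == u'some letters'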
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch + +from ansible.playbook import Play +from ansible.playbook.role_include import IncludeRole +from ansible.playbook.task import Task +from ansible.vars.manager import VariableManager + +from units.mock.loader import DictDataLoader +from units.mock.path import mock_unfrackpath_noop + + +class TestIncludeRole(unittest.TestCase): + + def setUp(self): + + self.loader = DictDataLoader({ + '/etc/ansible/roles/l1/tasks/main.yml': """ + - shell: echo 'hello world from l1' + - include_role: name=l2 + """, + '/etc/ansible/roles/l1/tasks/alt.yml': """ + - shell: echo 'hello world from l1 alt' + - include_role: name=l2 tasks_from=alt defaults_from=alt + """, + '/etc/ansible/roles/l1/defaults/main.yml': """ + test_variable: l1-main + l1_variable: l1-main + """, + '/etc/ansible/roles/l1/defaults/alt.yml': """ + test_variable: l1-alt + l1_variable: l1-alt + """, + '/etc/ansible/roles/l2/tasks/main.yml': """ + - shell: echo 'hello world from l2' + - include_role: name=l3 + """, + '/etc/ansible/roles/l2/tasks/alt.yml': """ + - shell: echo 'hello world from l2 alt' + - include_role: name=l3 tasks_from=alt defaults_from=alt + """, + '/etc/ansible/roles/l2/defaults/main.yml': """ + test_variable: l2-main + l2_variable: l2-main + """, + '/etc/ansible/roles/l2/defaults/alt.yml': """ + test_variable: l2-alt + l2_variable: l2-alt + """, + '/etc/ansible/roles/l3/tasks/main.yml': """ + - shell: echo 'hello world from l3' + """, + '/etc/ansible/roles/l3/tasks/alt.yml': """ + - shell: echo 'hello world from l3 alt' + """, + '/etc/ansible/roles/l3/defaults/main.yml': """ + test_variable: l3-main + l3_variable: l3-main + """, + '/etc/ansible/roles/l3/defaults/alt.yml': """ + test_variable: l3-alt + l3_variable: l3-alt + """ + }) + + self.var_manager = VariableManager(loader=self.loader) + + def tearDown(self): + pass + + def flatten_tasks(self, tasks): + for task in tasks: + if isinstance(task, IncludeRole): + blocks, handlers = task.get_block_list(loader=self.loader) + for block in blocks: + for t in self.flatten_tasks(block.block): + yield t + elif isinstance(task, Task): + yield task + else: + for t in self.flatten_tasks(task.block): + yield t + + def get_tasks_vars(self, play, tasks): + for task in self.flatten_tasks(tasks): + role = task._role + if not role: + continue + + yield (role.get_name(), + self.var_manager.get_vars(play=play, task=task)) + + @patch('ansible.playbook.role.definition.unfrackpath', + mock_unfrackpath_noop) + def test_simple(self): + + """Test one-level include with default tasks and variables""" + + play = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[ + {'include_role': 'name=l3'} + ] + ), loader=self.loader, variable_manager=self.var_manager) + + tasks = play.compile() + tested = False + for role, task_vars in self.get_tasks_vars(play, tasks): + tested = True + self.assertEqual(task_vars.get('l3_variable'), 'l3-main') + self.assertEqual(task_vars.get('test_variable'), 'l3-main') + self.assertTrue(tested) + + @patch('ansible.playbook.role.definition.unfrackpath', + mock_unfrackpath_noop) + def test_simple_alt_files(self): + + """Test one-level include with alternative tasks and variables""" + + play = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[{'include_role': 'name=l3 tasks_from=alt defaults_from=alt'}]), + 
loader=self.loader, variable_manager=self.var_manager) + + tasks = play.compile() + tested = False + for role, task_vars in self.get_tasks_vars(play, tasks): + tested = True + self.assertEqual(task_vars.get('l3_variable'), 'l3-alt') + self.assertEqual(task_vars.get('test_variable'), 'l3-alt') + self.assertTrue(tested) + + @patch('ansible.playbook.role.definition.unfrackpath', + mock_unfrackpath_noop) + def test_nested(self): + + """ + Test nested includes with default tasks and variables. + + Variables from outer roles should be inherited, but overridden in inner + roles. + """ + + play = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[ + {'include_role': 'name=l1'} + ] + ), loader=self.loader, variable_manager=self.var_manager) + + tasks = play.compile() + expected_roles = ['l1', 'l2', 'l3'] + for role, task_vars in self.get_tasks_vars(play, tasks): + expected_roles.remove(role) + # Outer-most role must not have variables from inner roles yet + if role == 'l1': + self.assertEqual(task_vars.get('l1_variable'), 'l1-main') + self.assertEqual(task_vars.get('l2_variable'), None) + self.assertEqual(task_vars.get('l3_variable'), None) + self.assertEqual(task_vars.get('test_variable'), 'l1-main') + # Middle role must have variables from outer role, but not inner + elif role == 'l2': + self.assertEqual(task_vars.get('l1_variable'), 'l1-main') + self.assertEqual(task_vars.get('l2_variable'), 'l2-main') + self.assertEqual(task_vars.get('l3_variable'), None) + self.assertEqual(task_vars.get('test_variable'), 'l2-main') + # Inner role must have variables from both outer roles + elif role == 'l3': + self.assertEqual(task_vars.get('l1_variable'), 'l1-main') + self.assertEqual(task_vars.get('l2_variable'), 'l2-main') + self.assertEqual(task_vars.get('l3_variable'), 'l3-main') + self.assertEqual(task_vars.get('test_variable'), 'l3-main') + else: + self.fail() + self.assertFalse(expected_roles) + + @patch('ansible.playbook.role.definition.unfrackpath', + mock_unfrackpath_noop) + def test_nested_alt_files(self): + + """ + Test nested includes with alternative tasks and variables. + + Variables from outer roles should be inherited, but overridden in inner + roles. 
+ """ + + play = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[ + {'include_role': 'name=l1 tasks_from=alt defaults_from=alt'} + ] + ), loader=self.loader, variable_manager=self.var_manager) + + tasks = play.compile() + expected_roles = ['l1', 'l2', 'l3'] + for role, task_vars in self.get_tasks_vars(play, tasks): + expected_roles.remove(role) + # Outer-most role must not have variables from inner roles yet + if role == 'l1': + self.assertEqual(task_vars.get('l1_variable'), 'l1-alt') + self.assertEqual(task_vars.get('l2_variable'), None) + self.assertEqual(task_vars.get('l3_variable'), None) + self.assertEqual(task_vars.get('test_variable'), 'l1-alt') + # Middle role must have variables from outer role, but not inner + elif role == 'l2': + self.assertEqual(task_vars.get('l1_variable'), 'l1-alt') + self.assertEqual(task_vars.get('l2_variable'), 'l2-alt') + self.assertEqual(task_vars.get('l3_variable'), None) + self.assertEqual(task_vars.get('test_variable'), 'l2-alt') + # Inner role must have variables from both outer roles + elif role == 'l3': + self.assertEqual(task_vars.get('l1_variable'), 'l1-alt') + self.assertEqual(task_vars.get('l2_variable'), 'l2-alt') + self.assertEqual(task_vars.get('l3_variable'), 'l3-alt') + self.assertEqual(task_vars.get('test_variable'), 'l3-alt') + else: + self.fail() + self.assertFalse(expected_roles) diff --git a/test/units/playbook/role/test_role.py b/test/units/playbook/role/test_role.py new file mode 100644 index 00000000..3aa30b8b --- /dev/null +++ b/test/units/playbook/role/test_role.py @@ -0,0 +1,422 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch, MagicMock + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.common._collections_compat import Container +from ansible.playbook.block import Block + +from units.mock.loader import DictDataLoader +from units.mock.path import mock_unfrackpath_noop + +from ansible.playbook.role import Role +from ansible.playbook.role.include import RoleInclude +from ansible.playbook.role import hash_params + + +class TestHashParams(unittest.TestCase): + def test(self): + params = {'foo': 'bar'} + res = hash_params(params) + self._assert_set(res) + self._assert_hashable(res) + + def _assert_hashable(self, res): + a_dict = {} + try: + a_dict[res] = res + except TypeError as e: + self.fail('%s is not hashable: %s' % (res, e)) + + def _assert_set(self, res): + self.assertIsInstance(res, frozenset) + + def test_dict_tuple(self): + params = {'foo': (1, 'bar',)} + res = hash_params(params) + self._assert_set(res) + + def test_tuple(self): + params = (1, None, 'foo') + res = hash_params(params) + self._assert_hashable(res) + + def test_tuple_dict(self): + params = ({'foo': 'bar'}, 37) + res = hash_params(params) + self._assert_hashable(res) + + def test_list(self): + params = ['foo', 'bar', 1, 37, None] + res = hash_params(params) + self._assert_set(res) + self._assert_hashable(res) + + def test_dict_with_list_value(self): + params = {'foo': [1, 4, 'bar']} + res = hash_params(params) + self._assert_set(res) + self._assert_hashable(res) + + def test_empty_set(self): + params = set([]) + res = hash_params(params) + self._assert_hashable(res) + self._assert_set(res) + + def test_generator(self): + def my_generator(): + for i in ['a', 1, None, {}]: + yield i + + params = my_generator() + res = hash_params(params) + self._assert_hashable(res) + + def test_container_but_not_iterable(self): + # This is a Container that is not iterable, which is unlikely but... 
+ class MyContainer(Container): + def __init__(self, some_thing): + self.data = [] + self.data.append(some_thing) + + def __contains__(self, item): + return item in self.data + + def __hash__(self): + return hash(self.data) + + def __len__(self): + return len(self.data) + + def __call__(self): + return False + + foo = MyContainer('foo bar') + params = foo + + self.assertRaises(TypeError, hash_params, params) + + def test_param_dict_dupe_values(self): + params1 = {'foo': False} + params2 = {'bar': False} + + res1 = hash_params(params1) + res2 = hash_params(params2) + + hash1 = hash(res1) + hash2 = hash(res2) + self.assertNotEqual(res1, res2) + self.assertNotEqual(hash1, hash2) + + def test_param_dupe(self): + params1 = { + # 'from_files': {}, + 'tags': [], + u'testvalue': False, + u'testvalue2': True, + # 'when': [] + } + params2 = { + # 'from_files': {}, + 'tags': [], + u'testvalue': True, + u'testvalue2': False, + # 'when': [] + } + res1 = hash_params(params1) + res2 = hash_params(params2) + + self.assertNotEqual(hash(res1), hash(res2)) + self.assertNotEqual(res1, res2) + + foo = {} + foo[res1] = 'params1' + foo[res2] = 'params2' + + self.assertEqual(len(foo), 2) + + del foo[res2] + self.assertEqual(len(foo), 1) + + for key in foo: + self.assertTrue(key in foo) + self.assertIn(key, foo) + + +class TestRole(unittest.TestCase): + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_tasks(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_tasks/tasks/main.yml": """ + - shell: echo 'hello world' + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(str(r), 'foo_tasks') + self.assertEqual(len(r._task_blocks), 1) + assert isinstance(r._task_blocks[0], Block) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_tasks_dir_vs_file(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_tasks/tasks/custom_main/foo.yml": """ + - command: bar + """, + "/etc/ansible/roles/foo_tasks/tasks/custom_main.yml": """ + - command: baz + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play, from_files=dict(tasks='custom_main')) + + self.assertEqual(r._task_blocks[0]._ds[0]['command'], 'baz') + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_handlers(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_handlers/handlers/main.yml": """ + - name: test handler + shell: echo 'hello world' + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(len(r._handler_blocks), 1) + assert isinstance(r._handler_blocks[0], Block) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_vars(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_vars/defaults/main.yml": """ + foo: bar + """, + "/etc/ansible/roles/foo_vars/vars/main.yml": """ + foo: bam + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + 
self.assertEqual(r._default_vars, dict(foo='bar')) + self.assertEqual(r._role_vars, dict(foo='bam')) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_vars_dirs(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_vars/defaults/main/foo.yml": """ + foo: bar + """, + "/etc/ansible/roles/foo_vars/vars/main/bar.yml": """ + foo: bam + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(r._default_vars, dict(foo='bar')) + self.assertEqual(r._role_vars, dict(foo='bam')) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_vars_nested_dirs(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """ + foo: bar + """, + "/etc/ansible/roles/foo_vars/vars/main/bar/foo.yml": """ + foo: bam + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(r._default_vars, dict(foo='bar')) + self.assertEqual(r._role_vars, dict(foo='bam')) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_vars_nested_dirs_combined(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """ + foo: bar + a: 1 + """, + "/etc/ansible/roles/foo_vars/defaults/main/bar/foo.yml": """ + foo: bam + b: 2 + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(r._default_vars, dict(foo='bar', a=1, b=2)) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_vars_dir_vs_file(self): + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_vars/vars/main/foo.yml": """ + foo: bar + """, + "/etc/ansible/roles/foo_vars/vars/main.yml": """ + foo: bam + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(r._role_vars, dict(foo='bam')) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_with_metadata(self): + + fake_loader = DictDataLoader({ + '/etc/ansible/roles/foo_metadata/meta/main.yml': """ + allow_duplicates: true + dependencies: + - bar_metadata + galaxy_info: + a: 1 + b: 2 + c: 3 + """, + '/etc/ansible/roles/bar_metadata/meta/main.yml': """ + dependencies: + - baz_metadata + """, + '/etc/ansible/roles/baz_metadata/meta/main.yml': """ + dependencies: + - bam_metadata + """, + '/etc/ansible/roles/bam_metadata/meta/main.yml': """ + dependencies: [] + """, + '/etc/ansible/roles/bad1_metadata/meta/main.yml': """ + 1 + """, + '/etc/ansible/roles/bad2_metadata/meta/main.yml': """ + foo: bar + """, + '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """ + dependencies: ['recursive2_metadata'] + """, + '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """ + dependencies: ['recursive1_metadata'] + """, + }) + + mock_play = MagicMock() + mock_play.collections = None + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + 
role_deps = r.get_direct_dependencies() + + self.assertEqual(len(role_deps), 1) + self.assertEqual(type(role_deps[0]), Role) + self.assertEqual(len(role_deps[0].get_parents()), 1) + self.assertEqual(role_deps[0].get_parents()[0], r) + self.assertEqual(r._metadata.allow_duplicates, True) + self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3)) + + all_deps = r.get_all_dependencies() + self.assertEqual(len(all_deps), 3) + self.assertEqual(all_deps[0].get_name(), 'bam_metadata') + self.assertEqual(all_deps[1].get_name(), 'baz_metadata') + self.assertEqual(all_deps[2].get_name(), 'bar_metadata') + + i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader) + self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) + + i = RoleInclude.load('bad2_metadata', play=mock_play, loader=fake_loader) + self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) + + # TODO: re-enable this test once Ansible has proper role dep cycle detection + # that doesn't rely on stack overflows being recoverable (as they aren't in Py3.7+) + # see https://github.com/ansible/ansible/issues/61527 + # i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader) + # self.assertRaises(AnsibleError, Role.load, i, play=mock_play) + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_load_role_complex(self): + + # FIXME: add tests for the more complex uses of + # params and tags/when statements + + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo_complex/tasks/main.yml": """ + - shell: echo 'hello world' + """, + }) + + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + + i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader) + r = Role.load(i, play=mock_play) + + self.assertEqual(r.get_name(), "foo_complex") diff --git a/test/units/playbook/test_attribute.py b/test/units/playbook/test_attribute.py new file mode 100644 index 00000000..bdb37c11 --- /dev/null +++ b/test/units/playbook/test_attribute.py @@ -0,0 +1,57 @@ +# (c) 2015, Marius Gedminas +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
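+# A hedged sketch of the ordering contract exercised below: Attribute orders
+# by priority in descending order, so a higher-priority attribute sorts first
+# and compares as "less than" a lower-priority one:
+#
+#   Attribute(priority=100) < Attribute(priority=0)   # True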
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.playbook.attribute import Attribute + + +class TestAttribute(unittest.TestCase): + + def setUp(self): + self.one = Attribute(priority=100) + self.two = Attribute(priority=0) + + def test_eq(self): + self.assertTrue(self.one == self.one) + self.assertFalse(self.one == self.two) + + def test_ne(self): + self.assertFalse(self.one != self.one) + self.assertTrue(self.one != self.two) + + def test_lt(self): + self.assertFalse(self.one < self.one) + self.assertTrue(self.one < self.two) + self.assertFalse(self.two < self.one) + + def test_gt(self): + self.assertFalse(self.one > self.one) + self.assertFalse(self.one > self.two) + self.assertTrue(self.two > self.one) + + def test_le(self): + self.assertTrue(self.one <= self.one) + self.assertTrue(self.one <= self.two) + self.assertFalse(self.two <= self.one) + + def test_ge(self): + self.assertTrue(self.one >= self.one) + self.assertFalse(self.one >= self.two) + self.assertTrue(self.two >= self.one) diff --git a/test/units/playbook/test_base.py b/test/units/playbook/test_base.py new file mode 100644 index 00000000..648200af --- /dev/null +++ b/test/units/playbook/test_base.py @@ -0,0 +1,630 @@ +# (c) 2016, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
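+# The TestBase cases below all follow one lifecycle, mirrored by the
+# _base_validate() helper: load_data() parses a plain dict into field
+# attributes, then post_validate() templates and type-checks them. A minimal
+# hedged sketch:
+#
+#   b = base.Base()
+#   b.load_data({'environment': [], 'port': 443})
+#   b.post_validate(Templar(loader=DictDataLoader({})))
+#   assert b.port == 443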
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar
+from ansible.playbook import base
+from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
+
+from units.mock.loader import DictDataLoader
+
+
+class TestBase(unittest.TestCase):
+ ClassUnderTest = base.Base
+
+ def setUp(self):
+ self.assorted_vars = {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value',
+ 'a_list': ['a_list_1', 'a_list_2'],
+ 'a_dict': {'a_dict_key': 'a_dict_value'},
+ 'a_set': set(['set_1', 'set_2']),
+ 'a_int': 42,
+ 'a_float': 37.371,
+ 'a_bool': True,
+ 'a_none': None,
+ }
+ self.b = self.ClassUnderTest()
+
+ def _base_validate(self, ds):
+ bsc = self.ClassUnderTest()
+ parent = ExampleParentBaseSubClass()
+ bsc._parent = parent
+ bsc._dep_chain = [parent]
+ parent._dep_chain = None
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ bsc.post_validate(templar)
+ return bsc
+
+ def test(self):
+ self.assertIsInstance(self.b, base.Base)
+ self.assertIsInstance(self.b, self.ClassUnderTest)
+
+ # dump_me doesn't return anything or change anything, so not much to assert
+ def test_dump_me_empty(self):
+ self.b.dump_me()
+
+ def test_dump_me(self):
+ ds = {'environment': [],
+ 'vars': {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value'}}
+ b = self._base_validate(ds)
+ b.dump_me()
+
+ def _assert_copy(self, orig, copy):
+ self.assertIsInstance(copy, self.ClassUnderTest)
+ self.assertIsInstance(copy, base.Base)
+ self.assertEqual(len(orig._valid_attrs), len(copy._valid_attrs))
+
+ sentinel = 'Empty DS'
+ self.assertEqual(getattr(orig, '_ds', sentinel), getattr(copy, '_ds', sentinel))
+
+ def test_copy_empty(self):
+ copy = self.b.copy()
+ self._assert_copy(self.b, copy)
+
+ def test_copy_with_vars(self):
+ ds = {'vars': self.assorted_vars}
+ b = self._base_validate(ds)
+
+ copy = b.copy()
+ self._assert_copy(b, copy)
+
+ def test_serialize(self):
+ ds = {'environment': [],
+ 'vars': self.assorted_vars
+ }
+ b = self._base_validate(ds)
+ ret = b.serialize()
+ self.assertIsInstance(ret, dict)
+
+ def test_deserialize(self):
+ data = {}
+
+ d = self.ClassUnderTest()
+ d.deserialize(data)
+ self.assertIn('run_once', d._attributes)
+ self.assertIn('check_mode', d._attributes)
+
+ data = {'no_log': False,
+ 'remote_user': None,
+ 'vars': self.assorted_vars,
+ 'environment': [],
+ 'run_once': False,
+ 'connection': None,
+ 'ignore_errors': False,
+ 'port': 22,
+ 'a_sentinel_with_an_unlikely_name': ['sure, a list']}
+
+ d = self.ClassUnderTest()
+ d.deserialize(data)
+ self.assertNotIn('a_sentinel_with_an_unlikely_name', d._attributes)
+ self.assertIn('run_once', d._attributes)
+ self.assertIn('check_mode', d._attributes)
+
+ def test_serialize_then_deserialize(self):
+ ds = {'environment': [],
+ 'vars': self.assorted_vars}
+ b = self._base_validate(ds)
+ copy = b.copy()
+ ret = b.serialize()
+ b.deserialize(ret)
+ c = self.ClassUnderTest()
+ c.deserialize(ret)
+ # TODO: not a great test, but coverage...
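+ # round-trip check: after serialize() -> deserialize(), both the original
+ # object and a freshly-deserialized instance should serialize identically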
+ self.maxDiff = None
+ self.assertDictEqual(b.serialize(), copy.serialize())
+ self.assertDictEqual(c.serialize(), copy.serialize())
+
+ def test_post_validate_empty(self):
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ ret = self.b.post_validate(templar)
+ self.assertIsNone(ret)
+
+ def test_get_ds_none(self):
+ ds = self.b.get_ds()
+ self.assertIsNone(ds)
+
+ def test_load_data_ds_is_none(self):
+ self.assertRaises(AssertionError, self.b.load_data, None)
+
+ def test_load_data_invalid_attr(self):
+ ds = {'not_a_valid_attr': [],
+ 'other': None}
+
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_load_data_invalid_attr_type(self):
+ ds = {'environment': True}
+
+ # environment is supposed to be a list. This
+ # seems like it shouldn't work?
+ ret = self.b.load_data(ds)
+ self.assertEqual(True, ret._attributes['environment'])
+
+ def test_post_validate(self):
+ ds = {'environment': [],
+ 'port': 443}
+ b = self._base_validate(ds)
+ self.assertEqual(b.port, 443)
+ self.assertEqual(b.environment, [])
+
+ def test_post_validate_invalid_attr_types(self):
+ ds = {'environment': [],
+ 'port': 'some_port'}
+ b = self._base_validate(ds)
+ self.assertEqual(b.port, 'some_port')
+
+ def test_squash(self):
+ data = self.b.serialize()
+ self.b.squash()
+ squashed_data = self.b.serialize()
+ # the 'squashed' flag should flip from False to True
+ self.assertFalse(data['squashed'])
+ self.assertTrue(squashed_data['squashed'])
+
+ def test_vars(self):
+ # vars as a dict.
+ ds = {'environment': [],
+ 'vars': {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value'}}
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars['var_1_key'], 'var_1_value')
+
+ def test_vars_list_of_dicts(self):
+ ds = {'environment': [],
+ 'vars': [{'var_2_key': 'var_2_value'},
+ {'var_1_key': 'var_1_value'}]
+ }
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars['var_1_key'], 'var_1_value')
+
+ def test_vars_not_dict_or_list(self):
+ ds = {'environment': [],
+ 'vars': 'I am a string, not a dict or a list of dicts'}
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_not_valid_identifier(self):
+ ds = {'environment': [],
+ 'vars': [{'var_2_key': 'var_2_value'},
+ {'1an-invalid identifer': 'var_1_value'}]
+ }
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_is_list_but_not_of_dicts(self):
+ ds = {'environment': [],
+ 'vars': ['foo', 'bar', 'this is a string not a dict']
+ }
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_is_none(self):
+ # If vars is None, we should get an empty dict back
+ ds = {'environment': [],
+ 'vars': None
+ }
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars, {})
+
+ def test_validate_empty(self):
+ self.b.validate()
+ self.assertTrue(self.b._validated)
+
+ def test_getters(self):
+ # not sure why these exist, but here are tests anyway
+ loader = self.b.get_loader()
+ variable_manager = self.b.get_variable_manager()
+ self.assertEqual(loader, self.b._loader)
+ self.assertEqual(variable_manager, self.b._variable_manager)
+
+
+class TestExtendValue(unittest.TestCase):
+ # _extend_value could be a module or staticmethod but since it's
+ # not, the test is here.
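+ # A hedged sketch of the semantics exercised below: scalars are wrapped
+ # into lists, None becomes an empty list, and the two lists are joined,
+ # with prepend=True reversing the order:
+ #
+ #   _extend_value(['a'], 'b')                 -> ['a', 'b']
+ #   _extend_value(['a'], ['b'], prepend=True) -> ['b', 'a']
+ #   _extend_value(None, None)                 -> []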
+ def test_extend_value_list_newlist(self): + b = base.Base() + value_list = ['first', 'second'] + new_value_list = ['new_first', 'new_second'] + ret = b._extend_value(value_list, new_value_list) + self.assertEqual(value_list + new_value_list, ret) + + def test_extend_value_list_newlist_prepend(self): + b = base.Base() + value_list = ['first', 'second'] + new_value_list = ['new_first', 'new_second'] + ret_prepend = b._extend_value(value_list, new_value_list, prepend=True) + self.assertEqual(new_value_list + value_list, ret_prepend) + + def test_extend_value_newlist_list(self): + b = base.Base() + value_list = ['first', 'second'] + new_value_list = ['new_first', 'new_second'] + ret = b._extend_value(new_value_list, value_list) + self.assertEqual(new_value_list + value_list, ret) + + def test_extend_value_newlist_list_prepend(self): + b = base.Base() + value_list = ['first', 'second'] + new_value_list = ['new_first', 'new_second'] + ret = b._extend_value(new_value_list, value_list, prepend=True) + self.assertEqual(value_list + new_value_list, ret) + + def test_extend_value_string_newlist(self): + b = base.Base() + some_string = 'some string' + new_value_list = ['new_first', 'new_second'] + ret = b._extend_value(some_string, new_value_list) + self.assertEqual([some_string] + new_value_list, ret) + + def test_extend_value_string_newstring(self): + b = base.Base() + some_string = 'some string' + new_value_string = 'this is the new values' + ret = b._extend_value(some_string, new_value_string) + self.assertEqual([some_string, new_value_string], ret) + + def test_extend_value_list_newstring(self): + b = base.Base() + value_list = ['first', 'second'] + new_value_string = 'this is the new values' + ret = b._extend_value(value_list, new_value_string) + self.assertEqual(value_list + [new_value_string], ret) + + def test_extend_value_none_none(self): + b = base.Base() + ret = b._extend_value(None, None) + self.assertEqual(len(ret), 0) + self.assertFalse(ret) + + def test_extend_value_none_list(self): + b = base.Base() + ret = b._extend_value(None, ['foo']) + self.assertEqual(ret, ['foo']) + + +class ExampleException(Exception): + pass + + +# naming fails me... 
+class ExampleParentBaseSubClass(base.Base): + _test_attr_parent_string = FieldAttribute(isa='string', default='A string attr for a class that may be a parent for testing') + + def __init__(self): + + super(ExampleParentBaseSubClass, self).__init__() + self._dep_chain = None + + def get_dep_chain(self): + return self._dep_chain + + +class ExampleSubClass(base.Base): + _test_attr_blip = FieldAttribute(isa='string', default='example sub class test_attr_blip', + inherit=False, + always_post_validate=True) + + def __init__(self): + super(ExampleSubClass, self).__init__() + + def get_dep_chain(self): + if self._parent: + return self._parent.get_dep_chain() + else: + return None + + +class BaseSubClass(base.Base): + _name = FieldAttribute(isa='string', default='', always_post_validate=True) + _test_attr_bool = FieldAttribute(isa='bool', always_post_validate=True) + _test_attr_int = FieldAttribute(isa='int', always_post_validate=True) + _test_attr_float = FieldAttribute(isa='float', default=3.14159, always_post_validate=True) + _test_attr_list = FieldAttribute(isa='list', listof=string_types, always_post_validate=True) + _test_attr_list_no_listof = FieldAttribute(isa='list', always_post_validate=True) + _test_attr_list_required = FieldAttribute(isa='list', listof=string_types, required=True, + default=list, always_post_validate=True) + _test_attr_string = FieldAttribute(isa='string', default='the_test_attr_string_default_value') + _test_attr_string_required = FieldAttribute(isa='string', required=True, + default='the_test_attr_string_default_value') + _test_attr_percent = FieldAttribute(isa='percent', always_post_validate=True) + _test_attr_set = FieldAttribute(isa='set', default=set, always_post_validate=True) + _test_attr_dict = FieldAttribute(isa='dict', default=lambda: {'a_key': 'a_value'}, always_post_validate=True) + _test_attr_class = FieldAttribute(isa='class', class_type=ExampleSubClass) + _test_attr_class_post_validate = FieldAttribute(isa='class', class_type=ExampleSubClass, + always_post_validate=True) + _test_attr_unknown_isa = FieldAttribute(isa='not_a_real_isa', always_post_validate=True) + _test_attr_example = FieldAttribute(isa='string', default='the_default', + always_post_validate=True) + _test_attr_none = FieldAttribute(isa='string', always_post_validate=True) + _test_attr_preprocess = FieldAttribute(isa='string', default='the default for preprocess') + _test_attr_method = FieldAttribute(isa='string', default='some attr with a getter', + always_post_validate=True) + _test_attr_method_missing = FieldAttribute(isa='string', default='some attr with a missing getter', + always_post_validate=True) + + def _get_attr_test_attr_method(self): + return 'foo bar' + + def _validate_test_attr_example(self, attr, name, value): + if not isinstance(value, str): + raise ExampleException('_test_attr_example is not a string: %s type=%s' % (value, type(value))) + + def _post_validate_test_attr_example(self, attr, value, templar): + after_template_value = templar.template(value) + return after_template_value + + def _post_validate_test_attr_none(self, attr, value, templar): + return None + + def _get_parent_attribute(self, attr, extend=False, prepend=False): + value = None + try: + value = self._attributes[attr] + if self._parent and (value is None or extend): + parent_value = getattr(self._parent, attr, None) + if extend: + value = self._extend_value(value, parent_value, prepend) + else: + value = parent_value + except KeyError: + pass + + return value + + +# terrible name, but it is a TestBase 
subclass for testing subclasses of Base +class TestBaseSubClass(TestBase): + ClassUnderTest = BaseSubClass + + def _base_validate(self, ds): + ds['test_attr_list_required'] = [] + return super(TestBaseSubClass, self)._base_validate(ds) + + def test_attr_bool(self): + ds = {'test_attr_bool': True} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_bool, True) + + def test_attr_int(self): + MOST_RANDOM_NUMBER = 37 + ds = {'test_attr_int': MOST_RANDOM_NUMBER} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_int, MOST_RANDOM_NUMBER) + + def test_attr_int_del(self): + MOST_RANDOM_NUMBER = 37 + ds = {'test_attr_int': MOST_RANDOM_NUMBER} + bsc = self._base_validate(ds) + del bsc.test_attr_int + self.assertNotIn('test_attr_int', bsc._attributes) + + def test_attr_float(self): + roughly_pi = 4.0 + ds = {'test_attr_float': roughly_pi} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_float, roughly_pi) + + def test_attr_percent(self): + percentage = '90%' + percentage_float = 90.0 + ds = {'test_attr_percent': percentage} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_percent, percentage_float) + + # This method works hard and gives it its all and everything it's got. It doesn't + # leave anything on the field. It deserves to pass. It has earned it. + def test_attr_percent_110_percent(self): + percentage = '110.11%' + percentage_float = 110.11 + ds = {'test_attr_percent': percentage} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_percent, percentage_float) + + # This method is just here for the paycheck. + def test_attr_percent_60_no_percent_sign(self): + percentage = '60' + percentage_float = 60.0 + ds = {'test_attr_percent': percentage} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_percent, percentage_float) + + def test_attr_set(self): + test_set = set(['first_string_in_set', 'second_string_in_set']) + ds = {'test_attr_set': test_set} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_set, test_set) + + def test_attr_set_string(self): + test_data = ['something', 'other'] + test_value = ','.join(test_data) + ds = {'test_attr_set': test_value} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_set, set(test_data)) + + def test_attr_set_not_string_or_list(self): + test_value = 37.1 + ds = {'test_attr_set': test_value} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_set, set([test_value])) + + def test_attr_dict(self): + test_dict = {'a_different_key': 'a_different_value'} + ds = {'test_attr_dict': test_dict} + bsc = self._base_validate(ds) + self.assertEqual(bsc.test_attr_dict, test_dict) + + def test_attr_dict_string(self): + test_value = 'just_some_random_string' + ds = {'test_attr_dict': test_value} + self.assertRaisesRegexp(AnsibleParserError, 'is not a dictionary', self._base_validate, ds) + + def test_attr_class(self): + esc = ExampleSubClass() + ds = {'test_attr_class': esc} + bsc = self._base_validate(ds) + self.assertIs(bsc.test_attr_class, esc) + + def test_attr_class_wrong_type(self): + not_a_esc = ExampleSubClass + ds = {'test_attr_class': not_a_esc} + bsc = self._base_validate(ds) + self.assertIs(bsc.test_attr_class, not_a_esc) + + def test_attr_class_post_validate(self): + esc = ExampleSubClass() + ds = {'test_attr_class_post_validate': esc} + bsc = self._base_validate(ds) + self.assertIs(bsc.test_attr_class_post_validate, esc) + + def test_attr_class_post_validate_class_not_instance(self): + not_a_esc = ExampleSubClass + ds = 
{'test_attr_class_post_validate': not_a_esc}
+ self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*Meta.*instead',
+ self._base_validate, ds)
+
+ def test_attr_class_post_validate_wrong_class(self):
+ not_a_esc = 37
+ ds = {'test_attr_class_post_validate': not_a_esc}
+ self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*int.*instead',
+ self._base_validate, ds)
+
+ def test_attr_remote_user(self):
+ ds = {'remote_user': 'testuser'}
+ bsc = self._base_validate(ds)
+ # TODO: attempt to verify we called parent getters etc
+ self.assertEqual(bsc.remote_user, 'testuser')
+
+ def test_attr_example_undefined(self):
+ ds = {'test_attr_example': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
+ exc_regex_str = 'test_attr_example.*has an invalid value, which includes an undefined variable.*some_var_that_shouldnt*'
+ self.assertRaisesRegexp(AnsibleParserError, exc_regex_str, self._base_validate, ds)
+
+ def test_attr_name_undefined(self):
+ ds = {'name': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
+ bsc = self._base_validate(ds)
+ # the attribute 'name' is special-cased in post_validate
+ self.assertEqual(bsc.name, '{{ some_var_that_shouldnt_exist_to_test_omit }}')
+
+ def test_subclass_validate_method(self):
+ ds = {'test_attr_list': ['string_list_item_1', 'string_list_item_2'],
+ 'test_attr_example': 'the_test_attr_example_value_string'}
+ # Not throwing an exception here is the test
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_example, 'the_test_attr_example_value_string')
+
+ def test_subclass_validate_method_invalid(self):
+ ds = {'test_attr_example': [None]}
+ self.assertRaises(ExampleException, self._base_validate, ds)
+
+ def test_attr_none(self):
+ ds = {'test_attr_none': 'foo'}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_none, None)
+
+ def test_attr_string(self):
+ the_string_value = "the new test_attr_string_value"
+ ds = {'test_attr_string': the_string_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_string, the_string_value)
+
+ def test_attr_string_invalid_list(self):
+ ds = {'test_attr_string': ['The new test_attr_string', 'value, however in a list']}
+ self.assertRaises(AnsibleParserError, self._base_validate, ds)
+
+ def test_attr_string_required(self):
+ the_string_value = "the new test_attr_string_required_value"
+ ds = {'test_attr_string_required': the_string_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_string_required, the_string_value)
+
+ def test_attr_list_invalid(self):
+ ds = {'test_attr_list': {}}
+ self.assertRaises(AnsibleParserError, self._base_validate, ds)
+
+ def test_attr_list(self):
+ string_list = ['foo', 'bar']
+ ds = {'test_attr_list': string_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(string_list, bsc._attributes['test_attr_list'])
+
+ def test_attr_list_none(self):
+ ds = {'test_attr_list': None}
+ bsc = self._base_validate(ds)
+ self.assertEqual(None, bsc._attributes['test_attr_list'])
+
+ def test_attr_list_no_listof(self):
+ test_list = ['foo', 'bar', 123]
+ ds = {'test_attr_list_no_listof': test_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(test_list, bsc._attributes['test_attr_list_no_listof'])
+
+ def test_attr_list_required(self):
+ string_list = ['foo', 'bar']
+ ds = {'test_attr_list_required': string_list}
+ bsc = self.ClassUnderTest()
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ bsc.post_validate(templar)
+ self.assertEqual(string_list, bsc._attributes['test_attr_list_required'])
+
+ def test_attr_list_required_empty_string(self):
+ string_list = [""]
+ ds = {'test_attr_list_required': string_list}
+ bsc = self.ClassUnderTest()
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ self.assertRaisesRegexp(AnsibleParserError, 'cannot have empty values',
+ bsc.post_validate, templar)
+
+ def test_attr_unknown(self):
+ a_list = ['some string']
+ ds = {'test_attr_unknown_isa': a_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_unknown_isa, a_list)
+
+ def test_attr_method(self):
+ ds = {'test_attr_method': 'value from the ds'}
+ bsc = self._base_validate(ds)
+ # The value returned by the subclass's _get_attr_test_attr_method
+ self.assertEqual(bsc.test_attr_method, 'foo bar')
+
+ def test_attr_method_missing(self):
+ a_string = 'The value set from the ds'
+ ds = {'test_attr_method_missing': a_string}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_method_missing, a_string)
+
+ def test_get_validated_value_string_rewrap_unsafe(self):
+ attribute = FieldAttribute(isa='string')
+ value = AnsibleUnsafeText(u'bar')
+ templar = Templar(None)
+ bsc = self.ClassUnderTest()
+ result = bsc.get_validated_value('foo', attribute, value, templar)
+ self.assertIsInstance(result, AnsibleUnsafeText)
+ self.assertEqual(result, AnsibleUnsafeText(u'bar'))
diff --git a/test/units/playbook/test_block.py b/test/units/playbook/test_block.py
new file mode 100644
index 00000000..48471237
--- /dev/null
+++ b/test/units/playbook/test_block.py
@@ -0,0 +1,82 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
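+# A hedged sketch of the two input shapes Block.load() accepts, as exercised
+# by the tests below: an explicit mapping with block/rescue/always keys, or a
+# bare task list that gets wrapped into an implicit block:
+#
+#   Block.load({'block': [dict(action='foo')]})   # explicit block
+#   Block.load([dict(action='foo')])              # implicit block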
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.playbook.block import Block +from ansible.playbook.task import Task + + +class TestBlock(unittest.TestCase): + + def test_construct_empty_block(self): + b = Block() + + def test_construct_block_with_role(self): + pass + + def test_load_block_simple(self): + ds = dict( + block=[], + rescue=[], + always=[], + # otherwise=[], + ) + b = Block.load(ds) + self.assertEqual(b.block, []) + self.assertEqual(b.rescue, []) + self.assertEqual(b.always, []) + # not currently used + # self.assertEqual(b.otherwise, []) + + def test_load_block_with_tasks(self): + ds = dict( + block=[dict(action='block')], + rescue=[dict(action='rescue')], + always=[dict(action='always')], + # otherwise=[dict(action='otherwise')], + ) + b = Block.load(ds) + self.assertEqual(len(b.block), 1) + self.assertIsInstance(b.block[0], Task) + self.assertEqual(len(b.rescue), 1) + self.assertIsInstance(b.rescue[0], Task) + self.assertEqual(len(b.always), 1) + self.assertIsInstance(b.always[0], Task) + # not currently used + # self.assertEqual(len(b.otherwise), 1) + # self.assertIsInstance(b.otherwise[0], Task) + + def test_load_implicit_block(self): + ds = [dict(action='foo')] + b = Block.load(ds) + self.assertEqual(len(b.block), 1) + self.assertIsInstance(b.block[0], Task) + + def test_deserialize(self): + ds = dict( + block=[dict(action='block')], + rescue=[dict(action='rescue')], + always=[dict(action='always')], + ) + b = Block.load(ds) + data = dict(parent=ds, parent_type='Block') + b.deserialize(data) + self.assertIsInstance(b._parent, Block) diff --git a/test/units/playbook/test_collectionsearch.py b/test/units/playbook/test_collectionsearch.py new file mode 100644 index 00000000..be40d85e --- /dev/null +++ b/test/units/playbook/test_collectionsearch.py @@ -0,0 +1,78 @@ +# (c) 2020 Ansible Project +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleParserError +from ansible.playbook.play import Play +from ansible.playbook.task import Task +from ansible.playbook.block import Block +from ansible.playbook.collectionsearch import CollectionSearch + +import pytest + + +def test_collection_static_warning(capsys): + """Test that collection name is not templated. + + Also, make sure that users see the warning message for the referenced name. 
+ """ + collection_name = "foo.{{bar}}" + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + connection='local', + collections=collection_name, + )) + assert collection_name in p.collections + std_out, std_err = capsys.readouterr() + assert '[WARNING]: "collections" is not templatable, but we found: %s' % collection_name in std_err + assert '' == std_out + + +def test_collection_invalid_data_play(): + """Test that collection as a dict at the play level fails with parser error""" + collection_name = {'name': 'foo'} + with pytest.raises(AnsibleParserError): + Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + connection='local', + collections=collection_name, + )) + + +def test_collection_invalid_data_task(): + """Test that collection as a dict at the task level fails with parser error""" + collection_name = {'name': 'foo'} + with pytest.raises(AnsibleParserError): + Task.load(dict( + name="test task", + collections=collection_name, + )) + + +def test_collection_invalid_data_block(): + """Test that collection as a dict at the block level fails with parser error""" + collection_name = {'name': 'foo'} + with pytest.raises(AnsibleParserError): + Block.load(dict( + block=[dict(name="test task", collections=collection_name)] + )) diff --git a/test/units/playbook/test_conditional.py b/test/units/playbook/test_conditional.py new file mode 100644 index 00000000..ce351484 --- /dev/null +++ b/test/units/playbook/test_conditional.py @@ -0,0 +1,240 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.mock.loader import DictDataLoader +from units.compat.mock import MagicMock + +from ansible.plugins.strategy import SharedPluginLoaderObj +from ansible.template import Templar +from ansible import errors + +from ansible.playbook import conditional + + +class TestConditional(unittest.TestCase): + def setUp(self): + self.loader = DictDataLoader({}) + self.cond = conditional.Conditional(loader=self.loader) + self.shared_loader = SharedPluginLoaderObj() + self.templar = Templar(loader=self.loader, variables={}) + + def _eval_con(self, when=None, variables=None): + when = when or [] + variables = variables or {} + self.cond.when = when + ret = self.cond.evaluate_conditional(self.templar, variables) + return ret + + def test_false(self): + when = [u"False"] + ret = self._eval_con(when, {}) + self.assertFalse(ret) + + def test_true(self): + when = [u"True"] + ret = self._eval_con(when, {}) + self.assertTrue(ret) + + def test_true_boolean(self): + self.cond.when = [True] + m = MagicMock() + ret = self.cond.evaluate_conditional(m, {}) + self.assertTrue(ret) + self.assertFalse(m.is_template.called) + + def test_false_boolean(self): + self.cond.when = [False] + m = MagicMock() + ret = self.cond.evaluate_conditional(m, {}) + self.assertFalse(ret) + self.assertFalse(m.is_template.called) + + def test_undefined(self): + when = [u"{{ some_undefined_thing }}"] + self.assertRaisesRegexp(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed", + self._eval_con, when, {}) + + def test_defined(self): + variables = {'some_defined_thing': True} + when = [u"{{ some_defined_thing }}"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_dict_defined_values(self): + variables = {'dict_value': 1, + 'some_defined_dict': {'key1': 'value1', + 'key2': '{{ dict_value }}'}} + + when = [u"some_defined_dict"] + ret = self._eval_con(when, variables) + 
self.assertTrue(ret) + + def test_dict_defined_values_is_defined(self): + variables = {'dict_value': 1, + 'some_defined_dict': {'key1': 'value1', + 'key2': '{{ dict_value }}'}} + + when = [u"some_defined_dict.key1 is defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_dict_defined_multiple_values_is_defined(self): + variables = {'dict_value': 1, + 'some_defined_dict': {'key1': 'value1', + 'key2': '{{ dict_value }}'}} + + when = [u"some_defined_dict.key1 is defined", + u"some_defined_dict.key2 is not undefined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_dict_undefined_values(self): + variables = {'dict_value': 1, + 'some_defined_dict_with_undefined_values': {'key1': 'value1', + 'key2': '{{ dict_value }}', + 'key3': '{{ undefined_dict_value }}' + }} + + when = [u"some_defined_dict_with_undefined_values is defined"] + self.assertRaisesRegexp(errors.AnsibleError, + "The conditional check 'some_defined_dict_with_undefined_values is defined' failed.", + self._eval_con, + when, variables) + + def test_nested_hostvars_undefined_values(self): + variables = {'dict_value': 1, + 'hostvars': {'host1': {'key1': 'value1', + 'key2': '{{ dict_value }}'}, + 'host2': '{{ dict_value }}', + 'host3': '{{ undefined_dict_value }}', + # no host4 + }, + 'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'} + } + + when = [u"some_dict.some_dict_key1 == hostvars['host3']"] + # self._eval_con(when, variables) + self.assertRaisesRegexp(errors.AnsibleError, + r"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed", + # "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed", + # "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.", + self._eval_con, + when, variables) + + def test_dict_undefined_values_bare(self): + variables = {'dict_value': 1, + 'some_defined_dict_with_undefined_values': {'key1': 'value1', + 'key2': '{{ dict_value }}', + 'key3': '{{ undefined_dict_value }}' + }} + + # raises an exception when a non-string conditional is passed to extract_defined_undefined() + when = [u"some_defined_dict_with_undefined_values"] + self.assertRaisesRegexp(errors.AnsibleError, + "The conditional check 'some_defined_dict_with_undefined_values' failed.", + self._eval_con, + when, variables) + + def test_dict_undefined_values_is_defined(self): + variables = {'dict_value': 1, + 'some_defined_dict_with_undefined_values': {'key1': 'value1', + 'key2': '{{ dict_value }}', + 'key3': '{{ undefined_dict_value }}' + }} + + when = [u"some_defined_dict_with_undefined_values is defined"] + self.assertRaisesRegexp(errors.AnsibleError, + "The conditional check 'some_defined_dict_with_undefined_values is defined' failed.", + self._eval_con, + when, variables) + + def test_is_defined(self): + variables = {'some_defined_thing': True} + when = [u"some_defined_thing is defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_undefined(self): + variables = {'some_defined_thing': True} + when = [u"some_defined_thing is undefined"] + ret = self._eval_con(when, variables) + self.assertFalse(ret) + + def test_is_undefined_and_defined(self): + variables = {'some_defined_thing': True} + when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"] + ret = self._eval_con(when, variables) + self.assertFalse(ret) + + def test_is_undefined_and_defined_reversed(self): + variables = {'some_defined_thing': True} + when = [u"some_defined_thing is defined", 
u"some_defined_thing is undefined"] + ret = self._eval_con(when, variables) + self.assertFalse(ret) + + def test_is_not_undefined(self): + variables = {'some_defined_thing': True} + when = [u"some_defined_thing is not undefined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_not_defined(self): + variables = {'some_defined_thing': True} + when = [u"some_undefined_thing is not defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_hostvars_quotes_is_defined(self): + variables = {'hostvars': {'some_host': {}}, + 'compare_targets_single': "hostvars['some_host']", + 'compare_targets_double': 'hostvars["some_host"]', + 'compare_targets': {'double': '{{ compare_targets_double }}', + 'single': "{{ compare_targets_single }}"}, + } + when = [u"hostvars['some_host'] is defined", + u'hostvars["some_host"] is defined', + u"{{ compare_targets.double }} is defined", + u"{{ compare_targets.single }} is defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_hostvars_quotes_is_defined_but_is_not_defined(self): + variables = {'hostvars': {'some_host': {}}, + 'compare_targets_single': "hostvars['some_host']", + 'compare_targets_double': 'hostvars["some_host"]', + 'compare_targets': {'double': '{{ compare_targets_double }}', + 'single': "{{ compare_targets_single }}"}, + } + when = [u"hostvars['some_host'] is defined", + u'hostvars["some_host"] is defined', + u"{{ compare_targets.triple }} is defined", + u"{{ compare_targets.quadruple }} is defined"] + self.assertRaisesRegexp(errors.AnsibleError, + "The conditional check '{{ compare_targets.triple }} is defined' failed", + self._eval_con, + when, variables) + + def test_is_hostvars_host_is_defined(self): + variables = {'hostvars': {'some_host': {}, }} + when = [u"hostvars['some_host'] is defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_hostvars_host_undefined_is_defined(self): + variables = {'hostvars': {'some_host': {}, }} + when = [u"hostvars['some_undefined_host'] is defined"] + ret = self._eval_con(when, variables) + self.assertFalse(ret) + + def test_is_hostvars_host_undefined_is_undefined(self): + variables = {'hostvars': {'some_host': {}, }} + when = [u"hostvars['some_undefined_host'] is undefined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) + + def test_is_hostvars_host_undefined_is_not_defined(self): + variables = {'hostvars': {'some_host': {}, }} + when = [u"hostvars['some_undefined_host'] is not defined"] + ret = self._eval_con(when, variables) + self.assertTrue(ret) diff --git a/test/units/playbook/test_helpers.py b/test/units/playbook/test_helpers.py new file mode 100644 index 00000000..a4ed6178 --- /dev/null +++ b/test/units/playbook/test_helpers.py @@ -0,0 +1,405 @@ +# (c) 2016, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from units.compat import unittest +from units.compat.mock import MagicMock +from units.mock.loader import DictDataLoader + +from ansible import errors +from ansible.playbook.block import Block +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task +from ansible.playbook.task_include import TaskInclude +from ansible.playbook.role.include import RoleInclude + +from ansible.playbook import helpers + + +class MixinForMocks(object): + def _setup(self): + # This is not a very good mixin, lots of side effects + self.fake_loader = DictDataLoader({'include_test.yml': "", + 'other_include_test.yml': ""}) + self.mock_tqm = MagicMock(name='MockTaskQueueManager') + + self.mock_play = MagicMock(name='MockPlay') + self.mock_play._attributes = [] + self.mock_play.collections = None + + self.mock_iterator = MagicMock(name='MockIterator') + self.mock_iterator._play = self.mock_play + + self.mock_inventory = MagicMock(name='MockInventory') + self.mock_inventory._hosts_cache = dict() + + def _get_host(host_name): + return None + + self.mock_inventory.get_host.side_effect = _get_host + # TODO: can we use a real VariableManager? + self.mock_variable_manager = MagicMock(name='MockVariableManager') + self.mock_variable_manager.get_vars.return_value = dict() + + self.mock_block = MagicMock(name='MockBlock') + + # On macOS /etc is actually /private/etc, tests fail when performing literal /etc checks + self.fake_role_loader = DictDataLoader({os.path.join(os.path.realpath("/etc"), "ansible/roles/bogus_role/tasks/main.yml"): """ + - shell: echo 'hello world' + """}) + + self._test_data_path = os.path.dirname(__file__) + self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """ + - include: other_test_include.yml + - shell: echo 'hello world' + """, + "/dev/null/includes/static_test_include.yml": """ + - include: other_test_include.yml + - shell: echo 'hello static world' + """, + "/dev/null/includes/other_test_include.yml": """ + - debug: + msg: other_test_include_debug + """}) + + +class TestLoadListOfTasks(unittest.TestCase, MixinForMocks): + def setUp(self): + self._setup() + + def _assert_is_task_list(self, results): + for result in results: + self.assertIsInstance(result, Task) + + def _assert_is_task_list_or_blocks(self, results): + self.assertIsInstance(results, list) + for result in results: + self.assertIsInstance(result, (Task, Block)) + + def test_ds_not_list(self): + ds = {} + self.assertRaises(AssertionError, helpers.load_list_of_tasks, + ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None) + + def test_ds_not_dict(self): + ds = [[]] + self.assertRaises(AssertionError, helpers.load_list_of_tasks, + ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None) + + def test_empty_task(self): + ds = [{}] + self.assertRaisesRegexp(errors.AnsibleParserError, + "no module/action detected in task", + helpers.load_list_of_tasks, + ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + + def test_empty_task_use_handlers(self): + ds = [{}] + self.assertRaisesRegexp(errors.AnsibleParserError, + "no module/action detected in task.", + helpers.load_list_of_tasks, + ds, + use_handlers=True, + play=self.mock_play, + variable_manager=self.mock_variable_manager, + 
loader=self.fake_loader) + + def test_one_bogus_block(self): + ds = [{'block': None}] + self.assertRaisesRegexp(errors.AnsibleParserError, + "A malformed block was encountered", + helpers.load_list_of_tasks, + ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + + def test_unknown_action(self): + action_name = 'foo_test_unknown_action' + ds = [{'action': action_name}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self._assert_is_task_list_or_blocks(res) + self.assertEqual(res[0].action, action_name) + + def test_block_unknown_action(self): + action_name = 'foo_test_block_unknown_action' + ds = [{ + 'block': [{'action': action_name}] + }] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self._assert_is_task_list_or_blocks(res) + self.assertIsInstance(res[0], Block) + self._assert_default_block(res[0]) + + def _assert_default_block(self, block): + # the expected defaults + self.assertIsInstance(block.block, list) + self.assertEqual(len(block.block), 1) + self.assertIsInstance(block.rescue, list) + self.assertEqual(len(block.rescue), 0) + self.assertIsInstance(block.always, list) + self.assertEqual(len(block.always), 0) + + def test_block_unknown_action_use_handlers(self): + ds = [{ + 'block': [{'action': 'foo_test_block_unknown_action'}] + }] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self._assert_is_task_list_or_blocks(res) + self.assertIsInstance(res[0], Block) + self._assert_default_block(res[0]) + + def test_one_bogus_block_use_handlers(self): + ds = [{'block': True}] + self.assertRaisesRegexp(errors.AnsibleParserError, + "A malformed block was encountered", + helpers.load_list_of_tasks, + ds, play=self.mock_play, use_handlers=True, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + + def test_one_bogus_include(self): + ds = [{'include': 'somefile.yml'}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self.assertIsInstance(res, list) + self.assertEqual(len(res), 0) + + def test_one_bogus_include_use_handlers(self): + ds = [{'include': 'somefile.yml'}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self.assertIsInstance(res, list) + self.assertEqual(len(res), 0) + + def test_one_bogus_include_static(self): + ds = [{'include': 'somefile.yml', + 'static': 'true'}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_loader) + self.assertIsInstance(res, list) + self.assertEqual(len(res), 0) + + def test_one_include(self): + ds = [{'include': '/dev/null/includes/other_test_include.yml'}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_include_loader) + self.assertEqual(len(res), 1) + self._assert_is_task_list_or_blocks(res) + + def test_one_parent_include(self): + ds = [{'include': '/dev/null/includes/test_include.yml'}] + res = helpers.load_list_of_tasks(ds, play=self.mock_play, + variable_manager=self.mock_variable_manager, loader=self.fake_include_loader) + self._assert_is_task_list_or_blocks(res) + 
self.assertIsInstance(res[0], Block)
+ self.assertIsInstance(res[0]._parent, TaskInclude)
+
+ # TODO/FIXME: do this the non-deprecated way
+ def test_one_include_tags(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml',
+ 'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('test_one_include_tags_tag1', res[0].tags)
+ self.assertIn('and_another_tagB', res[0].tags)
+
+ # TODO/FIXME: do this the non-deprecated way
+ def test_one_parent_include_tags(self):
+ ds = [{'include': '/dev/null/includes/test_include.yml',
+ # 'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
+ 'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
+ }
+ ]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
+ self.assertIn('and_another_tag2', res[0].tags)
+
+ # It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
+ def test_one_include_tags_deprecated_mixed(self):
+ ds = [{'include': "/dev/null/includes/other_test_include.yml",
+ 'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
+ 'tags': 'mixed_tag1, mixed_tag2'
+ }]
+ self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
+ helpers.load_list_of_tasks,
+ ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+
+ def test_one_include_tags_deprecated_include(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml',
+ 'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('include_tag1_deprecated', res[0].tags)
+ self.assertIn('and_another_tagB_deprecated', res[0].tags)
+
+ def test_one_include_use_handlers(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Handler)
+
+ def test_one_parent_include_use_handlers(self):
+ ds = [{'include': '/dev/null/includes/test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Handler)
+
+ # default for Handler
+ self.assertEqual(res[0].listen, [])
+
+ # TODO/FIXME: this doesn't seem right
+ # figure out how to get the non-static errors to be raised, this seems to just ignore everything
+ def test_one_include_not_static(self):
+ ds = [{
+ 'include': '/dev/null/includes/static_test_include.yml',
+ 'static': False
+ }]
+ # a_block = Block()
+ ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
+ a_task_include = TaskInclude()
+ ti = a_task_include.load(ti_ds)
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ block=ti,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Task)
+ self.assertEqual(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')
+
+ # TODO/FIXME: These two get stuck trying to make a mock_block into a TaskInclude
+# def test_one_include(self):
+# ds = [{'include': 'other_test_include.yml'}]
+# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+# block=self.mock_block,
+# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+# print(res)
+
+# def test_one_parent_include(self):
+# ds = [{'include': 'test_include.yml'}]
+# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+# block=self.mock_block,
+# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+# print(res)
+
+ def test_one_bogus_include_role(self):
+ ds = [{'include_role': {'name': 'bogus_role'}, 'collections': []}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ block=self.mock_block,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertEqual(len(res), 1)
+ self._assert_is_task_list_or_blocks(res)
+
+ def test_one_bogus_include_role_use_handlers(self):
+ ds = [{'include_role': {'name': 'bogus_role'}, 'collections': []}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
+ block=self.mock_block,
+ variable_manager=self.mock_variable_manager,
+ loader=self.fake_role_loader)
+ self.assertEqual(len(res), 1)
+ self._assert_is_task_list_or_blocks(res)
+
+
+class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
+ def setUp(self):
+ self._setup()
+
+ def test_ds_not_list(self):
+ ds = {}
+ self.assertRaises(AssertionError, helpers.load_list_of_roles,
+ ds, self.mock_play)
+
+ def test_empty_role(self):
+ ds = [{}]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "role definitions must contain a role name",
+ helpers.load_list_of_roles,
+ ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+
+ def test_empty_role_just_name(self):
+ ds = [{'name': 'bogus_role'}]
+ res = helpers.load_list_of_roles(ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertIsInstance(res, list)
+ for r in res:
+ self.assertIsInstance(r, RoleInclude)
+
+ def test_block_unknown_action(self):
+ ds = [{'name': 'bogus_role'}]
+ res = helpers.load_list_of_roles(ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertIsInstance(res, list)
+ for r in res:
+ self.assertIsInstance(r, RoleInclude)
+
+
+class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
+ def setUp(self):
+ self._setup()
+
+ def test_ds_not_list(self):
+ ds = {}
+ mock_play = MagicMock(name='MockPlay')
+ self.assertRaises(AssertionError, helpers.load_list_of_blocks,
+ ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
+
+ def test_empty_block(self):
+ ds = [{}]
+ mock_play = MagicMock(name='MockPlay')
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "no module/action detected in task",
+ helpers.load_list_of_blocks,
+ ds, mock_play,
+ parent_block=None,
+ role=None,
+ task_include=None,
+ use_handlers=False,
+ variable_manager=None, +
loader=None) + + def test_block_unknown_action(self): + ds = [{'action': 'foo', 'collections': []}] + mock_play = MagicMock(name='MockPlay') + res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, + loader=None) + + self.assertIsInstance(res, list) + for block in res: + self.assertIsInstance(block, Block) diff --git a/test/units/playbook/test_included_file.py b/test/units/playbook/test_included_file.py new file mode 100644 index 00000000..f143acb9 --- /dev/null +++ b/test/units/playbook/test_included_file.py @@ -0,0 +1,332 @@ +# (c) 2016, Adrian Likins +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +import pytest + +from units.compat.mock import MagicMock +from units.mock.loader import DictDataLoader + +from ansible.playbook.block import Block +from ansible.playbook.task import Task +from ansible.playbook.task_include import TaskInclude +from ansible.playbook.role_include import IncludeRole +from ansible.executor import task_result + +from ansible.playbook.included_file import IncludedFile +from ansible.errors import AnsibleParserError + + +@pytest.fixture +def mock_iterator(): + mock_iterator = MagicMock(name='MockIterator') + mock_iterator._play = MagicMock(name='MockPlay') + return mock_iterator + + +@pytest.fixture +def mock_variable_manager(): + # TODO: can we use a real VariableManager? 
+ mock_variable_manager = MagicMock(name='MockVariableManager') + mock_variable_manager.get_vars.return_value = dict() + return mock_variable_manager + + +def test_equals_ok(): + uuid = '111-111' + parent = MagicMock(name='MockParent') + parent._uuid = uuid + task = MagicMock(name='MockTask') + task._uuid = uuid + task._parent = parent + inc_a = IncludedFile('a.yml', {}, {}, task) + inc_b = IncludedFile('a.yml', {}, {}, task) + assert inc_a == inc_b + + +def test_equals_different_tasks(): + parent = MagicMock(name='MockParent') + parent._uuid = '111-111' + task_a = MagicMock(name='MockTask') + task_a._uuid = '11-11' + task_a._parent = parent + task_b = MagicMock(name='MockTask') + task_b._uuid = '22-22' + task_b._parent = parent + inc_a = IncludedFile('a.yml', {}, {}, task_a) + inc_b = IncludedFile('a.yml', {}, {}, task_b) + assert inc_a != inc_b + + +def test_equals_different_parents(): + parent_a = MagicMock(name='MockParent') + parent_a._uuid = '111-111' + parent_b = MagicMock(name='MockParent') + parent_b._uuid = '222-222' + task_a = MagicMock(name='MockTask') + task_a._uuid = '11-11' + task_a._parent = parent_a + task_b = MagicMock(name='MockTask') + task_b._uuid = '11-11' + task_b._parent = parent_b + inc_a = IncludedFile('a.yml', {}, {}, task_a) + inc_b = IncludedFile('a.yml', {}, {}, task_b) + assert inc_a != inc_b + + +def test_included_file_instantiation(): + filename = 'somefile.yml' + + inc_file = IncludedFile(filename=filename, args={}, vars={}, task=None) + + assert isinstance(inc_file, IncludedFile) + assert inc_file._filename == filename + assert inc_file._args == {} + assert inc_file._vars == {} + assert inc_file._task is None + + +def test_process_include_results(mock_iterator, mock_variable_manager): + hostname = "testhost1" + hostname2 = "testhost2" + + parent_task_ds = {'debug': 'msg=foo'} + parent_task = Task.load(parent_task_ds) + parent_task._play = None + + task_ds = {'include': 'include_test.yml'} + loaded_task = TaskInclude.load(task_ds, task_include=parent_task) + + return_data = {'include': 'include_test.yml'} + # The task in the TaskResult has to be a TaskInclude so it has a .static attr + result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data) + result2 = task_result.TaskResult(host=hostname2, task=loaded_task, return_data=return_data) + results = [result1, result2] + + fake_loader = DictDataLoader({'include_test.yml': ""}) + + res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager) + assert isinstance(res, list) + assert len(res) == 1 + assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml') + assert res[0]._hosts == ['testhost1', 'testhost2'] + assert res[0]._args == {} + assert res[0]._vars == {} + + +def test_process_include_diff_files(mock_iterator, mock_variable_manager): + hostname = "testhost1" + hostname2 = "testhost2" + + parent_task_ds = {'debug': 'msg=foo'} + parent_task = Task.load(parent_task_ds) + parent_task._play = None + + task_ds = {'include': 'include_test.yml'} + loaded_task = TaskInclude.load(task_ds, task_include=parent_task) + loaded_task._play = None + + child_task_ds = {'include': 'other_include_test.yml'} + loaded_child_task = TaskInclude.load(child_task_ds, task_include=loaded_task) + loaded_child_task._play = None + + return_data = {'include': 'include_test.yml'} + # The task in the TaskResult has to be a TaskInclude so it has a .static attr + result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data) 
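+ # the second host includes a different file, so process_include_results
+ # is expected to produce two separate IncludedFile entries below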
+ + return_data = {'include': 'other_include_test.yml'} + result2 = task_result.TaskResult(host=hostname2, task=loaded_child_task, return_data=return_data) + results = [result1, result2] + + fake_loader = DictDataLoader({'include_test.yml': "", + 'other_include_test.yml': ""}) + + res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager) + assert isinstance(res, list) + assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml') + assert res[1]._filename == os.path.join(os.getcwd(), 'other_include_test.yml') + + assert res[0]._hosts == ['testhost1'] + assert res[1]._hosts == ['testhost2'] + + assert res[0]._args == {} + assert res[1]._args == {} + + assert res[0]._vars == {} + assert res[1]._vars == {} + + +def test_process_include_simulate_free(mock_iterator, mock_variable_manager): + hostname = "testhost1" + hostname2 = "testhost2" + + parent_task_ds = {'debug': 'msg=foo'} + parent_task1 = Task.load(parent_task_ds) + parent_task2 = Task.load(parent_task_ds) + + parent_task1._play = None + parent_task2._play = None + + task_ds = {'include': 'include_test.yml'} + loaded_task1 = TaskInclude.load(task_ds, task_include=parent_task1) + loaded_task2 = TaskInclude.load(task_ds, task_include=parent_task2) + + return_data = {'include': 'include_test.yml'} + # The task in the TaskResult has to be a TaskInclude so it has a .static attr + result1 = task_result.TaskResult(host=hostname, task=loaded_task1, return_data=return_data) + result2 = task_result.TaskResult(host=hostname2, task=loaded_task2, return_data=return_data) + results = [result1, result2] + + fake_loader = DictDataLoader({'include_test.yml': ""}) + + res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager) + assert isinstance(res, list) + assert len(res) == 2 + assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml') + assert res[1]._filename == os.path.join(os.getcwd(), 'include_test.yml') + + assert res[0]._hosts == ['testhost1'] + assert res[1]._hosts == ['testhost2'] + + assert res[0]._args == {} + assert res[1]._args == {} + + assert res[0]._vars == {} + assert res[1]._vars == {} + + +def test_process_include_simulate_free_block_role_tasks(mock_iterator, + mock_variable_manager): + """Test loading the same role returns different included files + + In the case of free, we may end up with included files from roles that + have the same parent but are different tasks. Previously the comparison + for equality did not check if the tasks were the same and only checked + that the parents were the same. 
This led to some tasks being run
+    incorrectly and some tasks being silently dropped."""
+
+    fake_loader = DictDataLoader({
+        'include_test.yml': "",
+        '/etc/ansible/roles/foo_role/tasks/task1.yml': """
+        - debug: msg=task1
+        """,
+        '/etc/ansible/roles/foo_role/tasks/task2.yml': """
+        - debug: msg=task2
+        """,
+    })
+
+    hostname = "testhost1"
+    hostname2 = "testhost2"
+
+    role1_ds = {
+        'name': 'task1 include',
+        'include_role': {
+            'name': 'foo_role',
+            'tasks_from': 'task1.yml'
+        }
+    }
+    role2_ds = {
+        'name': 'task2 include',
+        'include_role': {
+            'name': 'foo_role',
+            'tasks_from': 'task2.yml'
+        }
+    }
+    parent_task_ds = {
+        'block': [
+            role1_ds,
+            role2_ds
+        ]
+    }
+    parent_block = Block.load(parent_task_ds, loader=fake_loader)
+
+    parent_block._play = None
+
+    include_role1_ds = {
+        'include_args': {
+            'name': 'foo_role',
+            'tasks_from': 'task1.yml'
+        }
+    }
+    include_role2_ds = {
+        'include_args': {
+            'name': 'foo_role',
+            'tasks_from': 'task2.yml'
+        }
+    }
+
+    include_role1 = IncludeRole.load(role1_ds,
+                                     block=parent_block,
+                                     loader=fake_loader)
+    include_role2 = IncludeRole.load(role2_ds,
+                                     block=parent_block,
+                                     loader=fake_loader)
+
+    result1 = task_result.TaskResult(host=hostname,
+                                     task=include_role1,
+                                     return_data=include_role1_ds)
+    result2 = task_result.TaskResult(host=hostname2,
+                                     task=include_role2,
+                                     return_data=include_role2_ds)
+    results = [result1, result2]
+
+    res = IncludedFile.process_include_results(results,
+                                               mock_iterator,
+                                               fake_loader,
+                                               mock_variable_manager)
+    assert isinstance(res, list)
+    # we should get two different includes
+    assert len(res) == 2
+    assert res[0]._filename == 'foo_role'
+    assert res[1]._filename == 'foo_role'
+    # with different tasks
+    assert res[0]._task != res[1]._task
+
+    assert res[0]._hosts == ['testhost1']
+    assert res[1]._hosts == ['testhost2']
+
+    assert res[0]._args == {}
+    assert res[1]._args == {}
+
+    assert res[0]._vars == {}
+    assert res[1]._vars == {}
+
+
+def test_empty_raw_params():
+    parent_task_ds = {'debug': 'msg=foo'}
+    parent_task = Task.load(parent_task_ds)
+    parent_task._play = None
+
+    task_ds_list = [
+        {
+            'include': ''
+        },
+        {
+            'include_tasks': ''
+        },
+        {
+            'import_tasks': ''
+        }
+    ]
+    for task_ds in task_ds_list:
+        with pytest.raises(AnsibleParserError):
+            TaskInclude.load(task_ds, task_include=parent_task)
diff --git a/test/units/playbook/test_play.py b/test/units/playbook/test_play.py
new file mode 100644
index 00000000..725c28ea
--- /dev/null
+++ b/test/units/playbook/test_play.py
@@ -0,0 +1,132 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
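Taken together, the included_file tests above pin down a grouping contract: results that point at the same file and originate from the same task (same task UUID and same parent UUID) collapse into a single IncludedFile that accumulates hosts, while a differing task or parent forces a separate entry, even under the free strategy. Below is a minimal, self-contained sketch of that contract for illustration only; SketchIncludedFile and group_results are invented names, and the args/vars comparisons the real IncludedFile equality check also performs are omitted for brevity.

    from types import SimpleNamespace

    class SketchIncludedFile:
        def __init__(self, filename, task):
            self._filename = filename
            self._task = task
            self._hosts = []

        def __eq__(self, other):
            # same file AND same originating task identity (its UUID and
            # its parent's UUID), exactly what the tests above assert
            return (self._filename == other._filename
                    and self._task._uuid == other._task._uuid
                    and self._task._parent._uuid == other._task._parent._uuid)

    def group_results(results):
        """Merge per-host results into one entry per (file, task) pair."""
        included = []
        for host, filename, task in results:
            candidate = SketchIncludedFile(filename, task)
            try:
                # reuse an existing equal entry if one was already seen
                candidate = included[included.index(candidate)]
            except ValueError:
                included.append(candidate)
            candidate._hosts.append(host)
        return included

    parent = SimpleNamespace(_uuid='111-111')
    task_a = SimpleNamespace(_uuid='11-11', _parent=parent)
    task_b = SimpleNamespace(_uuid='22-22', _parent=parent)

    grouped = group_results([
        ('testhost1', 'include_test.yml', task_a),
        ('testhost2', 'include_test.yml', task_a),  # same task: hosts merge
        ('testhost2', 'include_test.yml', task_b),  # different task: new entry
    ])
    assert len(grouped) == 2
    assert grouped[0]._hosts == ['testhost1', 'testhost2']
    assert grouped[1]._hosts == ['testhost2']

This is why test_process_include_simulate_free_block_role_tasks expects two entries for the same role: the two include_role tasks share a parent block but carry different task UUIDs.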
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.errors import AnsibleParserError
+from ansible.playbook.play import Play
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestPlay(unittest.TestCase):
+
+    def test_empty_play(self):
+        p = Play.load(dict())
+        self.assertEqual(str(p), '')
+
+    def test_basic_play(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            connection='local',
+            remote_user="root",
+            become=True,
+            become_user="testing",
+        ))
+
+    def test_play_with_user(self):
+        # 'user' is an alias for 'remote_user'; with only one of them set
+        # there is no conflict and the value maps straight through
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            user="testing",
+            gather_facts=False,
+        ))
+        self.assertEqual(p.remote_user, "testing")
+
+    def test_play_with_user_conflict(self):
+        play_data = dict(
+            name="test play",
+            hosts=['foo'],
+            user="testing",
+            remote_user="testing",
+        )
+        self.assertRaises(AnsibleParserError, Play.load, play_data)
+
+    def test_play_with_tasks(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            tasks=[dict(action='shell echo "hello world"')],
+        ))
+
+    def test_play_with_handlers(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            handlers=[dict(action='shell echo "hello world"')],
+        ))
+
+    def test_play_with_pre_tasks(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            pre_tasks=[dict(action='shell echo "hello world"')],
+        ))
+
+    def test_play_with_post_tasks(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            post_tasks=[dict(action='shell echo "hello world"')],
+        ))
+
+    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+    def test_play_with_roles(self):
+        fake_loader = DictDataLoader({
+            '/etc/ansible/roles/foo/tasks.yml': """
+            - name: role task
+              shell: echo "hello world"
+            """,
+        })
+
+        mock_var_manager = MagicMock()
+        mock_var_manager.get_vars.return_value = dict()
+
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            roles=['foo'],
+        ), loader=fake_loader, variable_manager=mock_var_manager)
+
+        blocks = p.compile()
+
+    def test_play_compile(self):
+        p = Play.load(dict(
+            name="test play",
+            hosts=['foo'],
+            gather_facts=False,
+            tasks=[dict(action='shell echo "hello world"')],
+        ))
+
+        blocks = p.compile()
+
+        # with a single block, there will still be three
+        # implicit meta flush_handler blocks inserted
+        self.assertEqual(len(blocks), 4)
diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py
new file mode 100644
index 00000000..0936775b
--- /dev/null
+++ b/test/units/playbook/test_play_context.py
@@ -0,0 +1,111 @@
+# (c) 2012-2014, Michael DeHaan
+# (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError
+from ansible.playbook.play_context import PlayContext
+from ansible.playbook.play import Play
+from ansible.plugins.loader import become_loader
+from ansible.utils
import context_objects as co + + +@pytest.fixture +def parser(): + parser = opt_help.create_base_parser('testparser') + + opt_help.add_runas_options(parser) + opt_help.add_meta_options(parser) + opt_help.add_runtask_options(parser) + opt_help.add_vault_options(parser) + opt_help.add_async_options(parser) + opt_help.add_connect_options(parser) + opt_help.add_subset_options(parser) + opt_help.add_check_options(parser) + opt_help.add_inventory_options(parser) + + return parser + + +@pytest.fixture +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None + + +def test_play_context(mocker, parser, reset_cli_args): + options = parser.parse_args(['-vv', '--check']) + context._init_global_context(options) + play = Play.load({}) + play_context = PlayContext(play=play) + + assert play_context.remote_addr is None + assert play_context.remote_user is None + assert play_context.password == '' + assert play_context.private_key_file == C.DEFAULT_PRIVATE_KEY_FILE + assert play_context.timeout == C.DEFAULT_TIMEOUT + assert play_context.verbosity == 2 + assert play_context.check_mode is True + + mock_play = mocker.MagicMock() + mock_play.force_handlers = True + + play_context = PlayContext(play=mock_play) + assert play_context.force_handlers is True + + mock_task = mocker.MagicMock() + mock_task.connection = 'mocktask' + mock_task.remote_user = 'mocktask' + mock_task.port = 1234 + mock_task.no_log = True + mock_task.become = True + mock_task.become_method = 'mocktask' + mock_task.become_user = 'mocktaskroot' + mock_task.become_pass = 'mocktaskpass' + mock_task._local_action = False + mock_task.delegate_to = None + + all_vars = dict( + ansible_connection='mock_inventory', + ansible_ssh_port=4321, + ) + + mock_templar = mocker.MagicMock() + + play_context = PlayContext() + play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar) + + assert play_context.connection == 'mock_inventory' + assert play_context.remote_user == 'mocktask' + assert play_context.no_log is True + + mock_task.no_log = False + play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar) + assert play_context.no_log is False + + +def test_play_context_make_become_bad(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + play_context = PlayContext() + + default_cmd = "/bin/foo" + default_exe = "/bin/bash" + + play_context.become = True + play_context.become_user = 'foo' + play_context.set_become_plugin(become_loader.get('bad')) + play_context.become_method = 'bad' + + with pytest.raises(AnsibleError): + play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) diff --git a/test/units/playbook/test_playbook.py b/test/units/playbook/test_playbook.py new file mode 100644 index 00000000..68a9fb75 --- /dev/null +++ b/test/units/playbook/test_playbook.py @@ -0,0 +1,61 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.errors import AnsibleParserError +from ansible.playbook import Playbook +from ansible.vars.manager import VariableManager + +from units.mock.loader import DictDataLoader + + +class TestPlaybook(unittest.TestCase): + + def test_empty_playbook(self): + fake_loader = DictDataLoader({}) + p = Playbook(loader=fake_loader) + + def test_basic_playbook(self): + fake_loader = DictDataLoader({ + "test_file.yml": """ + - hosts: all + """, + }) + p = Playbook.load("test_file.yml", loader=fake_loader) + plays = p.get_plays() + + def test_bad_playbook_files(self): + fake_loader = DictDataLoader({ + # represents a playbook which is not a list of plays + "bad_list.yml": """ + foo: bar + + """, + # represents a playbook where a play entry is mis-formatted + "bad_entry.yml": """ + - + - "This should be a mapping..." + + """, + }) + vm = VariableManager() + self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader) + self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader) diff --git a/test/units/playbook/test_taggable.py b/test/units/playbook/test_taggable.py new file mode 100644 index 00000000..ab5f86b4 --- /dev/null +++ b/test/units/playbook/test_taggable.py @@ -0,0 +1,102 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
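A pattern worth noting before the taggable tests: the play and playbook tests above all rely on units.mock.loader.DictDataLoader, which serves parsed YAML from an in-memory dict so no fixture files are needed on disk. A rough stand-in shows the idea, assuming only PyYAML (which ansible-core already depends on); TinyDictLoader is an illustrative name, and the real helper subclasses Ansible's DataLoader and implements more of its interface.

    import yaml  # PyYAML

    class TinyDictLoader:
        """Conceptual stand-in for DictDataLoader: serve parsed YAML
        from an in-memory mapping instead of the filesystem."""

        def __init__(self, file_mapping):
            self._files = file_mapping

        def path_exists(self, path):
            return path in self._files

        def load_from_file(self, path):
            try:
                return yaml.safe_load(self._files[path])
            except KeyError:
                raise IOError("file not found: %s" % path)

    loader = TinyDictLoader({"test_file.yml": "- hosts: all\n"})
    assert loader.path_exists("test_file.yml")
    assert loader.load_from_file("test_file.yml") == [{"hosts": "all"}]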
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.playbook.taggable import Taggable +from units.mock.loader import DictDataLoader + + +class TaggableTestObj(Taggable): + + def __init__(self): + self._loader = DictDataLoader({}) + self.tags = [] + + +class TestTaggable(unittest.TestCase): + + def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags): + taggable_obj = TaggableTestObj() + taggable_obj.tags = tags + + evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {}) + + self.assertEqual(test_value, evaluate) + + def test_evaluate_tags_tag_in_only_tags(self): + self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], []) + + def test_evaluate_tags_tag_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1']) + + def test_evaluate_tags_special_always_in_object_tags(self): + self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], []) + + def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag']) + + def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always']) + + def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self): + self.assert_evaluate_equal(True, ['tag'], ['tagged'], []) + + def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self): + self.assert_evaluate_equal(False, [], ['tagged'], []) + + def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self): + self.assert_evaluate_equal(False, ['tag'], [], ['tagged']) + + def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self): + self.assert_evaluate_equal(True, [], [], ['tagged']) + + def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self): + self.assert_evaluate_equal(False, ['tag'], ['untagged'], []) + + def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self): + self.assert_evaluate_equal(True, [], ['untagged'], []) + + def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self): + self.assert_evaluate_equal(True, ['tag'], [], ['untagged']) + + def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self): + self.assert_evaluate_equal(False, [], [], ['untagged']) + + def test_evaluate_tags_special_all_in_only_tags(self): + self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged']) + + def test_evaluate_tags_special_all_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all']) + + def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag'], ['all'], ['all']) + + def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self): + self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all']) + + def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always']) + + def test_evaluate_tags_accepts_lists(self): + self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], []) + + def test_evaluate_tags_with_repeated_tags(self): + self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag']) diff --git a/test/units/playbook/test_task.py b/test/units/playbook/test_task.py new file mode 100644 
index 00000000..f94419a2 --- /dev/null +++ b/test/units/playbook/test_task.py @@ -0,0 +1,114 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import patch +from ansible.playbook.task import Task +from ansible.parsing.yaml import objects +from ansible import errors + + +basic_command_task = dict( + name='Test Task', + command='echo hi' +) + +kv_command_task = dict( + action='command echo hi' +) + +# See #36848 +kv_bad_args_str = '- apk: sdfs sf sdf 37' +kv_bad_args_ds = {'apk': 'sdfs sf sdf 37'} + + +class TestTask(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_construct_empty_task(self): + Task() + + def test_construct_task_with_role(self): + pass + + def test_construct_task_with_block(self): + pass + + def test_construct_task_with_role_and_block(self): + pass + + def test_load_task_simple(self): + t = Task.load(basic_command_task) + assert t is not None + self.assertEqual(t.name, basic_command_task['name']) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi')) + + def test_load_task_kv_form(self): + t = Task.load(kv_command_task) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi')) + + @patch.object(errors.AnsibleError, '_get_error_lines_from_file') + def test_load_task_kv_form_error_36848(self, mock_get_err_lines): + ds = objects.AnsibleMapping(kv_bad_args_ds) + ds.ansible_pos = ('test_task_faux_playbook.yml', 1, 1) + mock_get_err_lines.return_value = (kv_bad_args_str, '') + + with self.assertRaises(errors.AnsibleParserError) as cm: + Task.load(ds) + + self.assertIsInstance(cm.exception, errors.AnsibleParserError) + self.assertEqual(cm.exception._obj, ds) + self.assertEqual(cm.exception._obj, kv_bad_args_ds) + self.assertIn("The error appears to be in 'test_task_faux_playbook.yml", cm.exception.message) + self.assertIn(kv_bad_args_str, cm.exception.message) + self.assertIn('apk', cm.exception.message) + self.assertEqual(cm.exception.message.count('The offending line'), 1) + self.assertEqual(cm.exception.message.count('The error appears to be in'), 1) + + def test_task_auto_name(self): + assert 'name' not in kv_command_task + Task.load(kv_command_task) + # self.assertEqual(t.name, 'shell echo hi') + + def test_task_auto_name_with_role(self): + pass + + def test_load_task_complex_form(self): + pass + + def test_can_load_module_complex_form(self): + pass + + def test_local_action_implies_delegate(self): + pass + + def test_local_action_conflicts_with_delegate(self): + pass + + def test_delegate_to_parses(self): + pass diff --git a/test/units/plugins/__init__.py b/test/units/plugins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/test/units/plugins/action/__init__.py b/test/units/plugins/action/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py new file mode 100644 index 00000000..12488019 --- /dev/null +++ b/test/units/plugins/action/test_action.py @@ -0,0 +1,683 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Florian Apolloner +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re + +from ansible import constants as C +from units.compat import unittest +from units.compat.mock import patch, MagicMock, mock_open + +from ansible.errors import AnsibleError +from ansible.module_utils.six import text_type +from ansible.module_utils.six.moves import shlex_quote, builtins +from ansible.module_utils._text import to_bytes +from ansible.playbook.play_context import PlayContext +from ansible.plugins.action import ActionBase +from ansible.template import Templar +from ansible.vars.clean import clean_facts + +from units.mock.loader import DictDataLoader + + +python_module_replacers = br""" +#!/usr/bin/python + +#ANSIBLE_VERSION = "<>" +#MODULE_COMPLEX_ARGS = "<>" +#SELINUX_SPECIAL_FS="<>" + +test = u'Toshio \u304f\u3089\u3068\u307f' +from ansible.module_utils.basic import * +""" + +powershell_module_replacers = b""" +WINDOWS_ARGS = "<>" +# POWERSHELL_COMMON +""" + + +def _action_base(): + fake_loader = DictDataLoader({ + }) + mock_module_loader = MagicMock() + mock_shared_loader_obj = MagicMock() + mock_shared_loader_obj.module_loader = mock_module_loader + mock_connection_loader = MagicMock() + + mock_shared_loader_obj.connection_loader = mock_connection_loader + mock_connection = MagicMock() + + play_context = MagicMock() + + action_base = DerivedActionBase(task=None, + connection=mock_connection, + play_context=play_context, + loader=fake_loader, + templar=None, + shared_loader_obj=mock_shared_loader_obj) + return action_base + + +class DerivedActionBase(ActionBase): + TRANSFERS_FILES = False + + def run(self, tmp=None, task_vars=None): + # We're not testing the plugin run() method, just the helper + # methods ActionBase defines + return super(DerivedActionBase, self).run(tmp=tmp, task_vars=task_vars) + + +class TestActionBase(unittest.TestCase): + + def test_action_base_run(self): + mock_task = MagicMock() + mock_task.action = "foo" + mock_task.args = dict(a=1, b=2, c=3) + + mock_connection = MagicMock() + + play_context = PlayContext() + + mock_task.async_val = None + action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None) + results = action_base.run() + self.assertEqual(results, dict()) + + mock_task.async_val = 0 + action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None) + results = action_base.run() + self.assertEqual(results, {}) + + 
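The DerivedActionBase shim above is the standard trick for unit-testing a plugin base class: derive the smallest concrete subclass, hand it MagicMock collaborators, and exercise the protected helpers directly. A generic sketch of the pattern follows, using the stdlib unittest.mock rather than units.compat.mock; FakeBase and DerivedForTest are invented names, not Ansible classes.

    from unittest.mock import MagicMock

    class FakeBase:
        """Stand-in for a plugin base class with a protected helper."""

        def __init__(self, connection, loader):
            self._connection = connection
            self._loader = loader

        def _join_remote(self, *parts):
            # delegates to the connection's shell, as ActionBase helpers do
            return self._connection._shell.join_path(*parts)

    class DerivedForTest(FakeBase):
        pass  # nothing overridden; we only want access to the helpers

    mock_connection = MagicMock()
    mock_connection._shell.join_path.side_effect = lambda *p: '/'.join(p)
    helper = DerivedForTest(connection=mock_connection, loader=None)
    assert helper._join_remote('tmp', 'ansible') == 'tmp/ansible'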
def test_action_base__configure_module(self): + fake_loader = DictDataLoader({ + }) + + # create our fake task + mock_task = MagicMock() + mock_task.action = "copy" + mock_task.async_val = 0 + mock_task.delegate_to = None + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # create a mock shared loader object + def mock_find_plugin_with_context(name, options, collection_list=None): + mockctx = MagicMock() + if name == 'badmodule': + mockctx.resolved = False + mockctx.plugin_resolved_path = None + elif '.ps1' in options: + mockctx.resolved = True + mockctx.plugin_resolved_path = '/fake/path/to/%s.ps1' % name + else: + mockctx.resolved = True + mockctx.plugin_resolved_path = '/fake/path/to/%s' % name + return mockctx + + mock_module_loader = MagicMock() + mock_module_loader.find_plugin_with_context.side_effect = mock_find_plugin_with_context + mock_shared_obj_loader = MagicMock() + mock_shared_obj_loader.module_loader = mock_module_loader + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=fake_loader, + templar=Templar(loader=fake_loader), + shared_loader_obj=mock_shared_obj_loader, + ) + + # test python module formatting + with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))): + with patch.object(os, 'rename'): + mock_task.args = dict(a=1, foo='fö〩') + mock_connection.module_implementation_preferences = ('',) + (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args, + task_vars=dict(ansible_python_interpreter='/usr/bin/python')) + self.assertEqual(style, "new") + self.assertEqual(shebang, u"#!/usr/bin/python") + + # test module not found + self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {}) + + # test powershell module formatting + with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))): + mock_task.action = 'win_copy' + mock_task.args = dict(b=2) + mock_connection.module_implementation_preferences = ('.ps1',) + (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args, {}) + self.assertEqual(style, "new") + self.assertEqual(shebang, u'#!powershell') + + # test module not found + self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {}) + + def test_action_base__compute_environment_string(self): + fake_loader = DictDataLoader({ + }) + + # create our fake task + mock_task = MagicMock() + mock_task.action = "copy" + mock_task.args = dict(a=1) + + # create a mock connection, so we don't actually try and connect to things + def env_prefix(**args): + return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in args.items()]) + mock_connection = MagicMock() + mock_connection._shell.env_prefix.side_effect = env_prefix + + # we're using a real play context here + play_context = PlayContext() + + # and we're using a real templar here too + templar = Templar(loader=fake_loader) + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=fake_loader, + templar=templar, + shared_loader_obj=None, + ) + + # test standard environment setup + mock_task.environment = [dict(FOO='foo'), None] + env_string = 
action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=foo") + + # test where environment is not a list + mock_task.environment = dict(FOO='foo') + env_string = action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=foo") + + # test environment with a variable in it + templar.available_variables = dict(the_var='bar') + mock_task.environment = [dict(FOO='{{the_var}}')] + env_string = action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=bar") + + # test with a bad environment set + mock_task.environment = dict(FOO='foo') + mock_task.environment = ['hi there'] + self.assertRaises(AnsibleError, action_base._compute_environment_string) + + def test_action_base__early_needs_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + self.assertFalse(action_base._early_needs_tmp_path()) + + action_base.TRANSFERS_FILES = True + self.assertTrue(action_base._early_needs_tmp_path()) + + def test_action_base__make_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + def get_shell_opt(opt): + + ret = None + if opt == 'admin_users': + ret = ['root', 'toor', 'Administrator'] + elif opt == 'remote_tmp': + ret = '~/.ansible/tmp' + + return ret + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection.transport = 'ssh' + mock_connection._shell.mkdtemp.return_value = 'mkdir command' + mock_connection._shell.join_path.side_effect = os.path.join + mock_connection._shell.get_option = get_shell_opt + mock_connection._shell.HOMES_RE = re.compile(r'(\'|\")?(~|\$HOME)(.*)') + + # we're using a real play context here + play_context = PlayContext() + play_context.become = True + play_context.become_user = 'foo' + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._low_level_execute_command = MagicMock() + action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path') + self.assertEqual(action_base._make_tmp_path('root'), '/some/path/') + + # empty path fails + action_base._low_level_execute_command.return_value = dict(rc=0, stdout='') + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') + + # authentication failure + action_base._low_level_execute_command.return_value = dict(rc=5, stdout='') + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') + + # ssh error + action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='') + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') + play_context.verbosity = 5 + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') + + # general error + action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='') + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') + action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device') + self.assertRaises(AnsibleError, 
action_base._make_tmp_path, 'root') + + def test_action_base__remove_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection._shell.remove.return_value = 'rm some stuff' + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._low_level_execute_command = MagicMock() + # these don't really return anything or raise errors, so + # we're pretty much calling these for coverage right now + action_base._remove_tmp_path('/bad/path/dont/remove') + action_base._remove_tmp_path('/good/path/to/ansible-tmp-thing') + + @patch('os.unlink') + @patch('os.fdopen') + @patch('tempfile.mkstemp') + def test_action_base__transfer_data(self, mock_mkstemp, mock_fdopen, mock_unlink): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection.put_file.return_value = None + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + mock_afd = MagicMock() + mock_afile = MagicMock() + mock_mkstemp.return_value = (mock_afd, mock_afile) + + mock_unlink.return_value = None + + mock_afo = MagicMock() + mock_afo.write.return_value = None + mock_afo.flush.return_value = None + mock_afo.close.return_value = None + mock_fdopen.return_value = mock_afo + + self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some data'), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some mixed data: fö〩'), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='some value')), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='fö〩')), '/path/to/remote/file') + + mock_afo.write.side_effect = Exception() + self.assertRaises(AnsibleError, action_base._transfer_data, '/path/to/remote/file', '') + + def test_action_base__execute_remote_stat(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._execute_module = MagicMock() + + # test normal case + action_base._execute_module.return_value = dict(stat=dict(checksum='1111111111111111111111111111111111', exists=True)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertEqual(res['checksum'], '1111111111111111111111111111111111') + + # test does not exist + action_base._execute_module.return_value = dict(stat=dict(exists=False)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertFalse(res['exists']) + self.assertEqual(res['checksum'], '1') + + # 
test no checksum in result from _execute_module + action_base._execute_module.return_value = dict(stat=dict(exists=True)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertTrue(res['exists']) + self.assertEqual(res['checksum'], '') + + # test stat call failed + action_base._execute_module.return_value = dict(failed=True, msg="because I said so") + self.assertRaises(AnsibleError, action_base._execute_remote_stat, path='/path/to/file', all_vars=dict(), follow=False) + + def test_action_base__execute_module(self): + # create our fake task + mock_task = MagicMock() + mock_task.action = 'copy' + mock_task.args = dict(a=1, b=2, c=3) + + # create a mock connection, so we don't actually try and connect to things + def build_module_command(env_string, shebang, cmd, arg_path=None): + to_run = [env_string, cmd] + if arg_path: + to_run.append(arg_path) + return " ".join(to_run) + + def get_option(option): + return {'admin_users': ['root', 'toor']}.get(option) + + mock_connection = MagicMock() + mock_connection.build_module_command.side_effect = build_module_command + mock_connection.socket_path = None + mock_connection._shell.get_remote_filename.return_value = 'copy.py' + mock_connection._shell.join_path.side_effect = os.path.join + mock_connection._shell.tmpdir = '/var/tmp/mytempdir' + mock_connection._shell.get_option = get_option + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + # fake a lot of methods as we test those elsewhere + action_base._configure_module = MagicMock() + action_base._supports_check_mode = MagicMock() + action_base._is_pipelining_enabled = MagicMock() + action_base._make_tmp_path = MagicMock() + action_base._transfer_data = MagicMock() + action_base._compute_environment_string = MagicMock() + action_base._low_level_execute_command = MagicMock() + action_base._fixup_perms2 = MagicMock() + + action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path') + action_base._is_pipelining_enabled.return_value = False + action_base._compute_environment_string.return_value = '' + action_base._connection.has_pipelining = False + action_base._make_tmp_path.return_value = '/the/tmp/path' + action_base._low_level_execute_command.return_value = dict(stdout='{"rc": 0, "stdout": "ok"}') + self.assertEqual(action_base._execute_module(module_name=None, module_args=None), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok'])) + self.assertEqual( + action_base._execute_module( + module_name='foo', + module_args=dict(z=9, y=8, x=7), + task_vars=dict(a=1) + ), + dict( + _ansible_parsed=True, + rc=0, + stdout="ok", + stdout_lines=['ok'], + ) + ) + + # test with needing/removing a remote tmp path + action_base._configure_module.return_value = ('old', '#!/usr/bin/python', 'this is the module data', 'path') + action_base._is_pipelining_enabled.return_value = False + action_base._make_tmp_path.return_value = '/the/tmp/path' + self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok'])) + + action_base._configure_module.return_value = ('non_native_want_json', '#!/usr/bin/python', 'this is the module data', 'path') + self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", 
stdout_lines=['ok']))
+
+        play_context.become = True
+        play_context.become_user = 'foo'
+        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+
+        # test an invalid shebang return
+        action_base._configure_module.return_value = ('new', '', 'this is the module data', 'path')
+        action_base._is_pipelining_enabled.return_value = False
+        action_base._make_tmp_path.return_value = '/the/tmp/path'
+        self.assertRaises(AnsibleError, action_base._execute_module)
+
+        # test with check mode enabled, once with support for check
+        # mode and once with support disabled to raise an error
+        play_context.check_mode = True
+        action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
+        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+        action_base._supports_check_mode = False
+        self.assertRaises(AnsibleError, action_base._execute_module)
+
+    def test_action_base_sudo_only_if_user_differs(self):
+        fake_loader = MagicMock()
+        fake_loader.get_basedir.return_value = os.getcwd()
+        play_context = PlayContext()
+
+        action_base = DerivedActionBase(None, None, play_context, fake_loader, None, None)
+        action_base.get_become_option = MagicMock(return_value='root')
+        action_base._get_remote_user = MagicMock(return_value='root')
+
+        action_base._connection = MagicMock(exec_command=MagicMock(return_value=(0, '', '')))
+
+        action_base._connection._shell = shell = MagicMock(append_command=MagicMock(return_value=('JOINED CMD')))
+
+        action_base._connection.become = become = MagicMock()
+        become.build_become_command.return_value = 'foo'
+
+        action_base._low_level_execute_command('ECHO', sudoable=True)
+        become.build_become_command.assert_not_called()
+
+        action_base._get_remote_user.return_value = 'apo'
+        action_base._low_level_execute_command('ECHO', sudoable=True, executable='/bin/csh')
+        become.build_become_command.assert_called_once_with("ECHO", shell)
+
+        become.build_become_command.reset_mock()
+
+        with patch.object(C, 'BECOME_ALLOW_SAME_USER', new=True):
+            action_base._get_remote_user.return_value = 'root'
+            action_base._low_level_execute_command('ECHO SAME', sudoable=True)
+            become.build_become_command.assert_called_once_with("ECHO SAME", shell)
+
+    def test__remote_expand_user_relative_pathing(self):
+        action_base = _action_base()
+        action_base._play_context.remote_addr = 'bar'
+        action_base._low_level_execute_command = MagicMock(return_value={'stdout': b'../home/user'})
+        action_base._connection._shell.join_path.return_value = '../home/user/foo'
+        with self.assertRaises(AnsibleError) as cm:
+            action_base._remote_expand_user('~/foo')
+        self.assertEqual(
+            cm.exception.message,
+            "'bar' returned an invalid relative home directory path containing '..'"
+        )
+
+
+class TestActionBaseCleanReturnedData(unittest.TestCase):
+    def test(self):
+
+        fake_loader = DictDataLoader({
+        })
+        mock_module_loader = MagicMock()
+        mock_shared_loader_obj = MagicMock()
+        mock_shared_loader_obj.module_loader = mock_module_loader
+        connection_loader_paths = ['/tmp/asdfadf', '/usr/lib64/whatever',
+                                   'dfadfasf',
+                                   'foo.py',
+                                   '.*',
+                                   # FIXME: a path with parens breaks the regex
+                                   # '(.*)',
+                                   '/path/to/ansible/lib/ansible/plugins/connection/custom_connection.py',
+                                   '/path/to/ansible/lib/ansible/plugins/connection/ssh.py']
+
+        def fake_all(path_only=None):
+            for path in connection_loader_paths:
+                yield path
+
+        mock_connection_loader = MagicMock()
mock_connection_loader.all = fake_all + + mock_shared_loader_obj.connection_loader = mock_connection_loader + mock_connection = MagicMock() + # mock_connection._shell.env_prefix.side_effect = env_prefix + + # action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None) + action_base = DerivedActionBase(task=None, + connection=mock_connection, + play_context=None, + loader=fake_loader, + templar=None, + shared_loader_obj=mock_shared_loader_obj) + data = {'ansible_playbook_python': '/usr/bin/python', + # 'ansible_rsync_path': '/usr/bin/rsync', + 'ansible_python_interpreter': '/usr/bin/python', + 'ansible_ssh_some_var': 'whatever', + 'ansible_ssh_host_key_somehost': 'some key here', + 'some_other_var': 'foo bar'} + data = clean_facts(data) + self.assertNotIn('ansible_playbook_python', data) + self.assertNotIn('ansible_python_interpreter', data) + self.assertIn('ansible_ssh_host_key_somehost', data) + self.assertIn('some_other_var', data) + + +class TestActionBaseParseReturnedData(unittest.TestCase): + + def test_fail_no_json(self): + action_base = _action_base() + rc = 0 + stdout = 'foo\nbar\n' + err = 'oopsy' + returned_data = {'rc': rc, + 'stdout': stdout, + 'stdout_lines': stdout.splitlines(), + 'stderr': err} + res = action_base._parse_returned_data(returned_data) + self.assertFalse(res['_ansible_parsed']) + self.assertTrue(res['failed']) + self.assertEqual(res['module_stderr'], err) + + def test_json_empty(self): + action_base = _action_base() + rc = 0 + stdout = '{}\n' + err = '' + returned_data = {'rc': rc, + 'stdout': stdout, + 'stdout_lines': stdout.splitlines(), + 'stderr': err} + res = action_base._parse_returned_data(returned_data) + del res['_ansible_parsed'] # we always have _ansible_parsed + self.assertEqual(len(res), 0) + self.assertFalse(res) + + def test_json_facts(self): + action_base = _action_base() + rc = 0 + stdout = '{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}}\n' + err = '' + + returned_data = {'rc': rc, + 'stdout': stdout, + 'stdout_lines': stdout.splitlines(), + 'stderr': err} + res = action_base._parse_returned_data(returned_data) + self.assertTrue(res['ansible_facts']) + self.assertIn('ansible_blip', res['ansible_facts']) + # TODO: Should this be an AnsibleUnsafe? + # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe) + + def test_json_facts_add_host(self): + action_base = _action_base() + rc = 0 + stdout = '''{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}, + "add_host": {"host_vars": {"some_key": ["whatever the add_host object is"]} + } + }\n''' + err = '' + + returned_data = {'rc': rc, + 'stdout': stdout, + 'stdout_lines': stdout.splitlines(), + 'stderr': err} + res = action_base._parse_returned_data(returned_data) + self.assertTrue(res['ansible_facts']) + self.assertIn('ansible_blip', res['ansible_facts']) + self.assertIn('add_host', res) + # TODO: Should this be an AnsibleUnsafe? + # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe) diff --git a/test/units/plugins/action/test_gather_facts.py b/test/units/plugins/action/test_gather_facts.py new file mode 100644 index 00000000..e15edd39 --- /dev/null +++ b/test/units/plugins/action/test_gather_facts.py @@ -0,0 +1,87 @@ +# (c) 2016, Saran Ahluwalia +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
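The TestActionBaseParseReturnedData cases above encode a simple rule: module stdout that parses as JSON becomes the result dict with _ansible_parsed set to True, and anything else yields a failed result carrying the raw stdout and stderr. A condensed sketch of just that rule, where parse_module_stdout is an invented name and the real _parse_returned_data also splits out warnings, deprecations, and internal keys:

    import json

    def parse_module_stdout(returned):
        """Illustrative sketch of the behavior the tests pin down."""
        try:
            res = json.loads(returned.get('stdout', ''))
            res['_ansible_parsed'] = True
        except ValueError:
            # not JSON: surface the raw streams and mark the task failed
            res = {
                '_ansible_parsed': False,
                'failed': True,
                'module_stdout': returned.get('stdout', ''),
                'module_stderr': returned.get('stderr', ''),
            }
        return res

    assert parse_module_stdout({'stdout': 'foo\nbar\n', 'stderr': 'oopsy'})['failed']
    assert parse_module_stdout({'stdout': '{}\n'}) == {'_ansible_parsed': True}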
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import MagicMock, patch + +from ansible import constants as C +from ansible.plugins.action.gather_facts import ActionModule +from ansible.playbook.task import Task +from ansible.template import Templar +import ansible.executor.module_common as module_common + +from units.mock.loader import DictDataLoader + + +class TestNetworkFacts(unittest.TestCase): + task = MagicMock(Task) + play_context = MagicMock() + play_context.check_mode = False + connection = MagicMock() + fake_loader = DictDataLoader({ + }) + templar = Templar(loader=fake_loader) + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_network_gather_facts(self): + self.task_vars = {'ansible_network_os': 'ios'} + self.task.action = 'gather_facts' + self.task.async_val = False + self.task._ansible_internal_redirect_list = [] + self.task.args = {'gather_subset': 'min'} + self.task.module_defaults = [{'ios_facts': {'gather_subset': 'min'}}] + + plugin = ActionModule(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None) + plugin._execute_module = MagicMock() + + res = plugin.run(task_vars=self.task_vars) + self.assertEqual(res['ansible_facts']['_ansible_facts_gathered'], True) + + mod_args = plugin._get_module_args('ios_facts', task_vars=self.task_vars) + self.assertEqual(mod_args['gather_subset'], 'min') + + facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.task_vars) + self.assertEqual(facts_modules, ['ansible.legacy.ios_facts']) + + @patch.object(module_common, '_get_collection_metadata', return_value={}) + def test_network_gather_facts_fqcn(self, mock_collection_metadata): + self.fqcn_task_vars = {'ansible_network_os': 'cisco.ios.ios'} + self.task.action = 'gather_facts' + self.task._ansible_internal_redirect_list = ['cisco.ios.ios_facts'] + self.task.async_val = False + self.task.args = {'gather_subset': 'min'} + self.task.module_defaults = [{'cisco.ios.ios_facts': {'gather_subset': 'min'}}] + + plugin = ActionModule(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None) + plugin._execute_module = MagicMock() + + res = plugin.run(task_vars=self.fqcn_task_vars) + self.assertEqual(res['ansible_facts']['_ansible_facts_gathered'], True) + + mod_args = plugin._get_module_args('cisco.ios.ios_facts', task_vars=self.fqcn_task_vars) + self.assertEqual(mod_args['gather_subset'], 'min') + + facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars) + self.assertEqual(facts_modules, ['cisco.ios.ios_facts']) diff --git a/test/units/plugins/action/test_raw.py b/test/units/plugins/action/test_raw.py new file mode 100644 index 00000000..a8bde6c1 --- /dev/null +++ b/test/units/plugins/action/test_raw.py @@ -0,0 +1,105 @@ +# (c) 2016, Saran Ahluwalia +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleActionFail
+from units.compat import unittest
+from units.compat.mock import MagicMock, Mock
+from ansible.plugins.action.raw import ActionModule
+from ansible.playbook.task import Task
+from ansible.plugins.loader import connection_loader
+
+
+class TestCopyResultExclude(unittest.TestCase):
+
+    def setUp(self):
+        self.play_context = Mock()
+        self.play_context.shell = 'sh'
+        self.connection = connection_loader.get('local', self.play_context, os.devnull)
+
+    def tearDown(self):
+        pass
+
+    # The current behavior of the raw action with regard to executable is in question;
+    # test_raw_executable_is_not_empty_string verifies the current behavior (whether it is desired or not).
+    # Please refer to the following for context:
+    # Issue: https://github.com/ansible/ansible/issues/16054
+    # PR: https://github.com/ansible/ansible/pull/16085
+
+    def test_raw_executable_is_not_empty_string(self):
+
+        task = MagicMock(Task)
+        task.async_val = False
+
+        task.args = {'_raw_params': 'Args1'}
+        self.play_context.check_mode = False
+
+        self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+        self.mock_am._low_level_execute_command = Mock(return_value={})
+        self.mock_am.display = Mock()
+        self.mock_am._admin_users = ['root', 'toor']
+
+        self.mock_am.run()
+        self.mock_am._low_level_execute_command.assert_called_with('Args1', executable=False)
+
+    def test_raw_check_mode_is_True(self):
+
+        task = MagicMock(Task)
+        task.async_val = False
+
+        task.args = {'_raw_params': 'Args1'}
+        self.play_context.check_mode = True
+
+        try:
+            self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+        except AnsibleActionFail:
+            pass
+
+    def test_raw_test_environment_is_None(self):
+
+        task = MagicMock(Task)
+        task.async_val = False
+
+        task.args = {'_raw_params': 'Args1'}
+        task.environment = None
+        self.play_context.check_mode = False
+
+        self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+        self.mock_am._low_level_execute_command = Mock(return_value={})
+        self.mock_am.display = Mock()
+
+        self.assertEqual(task.environment, None)
+
+    def test_raw_task_vars_is_not_None(self):
+
+        task = MagicMock(Task)
+        task.async_val = False
+
+        task.args = {'_raw_params': 'Args1'}
+        task.environment = None
+        self.play_context.check_mode = False
+
+        self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+        self.mock_am._low_level_execute_command = Mock(return_value={})
+        self.mock_am.display = Mock()
+
+        self.mock_am.run(task_vars={'a': 'b'})
+        self.assertEqual(task.environment, None)
diff --git a/test/units/plugins/become/__init__.py b/test/units/plugins/become/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/units/plugins/become/conftest.py b/test/units/plugins/become/conftest.py
new file mode 100644
index
00000000..a04a5e2d --- /dev/null +++ b/test/units/plugins/become/conftest.py @@ -0,0 +1,37 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2017 Ansible Project +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.cli.arguments import option_helpers as opt_help +from ansible.utils import context_objects as co + + +@pytest.fixture +def parser(): + parser = opt_help.create_base_parser('testparser') + + opt_help.add_runas_options(parser) + opt_help.add_meta_options(parser) + opt_help.add_runtask_options(parser) + opt_help.add_vault_options(parser) + opt_help.add_async_options(parser) + opt_help.add_connect_options(parser) + opt_help.add_subset_options(parser) + opt_help.add_check_options(parser) + opt_help.add_inventory_options(parser) + + return parser + + +@pytest.fixture +def reset_cli_args(): + co.GlobalCLIArgs._Singleton__instance = None + yield + co.GlobalCLIArgs._Singleton__instance = None diff --git a/test/units/plugins/become/test_su.py b/test/units/plugins/become/test_su.py new file mode 100644 index 00000000..73eb71dd --- /dev/null +++ b/test/units/plugins/become/test_su.py @@ -0,0 +1,40 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2020 Ansible Project +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible import context +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import become_loader + + +def test_su(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + play_context = PlayContext() + + default_cmd = "/bin/foo" + default_exe = "/bin/bash" + su_exe = 'su' + su_flags = '' + + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert cmd == default_cmd + + success = 'BECOME-SUCCESS-.+?' 
+ + play_context.become = True + play_context.become_user = 'foo' + play_context.become_pass = None + play_context.become_method = 'su' + play_context.set_become_plugin(become_loader.get('su')) + play_context.become_flags = su_flags + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert (re.match("""%s %s -c '%s -c '"'"'echo %s; %s'"'"''""" % (su_exe, play_context.become_user, default_exe, + success, default_cmd), cmd) is not None) diff --git a/test/units/plugins/become/test_sudo.py b/test/units/plugins/become/test_sudo.py new file mode 100644 index 00000000..ba501296 --- /dev/null +++ b/test/units/plugins/become/test_sudo.py @@ -0,0 +1,45 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2020 Ansible Project +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible import context +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import become_loader + + +def test_sudo(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + play_context = PlayContext() + + default_cmd = "/bin/foo" + default_exe = "/bin/bash" + sudo_exe = 'sudo' + sudo_flags = '-H -s -n' + + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert cmd == default_cmd + + success = 'BECOME-SUCCESS-.+?' + + play_context.become = True + play_context.become_user = 'foo' + play_context.set_become_plugin(become_loader.get('sudo')) + play_context.become_flags = sudo_flags + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + + assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user, + default_exe, success, default_cmd), cmd) is not None) + + play_context.become_pass = 'testpass' + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert (re.match("""%s %s -p "%s" -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''), + r"\[sudo via ansible, key=.+?\] password:", play_context.become_user, + default_exe, success, default_cmd), cmd) is not None) diff --git a/test/units/plugins/cache/__init__.py b/test/units/plugins/cache/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py new file mode 100644 index 00000000..1f16b806 --- /dev/null +++ b/test/units/plugins/cache/test_cache.py @@ -0,0 +1,167 @@ +# (c) 2012-2015, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
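The su and sudo tests above match make_become_cmd output against regexes rather than exact strings because the command embeds a random success marker. The sketch below illustrates the general shape being asserted; sketch_sudo_wrap is an invented helper, not the become plugin's actual code, which derives flags and prompt handling from plugin options.

    import random
    import string
    from shlex import quote  # py3 stdlib; the tests above use six.moves.shlex_quote

    def sketch_sudo_wrap(cmd, executable, become_user, flags='-H -s -n'):
        # a random marker is echoed first so the caller can detect, from
        # the output stream, that privilege escalation actually succeeded
        key = ''.join(random.choice(string.ascii_lowercase) for _ in range(32))
        inner = 'echo BECOME-SUCCESS-%s; %s' % (key, cmd)
        return 'sudo %s -u %s %s -c %s' % (flags, become_user, executable, quote(inner))

    print(sketch_sudo_wrap('/bin/foo', '/bin/bash', 'foo'))
    # e.g. sudo -H -s -n -u foo /bin/bash -c 'echo BECOME-SUCCESS-abc...; /bin/foo'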
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest, mock +from ansible.errors import AnsibleError +from ansible.plugins.cache import FactCache, CachePluginAdjudicator +from ansible.plugins.cache.base import BaseCacheModule +from ansible.plugins.cache.memory import CacheModule as MemoryCache +from ansible.plugins.loader import cache_loader + +import pytest + + +class TestCachePluginAdjudicator: + # memory plugin cache + cache = CachePluginAdjudicator() + cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'} + cache['cache_key_2'] = {'key': 'value'} + + def test___setitem__(self): + self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']} + assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']} + + def test_inner___setitem__(self): + self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']} + self.cache['new_cache_key']['new_key1'][0] = 'updated_value1' + assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']} + + def test___contains__(self): + assert 'cache_key' in self.cache + assert 'not_cache_key' not in self.cache + + def test_get(self): + assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'} + + def test_get_with_default(self): + assert self.cache.get('foo', 'bar') == 'bar' + + def test_get_without_default(self): + assert self.cache.get('foo') is None + + def test___getitem__(self): + with pytest.raises(KeyError) as err: + self.cache['foo'] + + def test_pop_with_default(self): + assert self.cache.pop('foo', 'bar') == 'bar' + + def test_pop_without_default(self): + with pytest.raises(KeyError) as err: + assert self.cache.pop('foo') + + def test_pop(self): + v = self.cache.pop('cache_key_2') + assert v == {'key': 'value'} + assert 'cache_key_2' not in self.cache + + def test_update(self): + self.cache.update({'cache_key': {'key2': 'updatedvalue'}}) + assert self.cache['cache_key']['key2'] == 'updatedvalue' + + +class TestFactCache(unittest.TestCase): + + def setUp(self): + with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'): + self.cache = FactCache() + + def test_copy(self): + self.cache['avocado'] = 'fruit' + self.cache['daisy'] = 'flower' + a_copy = self.cache.copy() + self.assertEqual(type(a_copy), dict) + self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower')) + + def test_plugin_load_failure(self): + # See https://github.com/ansible/ansible/issues/18751 + # Note no fact_connection config set, so this will fail + with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'): + self.assertRaisesRegexp(AnsibleError, + "Unable to load the facts cache plugin.*json.*", + FactCache) + + def test_update(self): + self.cache.update({'cache_key': {'key2': 'updatedvalue'}}) + assert self.cache['cache_key']['key2'] == 'updatedvalue' + + def test_update_legacy(self): + self.cache.update('cache_key', {'key2': 'updatedvalue'}) + assert self.cache['cache_key']['key2'] == 'updatedvalue' + + def test_update_legacy_key_exists(self): + self.cache['cache_key'] = {'key': 'value', 'key2': 'value2'} + self.cache.update('cache_key', {'key': 'updatedvalue'}) + assert self.cache['cache_key']['key'] == 'updatedvalue' + assert self.cache['cache_key']['key2'] == 'value2' + + +class TestAbstractClass(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_subclass_error(self): + class CacheModule1(BaseCacheModule): + pass + with 
self.assertRaises(TypeError): + CacheModule1() # pylint: disable=abstract-class-instantiated + + class CacheModule2(BaseCacheModule): + def get(self, key): + super(CacheModule2, self).get(key) + + with self.assertRaises(TypeError): + CacheModule2() # pylint: disable=abstract-class-instantiated + + def test_subclass_success(self): + class CacheModule3(BaseCacheModule): + def get(self, key): + super(CacheModule3, self).get(key) + + def set(self, key, value): + super(CacheModule3, self).set(key, value) + + def keys(self): + super(CacheModule3, self).keys() + + def contains(self, key): + super(CacheModule3, self).contains(key) + + def delete(self, key): + super(CacheModule3, self).delete(key) + + def flush(self): + super(CacheModule3, self).flush() + + def copy(self): + super(CacheModule3, self).copy() + + self.assertIsInstance(CacheModule3(), CacheModule3) + + def test_memory_cachemodule(self): + self.assertIsInstance(MemoryCache(), MemoryCache) + + def test_memory_cachemodule_with_loader(self): + self.assertIsInstance(cache_loader.get('memory'), MemoryCache) diff --git a/test/units/plugins/callback/__init__.py b/test/units/plugins/callback/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py new file mode 100644 index 00000000..0c9a335c --- /dev/null +++ b/test/units/plugins/callback/test_callback.py @@ -0,0 +1,412 @@ +# (c) 2012-2014, Chris Meyers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re +import textwrap +import types + +from units.compat import unittest +from units.compat.mock import MagicMock + +from ansible.plugins.callback import CallbackBase + + +class TestCallback(unittest.TestCase): + # FIXME: This doesn't really test anything... 
+ def test_init(self): + CallbackBase() + + def test_display(self): + display_mock = MagicMock() + display_mock.verbosity = 0 + cb = CallbackBase(display=display_mock) + self.assertIs(cb._display, display_mock) + + def test_display_verbose(self): + display_mock = MagicMock() + display_mock.verbosity = 5 + cb = CallbackBase(display=display_mock) + self.assertIs(cb._display, display_mock) + + # TODO: import callback module so we can patch callback.cli/callback.C + + +class TestCallbackResults(unittest.TestCase): + + def test_get_item(self): + cb = CallbackBase() + results = {'item': 'some_item'} + res = cb._get_item(results) + self.assertEqual(res, 'some_item') + + def test_get_item_no_log(self): + cb = CallbackBase() + results = {'item': 'some_item', '_ansible_no_log': True} + res = cb._get_item(results) + self.assertEqual(res, "(censored due to no_log)") + + results = {'item': 'some_item', '_ansible_no_log': False} + res = cb._get_item(results) + self.assertEqual(res, "some_item") + + def test_get_item_label(self): + cb = CallbackBase() + results = {'item': 'some_item'} + res = cb._get_item_label(results) + self.assertEqual(res, 'some_item') + + def test_get_item_label_no_log(self): + cb = CallbackBase() + results = {'item': 'some_item', '_ansible_no_log': True} + res = cb._get_item_label(results) + self.assertEqual(res, "(censored due to no_log)") + + results = {'item': 'some_item', '_ansible_no_log': False} + res = cb._get_item_label(results) + self.assertEqual(res, "some_item") + + def test_clean_results_debug_task(self): + cb = CallbackBase() + result = {'item': 'some_item', + 'invocation': 'foo --bar whatever [some_json]', + 'a': 'a single a in result note letter a is in invocation', + 'b': 'a single b in result note letter b is not in invocation', + 'changed': True} + + cb._clean_results(result, 'debug') + + # See https://github.com/ansible/ansible/issues/33723 + self.assertTrue('a' in result) + self.assertTrue('b' in result) + self.assertFalse('invocation' in result) + self.assertFalse('changed' in result) + + def test_clean_results_debug_task_no_invocation(self): + cb = CallbackBase() + result = {'item': 'some_item', + 'a': 'a single a in result note letter a is in invocation', + 'b': 'a single b in result note letter b is not in invocation', + 'changed': True} + + cb._clean_results(result, 'debug') + self.assertTrue('a' in result) + self.assertTrue('b' in result) + self.assertFalse('changed' in result) + self.assertFalse('invocation' in result) + + def test_clean_results_debug_task_empty_results(self): + cb = CallbackBase() + result = {} + cb._clean_results(result, 'debug') + self.assertFalse('invocation' in result) + self.assertEqual(len(result), 0) + + def test_clean_results(self): + cb = CallbackBase() + result = {'item': 'some_item', + 'invocation': 'foo --bar whatever [some_json]', + 'a': 'a single a in result note letter a is in invocation', + 'b': 'a single b in result note letter b is not in invocation', + 'changed': True} + + expected_result = result.copy() + cb._clean_results(result, 'ebug') + self.assertEqual(result, expected_result) + + +class TestCallbackDumpResults(object): + def test_internal_keys(self): + cb = CallbackBase() + result = {'item': 'some_item', + '_ansible_some_var': 'SENTINEL', + 'testing_ansible_out': 'should_be_left_in LEFTIN', + 'invocation': 'foo --bar whatever [some_json]', + 'some_dict_key': {'a_sub_dict_for_key': 'baz'}, + 'bad_dict_key': {'_ansible_internal_blah': 'SENTINEL'}, + 'changed': True} + json_out = cb._dump_results(result) + assert 
'"_ansible_' not in json_out + assert 'SENTINEL' not in json_out + assert 'LEFTIN' in json_out + + def test_exception(self): + cb = CallbackBase() + result = {'item': 'some_item LEFTIN', + 'exception': ['frame1', 'SENTINEL']} + json_out = cb._dump_results(result) + assert 'SENTINEL' not in json_out + assert 'exception' not in json_out + assert 'LEFTIN' in json_out + + def test_verbose(self): + cb = CallbackBase() + result = {'item': 'some_item LEFTIN', + '_ansible_verbose_always': 'chicane'} + json_out = cb._dump_results(result) + assert 'SENTINEL' not in json_out + assert 'LEFTIN' in json_out + + def test_diff(self): + cb = CallbackBase() + result = {'item': 'some_item LEFTIN', + 'diff': ['remove stuff', 'added LEFTIN'], + '_ansible_verbose_always': 'chicane'} + json_out = cb._dump_results(result) + assert 'SENTINEL' not in json_out + assert 'LEFTIN' in json_out + + def test_mixed_keys(self): + cb = CallbackBase() + result = {3: 'pi', + 'tau': 6} + json_out = cb._dump_results(result) + round_trip_result = json.loads(json_out) + assert len(round_trip_result) == 2 + assert '3' in round_trip_result + assert 'tau' in round_trip_result + assert round_trip_result['3'] == 'pi' + assert round_trip_result['tau'] == 6 + + +class TestCallbackDiff(unittest.TestCase): + + def setUp(self): + self.cb = CallbackBase() + + def _strip_color(self, s): + return re.sub('\033\\[[^m]*m', '', s) + + def test_difflist(self): + # TODO: split into smaller tests? + difflist = [{'before': u'preface\nThe Before String\npostscript', + 'after': u'preface\nThe After String\npostscript', + 'before_header': u'just before', + 'after_header': u'just after' + }, + {'before': u'preface\nThe Before String\npostscript', + 'after': u'preface\nThe After String\npostscript', + }, + {'src_binary': 'chicane'}, + {'dst_binary': 'chicanery'}, + {'dst_larger': 1}, + {'src_larger': 2}, + {'prepared': u'what does prepared do?'}, + {'before_header': u'just before'}, + {'after_header': u'just after'}] + + res = self.cb._get_diff(difflist) + + self.assertIn(u'Before String', res) + self.assertIn(u'After String', res) + self.assertIn(u'just before', res) + self.assertIn(u'just after', res) + + def test_simple_diff(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree\n', + 'after': 'one\nthree\nfour\n', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -1,3 +1,3 @@ + one + -two + three + +four + + ''')) + + def test_new_file(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': '', + 'after': 'one\ntwo\nthree\n', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -0,0 +1,3 @@ + +one + +two + +three + + ''')) + + def test_clear_file(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree\n', + 'after': '', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -1,3 +0,0 @@ + -one + -two + -three + + ''')) + + def test_no_trailing_newline_before(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 
'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree', + 'after': 'one\ntwo\nthree\n', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -1,3 +1,3 @@ + one + two + -three + \\ No newline at end of file + +three + + ''')) + + def test_no_trailing_newline_after(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree\n', + 'after': 'one\ntwo\nthree', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -1,3 +1,3 @@ + one + two + -three + +three + \\ No newline at end of file + + ''')) + + def test_no_trailing_newline_both(self): + self.assertMultiLineEqual( + self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree', + 'after': 'one\ntwo\nthree', + }), + '') + + def test_no_trailing_newline_both_with_some_changes(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before_header': 'somefile.txt', + 'after_header': 'generated from template somefile.j2', + 'before': 'one\ntwo\nthree', + 'after': 'one\nfive\nthree', + })), + textwrap.dedent('''\ + --- before: somefile.txt + +++ after: generated from template somefile.j2 + @@ -1,3 +1,3 @@ + one + -two + +five + three + \\ No newline at end of file + + ''')) + + def test_diff_dicts(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before': dict(one=1, two=2, three=3), + 'after': dict(one=1, three=3, four=4), + })), + textwrap.dedent('''\ + --- before + +++ after + @@ -1,5 +1,5 @@ + { + + "four": 4, + "one": 1, + - "three": 3, + - "two": 2 + + "three": 3 + } + + ''')) + + def test_diff_before_none(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before': None, + 'after': 'one line\n', + })), + textwrap.dedent('''\ + --- before + +++ after + @@ -0,0 +1 @@ + +one line + + ''')) + + def test_diff_after_none(self): + self.assertMultiLineEqual( + self._strip_color(self.cb._get_diff({ + 'before': 'one line\n', + 'after': None, + })), + textwrap.dedent('''\ + --- before + +++ after + @@ -1 +0,0 @@ + -one line + + ''')) + + +class TestCallbackOnMethods(unittest.TestCase): + def _find_on_methods(self, callback): + cb_dir = dir(callback) + method_names = [x for x in cb_dir if '_on_' in x] + methods = [getattr(callback, mn) for mn in method_names] + return methods + + def test_are_methods(self): + cb = CallbackBase() + for method in self._find_on_methods(cb): + self.assertIsInstance(method, types.MethodType) + + def test_on_any(self): + cb = CallbackBase() + cb.v2_on_any('whatever', some_keyword='blippy') + cb.on_any('whatever', some_keyword='blippy') diff --git a/test/units/plugins/connection/__init__.py b/test/units/plugins/connection/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/connection/test_connection.py b/test/units/plugins/connection/test_connection.py new file mode 100644 index 00000000..17c2e085 --- /dev/null +++ b/test/units/plugins/connection/test_connection.py @@ -0,0 +1,169 @@ +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# 
(at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO +import sys +import pytest + +from units.compat import mock +from units.compat import unittest +from units.compat.mock import MagicMock +from units.compat.mock import patch +from ansible.errors import AnsibleError +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ConnectionBase +from ansible.plugins.loader import become_loader + + +class TestConnectionBaseClass(unittest.TestCase): + + def setUp(self): + self.play_context = PlayContext() + self.play_context.prompt = ( + '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + ) + self.in_stream = StringIO() + + def tearDown(self): + pass + + def test_subclass_error(self): + class ConnectionModule1(ConnectionBase): + pass + with self.assertRaises(TypeError): + ConnectionModule1() # pylint: disable=abstract-class-instantiated + + class ConnectionModule2(ConnectionBase): + def get(self, key): + super(ConnectionModule2, self).get(key) + + with self.assertRaises(TypeError): + ConnectionModule2() # pylint: disable=abstract-class-instantiated + + def test_subclass_success(self): + class ConnectionModule3(ConnectionBase): + + @property + def transport(self): + pass + + def _connect(self): + pass + + def exec_command(self): + pass + + def put_file(self): + pass + + def fetch_file(self): + pass + + def close(self): + pass + + self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3) + + def test_check_password_prompt(self): + local = ( + b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n' + b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n' + ) + + ssh_pipelining_vvvv = b''' +debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251 +debug2: process_mux_new_session: channel 1: request tty 0, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0 +debug3: process_mux_new_session: got fds stdin 9, stdout 10, stderr 11 +debug2: client_session2_setup: id 2 +debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0' +debug2: channel 2: request exec confirm 1 +debug2: channel 2: rcvd ext data 67 +[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug2: channel 2: written 67 to efd 11 +BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq +debug3: receive packet: type 98 +''' # noqa + + ssh_nopipelining_vvvv = b''' +debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251 +debug2: process_mux_new_session: channel 1: request tty 1, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo 
BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0 +debug3: mux_client_request_session: session request sent +debug3: send packet: type 98 +debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0' +debug2: channel 2: request exec confirm 1 +debug2: exec request accepted on channel 2 +[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug3: receive packet: type 2 +debug3: Received SSH2_MSG_IGNORE +debug3: Received SSH2_MSG_IGNORE + +BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq +debug3: receive packet: type 98 +''' # noqa + + ssh_novvvv = ( + b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n' + b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n' + ) + + dns_issue = ( + b'timeout waiting for privilege escalation password prompt:\n' + b'sudo: sudo: unable to resolve host tcloud014\n' + b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n' + b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n' + ) + + nothing = b'' + + in_front = b''' +debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo +''' + + class ConnectionFoo(ConnectionBase): + + @property + def transport(self): + pass + + def _connect(self): + pass + + def exec_command(self): + pass + + def put_file(self): + pass + + def fetch_file(self): + pass + + def close(self): + pass + + c = ConnectionFoo(self.play_context, self.in_stream) + c.set_become_plugin(become_loader.get('sudo')) + c.become.prompt = '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + + self.assertTrue(c.check_password_prompt(local)) + self.assertTrue(c.check_password_prompt(ssh_pipelining_vvvv)) + self.assertTrue(c.check_password_prompt(ssh_nopipelining_vvvv)) + self.assertTrue(c.check_password_prompt(ssh_novvvv)) + self.assertTrue(c.check_password_prompt(dns_issue)) + self.assertFalse(c.check_password_prompt(nothing)) + self.assertFalse(c.check_password_prompt(in_front)) diff --git a/test/units/plugins/connection/test_local.py b/test/units/plugins/connection/test_local.py new file mode 100644 index 00000000..e5525855 --- /dev/null +++ b/test/units/plugins/connection/test_local.py @@ -0,0 +1,40 @@ +# +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO +import pytest + +from units.compat import unittest +from ansible.plugins.connection import local +from ansible.playbook.play_context import PlayContext + + +class TestLocalConnectionClass(unittest.TestCase): + + def test_local_connection_module(self): + play_context = PlayContext() + play_context.prompt = ( + '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + ) + in_stream = StringIO() + + self.assertIsInstance(local.Connection(play_context, in_stream), local.Connection) diff --git a/test/units/plugins/connection/test_paramiko.py b/test/units/plugins/connection/test_paramiko.py new file mode 100644 index 00000000..e3643b14 --- /dev/null +++ b/test/units/plugins/connection/test_paramiko.py @@ -0,0 +1,42 @@ +# +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO +import pytest + +from units.compat import unittest +from ansible.plugins.connection import paramiko_ssh +from ansible.playbook.play_context import PlayContext + + +class TestParamikoConnectionClass(unittest.TestCase): + + def test_paramiko_connection_module(self): + play_context = PlayContext() + play_context.prompt = ( + '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + ) + in_stream = StringIO() + + self.assertIsInstance( + paramiko_ssh.Connection(play_context, in_stream), + paramiko_ssh.Connection) diff --git a/test/units/plugins/connection/test_psrp.py b/test/units/plugins/connection/test_psrp.py new file mode 100644 index 00000000..f6416751 --- /dev/null +++ b/test/units/plugins/connection/test_psrp.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- +# (c) 2018, Jordan Borean +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from io import StringIO +from units.compat.mock import MagicMock + +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import connection_loader +from ansible.utils.display import Display + + +@pytest.fixture(autouse=True) +def psrp_connection(): + """Imports the psrp connection plugin with a mocked pypsrp module for testing""" + + # Take a snapshot of sys.modules before we manipulate it + orig_modules = sys.modules.copy() + try: + fake_pypsrp = MagicMock() + fake_pypsrp.FEATURES = [ + 'wsman_locale', + 'wsman_read_timeout', + 'wsman_reconnections', + ] + + fake_wsman = MagicMock() + fake_wsman.AUTH_KWARGS = { + "certificate": ["certificate_key_pem", "certificate_pem"], + "credssp": ["credssp_auth_mechanism", "credssp_disable_tlsv1_2", + 
"credssp_minimum_version"], + "negotiate": ["negotiate_delegate", "negotiate_hostname_override", + "negotiate_send_cbt", "negotiate_service"], + "mock": ["mock_test1", "mock_test2"], + } + + sys.modules["pypsrp"] = fake_pypsrp + sys.modules["pypsrp.complex_objects"] = MagicMock() + sys.modules["pypsrp.exceptions"] = MagicMock() + sys.modules["pypsrp.host"] = MagicMock() + sys.modules["pypsrp.powershell"] = MagicMock() + sys.modules["pypsrp.shell"] = MagicMock() + sys.modules["pypsrp.wsman"] = fake_wsman + sys.modules["requests.exceptions"] = MagicMock() + + from ansible.plugins.connection import psrp + + # Take a copy of the original import state vars before we set to an ok import + orig_has_psrp = psrp.HAS_PYPSRP + orig_psrp_imp_err = psrp.PYPSRP_IMP_ERR + + yield psrp + + psrp.HAS_PYPSRP = orig_has_psrp + psrp.PYPSRP_IMP_ERR = orig_psrp_imp_err + finally: + # Restore sys.modules back to our pre-shenanigans + sys.modules = orig_modules + + +class TestConnectionPSRP(object): + + OPTIONS_DATA = ( + # default options + ( + {'_extras': {}}, + { + '_psrp_auth': 'negotiate', + '_psrp_cert_validation': True, + '_psrp_configuration_name': 'Microsoft.PowerShell', + '_psrp_connection_timeout': 30, + '_psrp_message_encryption': 'auto', + '_psrp_host': 'inventory_hostname', + '_psrp_conn_kwargs': { + 'server': 'inventory_hostname', + 'port': 5986, + 'username': None, + 'password': None, + 'ssl': True, + 'path': 'wsman', + 'auth': 'negotiate', + 'cert_validation': True, + 'connection_timeout': 30, + 'encryption': 'auto', + 'proxy': None, + 'no_proxy': False, + 'max_envelope_size': 153600, + 'operation_timeout': 20, + 'certificate_key_pem': None, + 'certificate_pem': None, + 'credssp_auth_mechanism': 'auto', + 'credssp_disable_tlsv1_2': False, + 'credssp_minimum_version': 2, + 'negotiate_delegate': None, + 'negotiate_hostname_override': None, + 'negotiate_send_cbt': True, + 'negotiate_service': 'WSMAN', + 'read_timeout': 30, + 'reconnection_backoff': 2.0, + 'reconnection_retries': 0, + }, + '_psrp_max_envelope_size': 153600, + '_psrp_ignore_proxy': False, + '_psrp_operation_timeout': 20, + '_psrp_pass': None, + '_psrp_path': 'wsman', + '_psrp_port': 5986, + '_psrp_proxy': None, + '_psrp_protocol': 'https', + '_psrp_user': None + }, + ), + # ssl=False when port defined to 5985 + ( + {'_extras': {}, 'ansible_port': '5985'}, + { + '_psrp_port': 5985, + '_psrp_protocol': 'http' + }, + ), + # ssl=True when port defined to not 5985 + ( + {'_extras': {}, 'ansible_port': 1234}, + { + '_psrp_port': 1234, + '_psrp_protocol': 'https' + }, + ), + # port 5986 when ssl=True + ( + {'_extras': {}, 'ansible_psrp_protocol': 'https'}, + { + '_psrp_port': 5986, + '_psrp_protocol': 'https' + }, + ), + # port 5985 when ssl=False + ( + {'_extras': {}, 'ansible_psrp_protocol': 'http'}, + { + '_psrp_port': 5985, + '_psrp_protocol': 'http' + }, + ), + # psrp extras + ( + {'_extras': {'ansible_psrp_mock_test1': True}}, + { + '_psrp_conn_kwargs': { + 'server': 'inventory_hostname', + 'port': 5986, + 'username': None, + 'password': None, + 'ssl': True, + 'path': 'wsman', + 'auth': 'negotiate', + 'cert_validation': True, + 'connection_timeout': 30, + 'encryption': 'auto', + 'proxy': None, + 'no_proxy': False, + 'max_envelope_size': 153600, + 'operation_timeout': 20, + 'certificate_key_pem': None, + 'certificate_pem': None, + 'credssp_auth_mechanism': 'auto', + 'credssp_disable_tlsv1_2': False, + 'credssp_minimum_version': 2, + 'negotiate_delegate': None, + 'negotiate_hostname_override': None, + 'negotiate_send_cbt': True, + 
'negotiate_service': 'WSMAN', + 'read_timeout': 30, + 'reconnection_backoff': 2.0, + 'reconnection_retries': 0, + 'mock_test1': True + }, + }, + ), + # cert validation through string repr of bool + ( + {'_extras': {}, 'ansible_psrp_cert_validation': 'ignore'}, + { + '_psrp_cert_validation': False + }, + ), + # cert validation path + ( + {'_extras': {}, 'ansible_psrp_cert_trust_path': '/path/cert.pem'}, + { + '_psrp_cert_validation': '/path/cert.pem' + }, + ), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + # pylint: disable=undefined-variable + @pytest.mark.parametrize('options, expected', + ((o, e) for o, e in OPTIONS_DATA)) + def test_set_options(self, options, expected): + pc = PlayContext() + new_stdin = StringIO() + + conn = connection_loader.get('psrp', pc, new_stdin) + conn.set_options(var_options=options) + conn._build_kwargs() + + for attr, expected in expected.items(): + actual = getattr(conn, attr) + assert actual == expected, \ + "psrp attr '%s', actual '%s' != expected '%s'"\ + % (attr, actual, expected) + + def test_set_invalid_extras_options(self, monkeypatch): + pc = PlayContext() + new_stdin = StringIO() + + conn = connection_loader.get('psrp', pc, new_stdin) + conn.set_options(var_options={'_extras': {'ansible_psrp_mock_test3': True}}) + + mock_display = MagicMock() + monkeypatch.setattr(Display, "warning", mock_display) + conn._build_kwargs() + + assert mock_display.call_args[0][0] == \ + 'ansible_psrp_mock_test3 is unsupported by the current psrp version installed' diff --git a/test/units/plugins/connection/test_ssh.py b/test/units/plugins/connection/test_ssh.py new file mode 100644 index 00000000..cfe7fcb6 --- /dev/null +++ b/test/units/plugins/connection/test_ssh.py @@ -0,0 +1,688 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
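+
+# These tests cover the ssh connection plugin: command construction, become
+# prompt handling in _examine_output(), sftp/scp file transfers, and the
+# retry behaviour on failures.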
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from io import StringIO +import pytest + + +from ansible import constants as C +from ansible.errors import AnsibleAuthenticationFailure +from units.compat import unittest +from units.compat.mock import patch, MagicMock, PropertyMock +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils.compat.selectors import SelectorKey, EVENT_READ +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_bytes +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ssh +from ansible.plugins.loader import connection_loader, become_loader + + +class TestConnectionBaseClass(unittest.TestCase): + + def test_plugins_connection_ssh_module(self): + play_context = PlayContext() + play_context.prompt = ( + '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: ' + ) + in_stream = StringIO() + + self.assertIsInstance(ssh.Connection(play_context, in_stream), ssh.Connection) + + def test_plugins_connection_ssh_basic(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + + # connect just returns self, so assert that + res = conn._connect() + self.assertEqual(conn, res) + + ssh.SSHPASS_AVAILABLE = False + self.assertFalse(conn._sshpass_available()) + + ssh.SSHPASS_AVAILABLE = True + self.assertTrue(conn._sshpass_available()) + + with patch('subprocess.Popen') as p: + ssh.SSHPASS_AVAILABLE = None + p.return_value = MagicMock() + self.assertTrue(conn._sshpass_available()) + + ssh.SSHPASS_AVAILABLE = None + p.return_value = None + p.side_effect = OSError() + self.assertFalse(conn._sshpass_available()) + + conn.close() + self.assertFalse(conn._connected) + + def test_plugins_connection_ssh__build_command(self): + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('ssh', pc, new_stdin) + conn._build_command('ssh') + + def test_plugins_connection_ssh_exec_command(self): + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('ssh', pc, new_stdin) + + conn._build_command = MagicMock() + conn._build_command.return_value = 'ssh something something' + conn._run = MagicMock() + conn._run.return_value = (0, 'stdout', 'stderr') + conn.get_option = MagicMock() + conn.get_option.return_value = True + + res, stdout, stderr = conn.exec_command('ssh') + res, stdout, stderr = conn.exec_command('ssh', 'this is some data') + + def test_plugins_connection_ssh__examine_output(self): + pc = PlayContext() + new_stdin = StringIO() + + conn = connection_loader.get('ssh', pc, new_stdin) + conn.set_become_plugin(become_loader.get('sudo')) + + conn.check_password_prompt = MagicMock() + conn.check_become_success = MagicMock() + conn.check_incorrect_password = MagicMock() + conn.check_missing_password = MagicMock() + + def _check_password_prompt(line): + if b'foo' in line: + return True + return False + + def _check_become_success(line): + if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line: + return True + return False + + def _check_incorrect_password(line): + if b'incorrect password' in line: + return True + return False + + def _check_missing_password(line): + if b'bad password' in line: + return True + return False + + conn.become.check_password_prompt = MagicMock(side_effect=_check_password_prompt) + conn.become.check_become_success = MagicMock(side_effect=_check_become_success) + 
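+        # the real _examine_output() under test consults these become-plugin
+        # checkers for every line of output; the stubs above and below match
+        # fixed marker substrings rather than real prompts
+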
conn.become.check_incorrect_password = MagicMock(side_effect=_check_incorrect_password) + conn.become.check_missing_password = MagicMock(side_effect=_check_missing_password) + + # test examining output for prompt + conn._flags = dict( + become_prompt=False, + become_success=False, + become_error=False, + become_nopasswd_error=False, + ) + + pc.prompt = True + conn.become.prompt = True + + def get_option(option): + if option == 'become_pass': + return 'password' + return None + + conn.become.get_option = get_option + output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False) + self.assertEqual(output, b'line 1\nline 2\nline 3\n') + self.assertEqual(unprocessed, b'this should be the remainder') + self.assertTrue(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for become prompt + conn._flags = dict( + become_prompt=False, + become_success=False, + become_error=False, + become_nopasswd_error=False, + ) + + pc.prompt = False + conn.become.prompt = False + pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' + conn.become.success = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' + output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False) + self.assertEqual(output, b'line 1\nline 2\nline 3\n') + self.assertEqual(unprocessed, b'') + self.assertFalse(conn._flags['become_prompt']) + self.assertTrue(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for become failure + conn._flags = dict( + become_prompt=False, + become_success=False, + become_error=False, + become_nopasswd_error=False, + ) + + pc.prompt = False + conn.become.prompt = False + pc.success_key = None + output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True) + self.assertEqual(output, b'line 1\nline 2\nincorrect password\n') + self.assertEqual(unprocessed, b'') + self.assertFalse(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertTrue(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for missing password + conn._flags = dict( + become_prompt=False, + become_success=False, + become_error=False, + become_nopasswd_error=False, + ) + + pc.prompt = False + conn.become.prompt = False + pc.success_key = None + output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True) + self.assertEqual(output, b'line 1\nbad password\n') + self.assertEqual(unprocessed, b'') + self.assertFalse(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertTrue(conn._flags['become_nopasswd_error']) + + @patch('time.sleep') + @patch('os.path.exists') + def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep): + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('ssh', pc, new_stdin) + conn._build_command = MagicMock() + conn._bare_run = MagicMock() + + mock_ospe.return_value = True + conn._build_command.return_value = 'some command to run' + conn._bare_run.return_value = (0, '', '') + conn.host = "some_host" + + 
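+        # The cases below exercise the three C.DEFAULT_SCP_IF_SSH modes:
+        # 'smart' tries SFTP first and falls back to SCP on error, True
+        # forces SCP (no batch data on stdin), and False forces SFTP, which
+        # feeds the batch command 'put <src> <dest>' to stdin.
+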
C.ANSIBLE_SSH_RETRIES = 9 + + # Test with C.DEFAULT_SCP_IF_SSH set to smart + # Test when SFTP works + C.DEFAULT_SCP_IF_SSH = 'smart' + expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n' + conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + # Test when SFTP doesn't work but SCP does + conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')] + conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + conn._bare_run.side_effect = None + + # test with C.DEFAULT_SCP_IF_SSH enabled + C.DEFAULT_SCP_IF_SSH = True + conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + + conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + + # test with C.DEFAULT_SCP_IF_SSH disabled + C.DEFAULT_SCP_IF_SSH = False + expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n' + conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + expected_in_data = b' '.join((b'put', + to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')), + to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n' + conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + # test that a non-zero rc raises an error + conn._bare_run.return_value = (1, 'stdout', 'some errors') + self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file') + + # test that a not-found path raises an error + mock_ospe.return_value = False + conn._bare_run.return_value = (0, 'stdout', '') + self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file') + + @patch('time.sleep') + def test_plugins_connection_ssh_fetch_file(self, mock_sleep): + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('ssh', pc, new_stdin) + conn._build_command = MagicMock() + conn._bare_run = MagicMock() + conn._load_name = 'ssh' + + conn._build_command.return_value = 'some command to run' + conn._bare_run.return_value = (0, '', '') + conn.host = "some_host" + + C.ANSIBLE_SSH_RETRIES = 9 + + # Test with C.DEFAULT_SCP_IF_SSH set to smart + # Test when SFTP works + C.DEFAULT_SCP_IF_SSH = 'smart' + expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n' + conn.set_options({}) + conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + # Test when SFTP doesn't work but SCP does + conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')] + conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + conn._bare_run.side_effect = None + + # test with C.DEFAULT_SCP_IF_SSH enabled + C.DEFAULT_SCP_IF_SSH = True + conn.fetch_file('/path/to/in/file', 
'/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + + conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._bare_run.assert_called_with('some command to run', None, checkrc=False) + + # test with C.DEFAULT_SCP_IF_SSH disabled + C.DEFAULT_SCP_IF_SSH = False + expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n' + conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + expected_in_data = b' '.join((b'get', + to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')), + to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n' + conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False) + + # test that a non-zero rc raises an error + conn._bare_run.return_value = (1, 'stdout', 'some errors') + self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file') + + +class MockSelector(object): + def __init__(self): + self.files_watched = 0 + self.register = MagicMock(side_effect=self._register) + self.unregister = MagicMock(side_effect=self._unregister) + self.close = MagicMock() + self.get_map = MagicMock(side_effect=self._get_map) + self.select = MagicMock() + + def _register(self, *args, **kwargs): + self.files_watched += 1 + + def _unregister(self, *args, **kwargs): + self.files_watched -= 1 + + def _get_map(self, *args, **kwargs): + return self.files_watched + + +@pytest.fixture +def mock_run_env(request, mocker): + pc = PlayContext() + new_stdin = StringIO() + + conn = connection_loader.get('ssh', pc, new_stdin) + conn.set_become_plugin(become_loader.get('sudo')) + conn._send_initial_data = MagicMock() + conn._examine_output = MagicMock() + conn._terminate_process = MagicMock() + conn._load_name = 'ssh' + conn.sshpass_pipe = [MagicMock(), MagicMock()] + + request.cls.pc = pc + request.cls.conn = conn + + mock_popen_res = MagicMock() + mock_popen_res.poll = MagicMock() + mock_popen_res.wait = MagicMock() + mock_popen_res.stdin = MagicMock() + mock_popen_res.stdin.fileno.return_value = 1000 + mock_popen_res.stdout = MagicMock() + mock_popen_res.stdout.fileno.return_value = 1001 + mock_popen_res.stderr = MagicMock() + mock_popen_res.stderr.fileno.return_value = 1002 + mock_popen_res.returncode = 0 + request.cls.mock_popen_res = mock_popen_res + + mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res) + request.cls.mock_popen = mock_popen + + request.cls.mock_selector = MockSelector() + mocker.patch('ansible.module_utils.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector) + + request.cls.mock_openpty = mocker.patch('pty.openpty') + + mocker.patch('fcntl.fcntl') + mocker.patch('os.write') + mocker.patch('os.close') + + +@pytest.mark.usefixtures('mock_run_env') +class TestSSHConnectionRun(object): + # FIXME: + # These tests are little more than a smoketest. 
Need to enhance them
+    # a bit to check that they're calling the relevant functions and making
+    # complete coverage of the code paths
+    def test_no_escalation(self):
+        self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
+        self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            []]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+        assert return_code == 0
+        assert b_stdout == b'my_stdout\nsecond_line'
+        assert b_stderr == b'my_stderr'
+        assert self.mock_selector.register.called is True
+        assert self.mock_selector.register.call_count == 2
+        assert self.conn._send_initial_data.called is True
+        assert self.conn._send_initial_data.call_count == 1
+        assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+    def test_with_password(self):
+        # test with a password set to trigger the sshpass write
+        self.pc.password = '12345'
+        self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
+        self.mock_popen_res.stderr.read.side_effect = [b""]
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            []]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
+        assert return_code == 0
+        assert b_stdout == b'some data'
+        assert b_stderr == b''
+        assert self.mock_selector.register.called is True
+        assert self.mock_selector.register.call_count == 2
+        assert self.conn._send_initial_data.called is True
+        assert self.conn._send_initial_data.call_count == 1
+        assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
+
+    def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
+        if state == 'awaiting_prompt':
+            self.conn._flags['become_prompt'] = True
+        elif state == 'awaiting_escalation':
+            self.conn._flags['become_success'] = True
+        return (b'', b'')
+
+    def test_password_with_prompt(self):
+        # test with password prompting enabled
+        self.pc.password = None
+        self.conn.become.prompt = b'Password:'
+        self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
+        self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
+        self.mock_popen_res.stderr.read.side_effect = [b""]
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
+             (SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            []]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+        assert return_code == 0
+        assert b_stdout == b''
+        assert b_stderr == b''
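+        # both stay empty because the stubbed _examine_output consumes every
+        # chunk (the prompt and the success marker) and passes nothing through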
+
+        assert self.mock_selector.register.called is True
+        assert self.mock_selector.register.call_count == 2
+        assert self.conn._send_initial_data.called is True
+        assert self.conn._send_initial_data.call_count == 1
+        assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+    def test_password_with_become(self):
+        # test with some become settings
+        self.pc.prompt = b'Password:'
+        self.conn.become.prompt = b'Password:'
+        self.pc.become = True
+        self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
+        self.conn.become._id = 'abcdefg'
+        self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
+        self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
+        self.mock_popen_res.stderr.read.side_effect = [b"123"]
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            []]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+        self.mock_popen_res.stdin.flush.assert_called_once_with()
+        assert return_code == 0
+        assert b_stdout == b'abc'
+        assert b_stderr == b'123'
+        assert self.mock_selector.register.called is True
+        assert self.mock_selector.register.call_count == 2
+        assert self.conn._send_initial_data.called is True
+        assert self.conn._send_initial_data.call_count == 1
+        assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+    def test_password_without_data(self):
+        # simulate no data input, with the first Popen (using new ptys) failing
+        # and the retry without them succeeding
+        self.mock_popen.return_value = None
+        self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
+
+        # simulate no data input
+        self.mock_openpty.return_value = (98, 99)
+        self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
+        self.mock_popen_res.stderr.read.side_effect = [b""]
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            []]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
+        assert return_code == 0
+        assert b_stdout == b'some data'
+        assert b_stderr == b''
+        assert self.mock_selector.register.called is True
+        assert self.mock_selector.register.call_count == 2
+        assert self.conn._send_initial_data.called is False
+
+
+@pytest.mark.usefixtures('mock_run_env')
+class TestSSHConnectionRetries(object):
+    def test_incorrect_password(self, monkeypatch):
+        monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+        monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 5)
+        monkeypatch.setattr('time.sleep', lambda x: None)
+
+        self.mock_popen_res.stdout.read.side_effect = [b'']
+        self.mock_popen_res.stderr.read.side_effect = [b'Permission denied, please try again.\r\n']
+        type(self.mock_popen_res).returncode = PropertyMock(side_effect=[5] * 4)
+
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [],
+        ]
+
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        self.conn._build_command = MagicMock()
+        self.conn._build_command.return_value = [b'sshpass', b'-d41', b'ssh', b'-C']
+        self.conn.get_option = MagicMock()
+        self.conn.get_option.return_value = True
+
+        exception_info = pytest.raises(AnsibleAuthenticationFailure, self.conn.exec_command, 'sshpass', 'some data')
+        assert exception_info.value.message == ('Invalid/incorrect username/password. Skipping remaining 5 retries to prevent account lockout: '
+                                                'Permission denied, please try again.')
+        assert self.mock_popen.call_count == 1
+
+    def test_retry_then_success(self, monkeypatch):
+        monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+        monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
+
+        monkeypatch.setattr('time.sleep', lambda x: None)
+
+        self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
+        self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
+        type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
+
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            []
+        ]
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        self.conn._build_command = MagicMock()
+        self.conn._build_command.return_value = 'ssh'
+        self.conn.get_option = MagicMock()
+        self.conn.get_option.return_value = True
+
+        return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
+        assert return_code == 0
+        assert b_stdout == b'my_stdout\nsecond_line'
+        assert b_stderr == b'my_stderr'
+
+    def test_multiple_failures(self, monkeypatch):
+        monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+        monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
+
+        monkeypatch.setattr('time.sleep', lambda x: None)
+
+        self.mock_popen_res.stdout.read.side_effect = [b""] * 10
+        self.mock_popen_res.stderr.read.side_effect = [b""] * 10
+        type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
+
+        self.mock_selector.select.side_effect = [
+            [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+            [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+            [],
+        ] * 10
+        self.mock_selector.get_map.side_effect = lambda: True
+
+        self.conn._build_command = MagicMock()
+        self.conn._build_command.return_value = 'ssh'
+        self.conn.get_option = MagicMock()
+        self.conn.get_option.return_value = True
+
+        pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
+        assert self.mock_popen.call_count == 10
+
+    def test_arbitrary_exceptions(self, monkeypatch):
+        monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+        monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
+
+        monkeypatch.setattr('time.sleep', lambda x: None)
+
+        self.conn._build_command = MagicMock()
+        self.conn._build_command.return_value = 'ssh'
+        self.conn.get_option = MagicMock()
+        self.conn.get_option.return_value = True
+
+        self.mock_popen.side_effect = [Exception('bad')] * 10
+        pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
+
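+        # one initial attempt + ANSIBLE_SSH_RETRIES (9) retries == 10 Popen calls
+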
assert self.mock_popen.call_count == 10 + + def test_put_file_retries(self, monkeypatch): + monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False) + monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3) + + monkeypatch.setattr('time.sleep', lambda x: None) + monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True) + + self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"] + self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"] + type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4) + + self.mock_selector.select.side_effect = [ + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], + [], + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], + [] + ] + self.mock_selector.get_map.side_effect = lambda: True + + self.conn._build_command = MagicMock() + self.conn._build_command.return_value = 'sftp' + + return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file') + assert return_code == 0 + assert b_stdout == b"my_stdout\nsecond_line" + assert b_stderr == b"my_stderr" + assert self.mock_popen.call_count == 2 + + def test_fetch_file_retries(self, monkeypatch): + monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False) + monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3) + + monkeypatch.setattr('time.sleep', lambda x: None) + monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True) + + self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"] + self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"] + type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4) + + self.mock_selector.select.side_effect = [ + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], + [], + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], + [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], + [] + ] + self.mock_selector.get_map.side_effect = lambda: True + + self.conn._build_command = MagicMock() + self.conn._build_command.return_value = 'sftp' + + return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + assert return_code == 0 + assert b_stdout == b"my_stdout\nsecond_line" + assert b_stderr == b"my_stderr" + assert self.mock_popen.call_count == 2 diff --git a/test/units/plugins/connection/test_winrm.py b/test/units/plugins/connection/test_winrm.py new file mode 100644 index 00000000..67bfd9ae --- /dev/null +++ b/test/units/plugins/connection/test_winrm.py @@ -0,0 +1,431 @@ +# -*- coding: utf-8 -*- +# (c) 2018, Jordan Borean +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from io import StringIO + +from units.compat.mock import MagicMock +from ansible.errors import AnsibleConnectionFailure +from 
ansible.module_utils._text import to_bytes +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import connection_loader +from ansible.plugins.connection import winrm + +pytest.importorskip("winrm") + + +class TestConnectionWinRM(object): + + OPTIONS_DATA = ( + # default options + ( + {'_extras': {}}, + {}, + { + '_kerb_managed': False, + '_kinit_cmd': 'kinit', + '_winrm_connection_timeout': None, + '_winrm_host': 'inventory_hostname', + '_winrm_kwargs': {'username': None, 'password': None}, + '_winrm_pass': None, + '_winrm_path': '/wsman', + '_winrm_port': 5986, + '_winrm_scheme': 'https', + '_winrm_transport': ['ssl'], + '_winrm_user': None + }, + False + ), + # http through port + ( + {'_extras': {}, 'ansible_port': 5985}, + {}, + { + '_winrm_kwargs': {'username': None, 'password': None}, + '_winrm_port': 5985, + '_winrm_scheme': 'http', + '_winrm_transport': ['plaintext'], + }, + False + ), + # kerberos user with kerb present + ( + {'_extras': {}, 'ansible_user': 'user@domain.com'}, + {}, + { + '_kerb_managed': False, + '_kinit_cmd': 'kinit', + '_winrm_kwargs': {'username': 'user@domain.com', + 'password': None}, + '_winrm_pass': None, + '_winrm_transport': ['kerberos', 'ssl'], + '_winrm_user': 'user@domain.com' + }, + True + ), + # kerberos user without kerb present + ( + {'_extras': {}, 'ansible_user': 'user@domain.com'}, + {}, + { + '_kerb_managed': False, + '_kinit_cmd': 'kinit', + '_winrm_kwargs': {'username': 'user@domain.com', + 'password': None}, + '_winrm_pass': None, + '_winrm_transport': ['ssl'], + '_winrm_user': 'user@domain.com' + }, + False + ), + # kerberos user with managed ticket (implicit) + ( + {'_extras': {}, 'ansible_user': 'user@domain.com'}, + {'remote_password': 'pass'}, + { + '_kerb_managed': True, + '_kinit_cmd': 'kinit', + '_winrm_kwargs': {'username': 'user@domain.com', + 'password': 'pass'}, + '_winrm_pass': 'pass', + '_winrm_transport': ['kerberos', 'ssl'], + '_winrm_user': 'user@domain.com' + }, + True + ), + # kerb with managed ticket (explicit) + ( + {'_extras': {}, 'ansible_user': 'user@domain.com', + 'ansible_winrm_kinit_mode': 'managed'}, + {'password': 'pass'}, + { + '_kerb_managed': True, + }, + True + ), + # kerb with unmanaged ticket (explicit)) + ( + {'_extras': {}, 'ansible_user': 'user@domain.com', + 'ansible_winrm_kinit_mode': 'manual'}, + {'password': 'pass'}, + { + '_kerb_managed': False, + }, + True + ), + # transport override (single) + ( + {'_extras': {}, 'ansible_user': 'user@domain.com', + 'ansible_winrm_transport': 'ntlm'}, + {}, + { + '_winrm_kwargs': {'username': 'user@domain.com', + 'password': None}, + '_winrm_pass': None, + '_winrm_transport': ['ntlm'], + }, + False + ), + # transport override (list) + ( + {'_extras': {}, 'ansible_user': 'user@domain.com', + 'ansible_winrm_transport': ['ntlm', 'certificate']}, + {}, + { + '_winrm_kwargs': {'username': 'user@domain.com', + 'password': None}, + '_winrm_pass': None, + '_winrm_transport': ['ntlm', 'certificate'], + }, + False + ), + # winrm extras + ( + {'_extras': {'ansible_winrm_server_cert_validation': 'ignore', + 'ansible_winrm_service': 'WSMAN'}}, + {}, + { + '_winrm_kwargs': {'username': None, 'password': None, + 'server_cert_validation': 'ignore', + 'service': 'WSMAN'}, + }, + False + ), + # direct override + ( + {'_extras': {}, 'ansible_winrm_connection_timeout': 5}, + {'connection_timeout': 10}, + { + '_winrm_connection_timeout': 10, + }, + False + ), + # password as ansible_password + ( + {'_extras': {}, 'ansible_password': 'pass'}, + {}, + { 
+ '_winrm_pass': 'pass', + '_winrm_kwargs': {'username': None, 'password': 'pass'} + }, + False + ), + # password as ansible_winrm_pass + ( + {'_extras': {}, 'ansible_winrm_pass': 'pass'}, + {}, + { + '_winrm_pass': 'pass', + '_winrm_kwargs': {'username': None, 'password': 'pass'} + }, + False + ), + + # password as ansible_winrm_password + ( + {'_extras': {}, 'ansible_winrm_password': 'pass'}, + {}, + { + '_winrm_pass': 'pass', + '_winrm_kwargs': {'username': None, 'password': 'pass'} + }, + False + ), + ) + + # pylint bug: https://github.com/PyCQA/pylint/issues/511 + # pylint: disable=undefined-variable + @pytest.mark.parametrize('options, direct, expected, kerb', + ((o, d, e, k) for o, d, e, k in OPTIONS_DATA)) + def test_set_options(self, options, direct, expected, kerb): + winrm.HAVE_KERBEROS = kerb + + pc = PlayContext() + new_stdin = StringIO() + + conn = connection_loader.get('winrm', pc, new_stdin) + conn.set_options(var_options=options, direct=direct) + conn._build_winrm_kwargs() + + for attr, expected in expected.items(): + actual = getattr(conn, attr) + assert actual == expected, \ + "winrm attr '%s', actual '%s' != expected '%s'"\ + % (attr, actual, expected) + + +class TestWinRMKerbAuth(object): + + @pytest.mark.parametrize('options, expected', [ + [{"_extras": {}}, + (["kinit", "user@domain"],)], + [{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'}, + (["kinit2", "user@domain"],)], + [{"_extras": {'ansible_winrm_kerberos_delegation': True}}, + (["kinit", "-f", "user@domain"],)], + ]) + def test_kinit_success_subprocess(self, monkeypatch, options, expected): + def mock_communicate(input=None, timeout=None): + return b"", b"" + + mock_popen = MagicMock() + mock_popen.return_value.communicate = mock_communicate + mock_popen.return_value.returncode = 0 + monkeypatch.setattr("subprocess.Popen", mock_popen) + + winrm.HAS_PEXPECT = False + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + conn.set_options(var_options=options) + conn._build_winrm_kwargs() + + conn._kerb_auth("user@domain", "pass") + mock_calls = mock_popen.mock_calls + assert len(mock_calls) == 1 + assert mock_calls[0][1] == expected + actual_env = mock_calls[0][2]['env'] + assert list(actual_env.keys()) == ['KRB5CCNAME'] + assert actual_env['KRB5CCNAME'].startswith("FILE:/") + + @pytest.mark.parametrize('options, expected', [ + [{"_extras": {}}, + ("kinit", ["user@domain"],)], + [{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'}, + ("kinit2", ["user@domain"],)], + [{"_extras": {'ansible_winrm_kerberos_delegation': True}}, + ("kinit", ["-f", "user@domain"],)], + ]) + def test_kinit_success_pexpect(self, monkeypatch, options, expected): + pytest.importorskip("pexpect") + mock_pexpect = MagicMock() + mock_pexpect.return_value.exitstatus = 0 + monkeypatch.setattr("pexpect.spawn", mock_pexpect) + + winrm.HAS_PEXPECT = True + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + conn.set_options(var_options=options) + conn._build_winrm_kwargs() + + conn._kerb_auth("user@domain", "pass") + mock_calls = mock_pexpect.mock_calls + assert mock_calls[0][1] == expected + actual_env = mock_calls[0][2]['env'] + assert list(actual_env.keys()) == ['KRB5CCNAME'] + assert actual_env['KRB5CCNAME'].startswith("FILE:/") + assert mock_calls[0][2]['echo'] is False + assert mock_calls[1][0] == "().expect" + assert mock_calls[1][1] == (".*:",) + assert mock_calls[2][0] == "().sendline" + assert mock_calls[2][1] == ("pass",) + assert 
mock_calls[3][0] == "().read" + assert mock_calls[4][0] == "().wait" + + def test_kinit_with_missing_executable_subprocess(self, monkeypatch): + expected_err = "[Errno 2] No such file or directory: " \ + "'/fake/kinit': '/fake/kinit'" + mock_popen = MagicMock(side_effect=OSError(expected_err)) + + monkeypatch.setattr("subprocess.Popen", mock_popen) + + winrm.HAS_PEXPECT = False + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"} + conn.set_options(var_options=options) + conn._build_winrm_kwargs() + + with pytest.raises(AnsibleConnectionFailure) as err: + conn._kerb_auth("user@domain", "pass") + assert str(err.value) == "Kerberos auth failure when calling " \ + "kinit cmd '/fake/kinit': %s" % expected_err + + def test_kinit_with_missing_executable_pexpect(self, monkeypatch): + pexpect = pytest.importorskip("pexpect") + + expected_err = "The command was not found or was not " \ + "executable: /fake/kinit" + mock_pexpect = \ + MagicMock(side_effect=pexpect.ExceptionPexpect(expected_err)) + + monkeypatch.setattr("pexpect.spawn", mock_pexpect) + + winrm.HAS_PEXPECT = True + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"} + conn.set_options(var_options=options) + conn._build_winrm_kwargs() + + with pytest.raises(AnsibleConnectionFailure) as err: + conn._kerb_auth("user@domain", "pass") + assert str(err.value) == "Kerberos auth failure when calling " \ + "kinit cmd '/fake/kinit': %s" % expected_err + + def test_kinit_error_subprocess(self, monkeypatch): + expected_err = "kinit: krb5_parse_name: " \ + "Configuration file does not specify default realm" + + def mock_communicate(input=None, timeout=None): + return b"", to_bytes(expected_err) + + mock_popen = MagicMock() + mock_popen.return_value.communicate = mock_communicate + mock_popen.return_value.returncode = 1 + monkeypatch.setattr("subprocess.Popen", mock_popen) + + winrm.HAS_PEXPECT = False + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + conn.set_options(var_options={"_extras": {}}) + conn._build_winrm_kwargs() + + with pytest.raises(AnsibleConnectionFailure) as err: + conn._kerb_auth("invaliduser", "pass") + + assert str(err.value) == \ + "Kerberos auth failure for principal invaliduser with " \ + "subprocess: %s" % (expected_err) + + def test_kinit_error_pexpect(self, monkeypatch): + pytest.importorskip("pexpect") + + expected_err = "Configuration file does not specify default realm" + mock_pexpect = MagicMock() + mock_pexpect.return_value.expect = MagicMock(side_effect=OSError) + mock_pexpect.return_value.read.return_value = to_bytes(expected_err) + mock_pexpect.return_value.exitstatus = 1 + + monkeypatch.setattr("pexpect.spawn", mock_pexpect) + + winrm.HAS_PEXPECT = True + pc = PlayContext() + new_stdin = StringIO() + conn = connection_loader.get('winrm', pc, new_stdin) + conn.set_options(var_options={"_extras": {}}) + conn._build_winrm_kwargs() + + with pytest.raises(AnsibleConnectionFailure) as err: + conn._kerb_auth("invaliduser", "pass") + + assert str(err.value) == \ + "Kerberos auth failure for principal invaliduser with " \ + "pexpect: %s" % (expected_err) + + def test_kinit_error_pass_in_output_subprocess(self, monkeypatch): + def mock_communicate(input=None, timeout=None): + return b"", b"Error with kinit\n" + input + + mock_popen = 
MagicMock()
+        mock_popen.return_value.communicate = mock_communicate
+        mock_popen.return_value.returncode = 1
+        monkeypatch.setattr("subprocess.Popen", mock_popen)
+
+        winrm.HAS_PEXPECT = False
+        pc = PlayContext()
+        new_stdin = StringIO()
+        conn = connection_loader.get('winrm', pc, new_stdin)
+        conn.set_options(var_options={"_extras": {}})
+        conn._build_winrm_kwargs()
+
+        with pytest.raises(AnsibleConnectionFailure) as err:
+            conn._kerb_auth("username", "password")
+        assert str(err.value) == \
+            "Kerberos auth failure for principal username with subprocess: " \
+            "Error with kinit\n"
+
+    def test_kinit_error_pass_in_output_pexpect(self, monkeypatch):
+        pytest.importorskip("pexpect")
+
+        mock_pexpect = MagicMock()
+        mock_pexpect.return_value.expect = MagicMock()
+        mock_pexpect.return_value.read.return_value = \
+            b"Error with kinit\npassword\n"
+        mock_pexpect.return_value.exitstatus = 1
+
+        monkeypatch.setattr("pexpect.spawn", mock_pexpect)
+
+        winrm.HAS_PEXPECT = True
+        pc = PlayContext()
+        new_stdin = StringIO()
+        conn = connection_loader.get('winrm', pc, new_stdin)
+        conn.set_options(var_options={"_extras": {}})
+        conn._build_winrm_kwargs()
+
+        with pytest.raises(AnsibleConnectionFailure) as err:
+            conn._kerb_auth("username", "password")
+        assert str(err.value) == \
+            "Kerberos auth failure for principal username with pexpect: " \
+            "Error with kinit\n"
diff --git a/test/units/plugins/filter/__init__.py b/test/units/plugins/filter/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/units/plugins/filter/test_core.py b/test/units/plugins/filter/test_core.py
new file mode 100644
index 00000000..8a626d9a
--- /dev/null
+++ b/test/units/plugins/filter/test_core.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.plugins.filter.core import to_uuid
+from ansible.errors import AnsibleFilterError
+
+
+UUID_DEFAULT_NAMESPACE_TEST_CASES = (
+    ('example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
+    ('test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
+    ('café.example', '8a99d6b1-fb8f-5f78-af86-879768589f56'),
+)
+
+UUID_TEST_CASES = (
+    ('361E6D51-FAEC-444A-9079-341386DA8E2E', 'example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
+    ('361E6D51-FAEC-444A-9079-341386DA8E2E', 'test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
+    ('11111111-2222-3333-4444-555555555555', 'example.com', 'e776faa5-5299-55dc-9057-7a00e6be2364'),
+)
+
+
+@pytest.mark.parametrize('value, expected', UUID_DEFAULT_NAMESPACE_TEST_CASES)
+def test_to_uuid_default_namespace(value, expected):
+    assert expected == to_uuid(value)
+
+
+@pytest.mark.parametrize('namespace, value, expected', UUID_TEST_CASES)
+def test_to_uuid(namespace, value, expected):
+    assert expected == to_uuid(value, namespace=namespace)
+
+
+def test_to_uuid_invalid_namespace():
+    with pytest.raises(AnsibleFilterError) as e:
+        to_uuid('example.com', namespace='11111111-2222-3333-4444-555555555')
+    assert 'Invalid value' in to_native(e.value)
diff --git a/test/units/plugins/filter/test_mathstuff.py b/test/units/plugins/filter/test_mathstuff.py
new file mode 100644
index 00000000..a0e78d33
--- /dev/null
+++ b/test/units/plugins/filter/test_mathstuff.py
@@ -0,0 +1,176 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from jinja2 import Environment + +import ansible.plugins.filter.mathstuff as ms +from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError + + +UNIQUE_DATA = (([1, 3, 4, 2], sorted([1, 2, 3, 4])), + ([1, 3, 2, 4, 2, 3], sorted([1, 2, 3, 4])), + (['a', 'b', 'c', 'd'], sorted(['a', 'b', 'c', 'd'])), + (['a', 'a', 'd', 'b', 'a', 'd', 'c', 'b'], sorted(['a', 'b', 'c', 'd'])), + ) + +TWO_SETS_DATA = (([1, 2], [3, 4], ([], sorted([1, 2]), sorted([1, 2, 3, 4]), sorted([1, 2, 3, 4]))), + ([1, 2, 3], [5, 3, 4], ([3], sorted([1, 2]), sorted([1, 2, 5, 4]), sorted([1, 2, 3, 4, 5]))), + (['a', 'b', 'c'], ['d', 'c', 'e'], (['c'], sorted(['a', 'b']), sorted(['a', 'b', 'd', 'e']), sorted(['a', 'b', 'c', 'e', 'd']))), + ) + +env = Environment() + + +@pytest.mark.parametrize('data, expected', UNIQUE_DATA) +class TestUnique: + def test_unhashable(self, data, expected): + assert sorted(ms.unique(env, list(data))) == expected + + def test_hashable(self, data, expected): + assert sorted(ms.unique(env, tuple(data))) == expected + + +@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA) +class TestIntersect: + def test_unhashable(self, dataset1, dataset2, expected): + assert sorted(ms.intersect(env, list(dataset1), list(dataset2))) == expected[0] + + def test_hashable(self, dataset1, dataset2, expected): + assert sorted(ms.intersect(env, tuple(dataset1), tuple(dataset2))) == expected[0] + + +@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA) +class TestDifference: + def test_unhashable(self, dataset1, dataset2, expected): + assert sorted(ms.difference(env, list(dataset1), list(dataset2))) == expected[1] + + def test_hashable(self, dataset1, dataset2, expected): + assert sorted(ms.difference(env, tuple(dataset1), tuple(dataset2))) == expected[1] + + +@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA) +class TestSymmetricDifference: + def test_unhashable(self, dataset1, dataset2, expected): + assert sorted(ms.symmetric_difference(env, list(dataset1), list(dataset2))) == expected[2] + + def test_hashable(self, dataset1, dataset2, expected): + assert sorted(ms.symmetric_difference(env, tuple(dataset1), tuple(dataset2))) == expected[2] + + +class TestMin: + def test_min(self): + assert ms.min((1, 2)) == 1 + assert ms.min((2, 1)) == 1 + assert ms.min(('p', 'a', 'w', 'b', 'p')) == 'a' + + +class TestMax: + def test_max(self): + assert ms.max((1, 2)) == 2 + assert ms.max((2, 1)) == 2 + assert ms.max(('p', 'a', 'w', 'b', 'p')) == 'w' + + +class TestLogarithm: + def test_log_non_number(self): + # Message changed in python3.6 + with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'): + ms.logarithm('a') + with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'): + ms.logarithm(10, base='a') + + def test_log_ten(self): + assert ms.logarithm(10, 10) == 1.0 + assert ms.logarithm(69, 10) * 1000 // 1 == 1838 + + def test_log_natural(self): + assert ms.logarithm(69) * 1000 // 1 == 4234 + + def test_log_two(self): + assert ms.logarithm(69, 2) * 1000 // 1 == 6108 + + +class TestPower: + def test_power_non_number(self): + # Message changed in python3.6 + with 
pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'): + ms.power('a', 10) + + with pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'): + ms.power(10, 'a') + + def test_power_squared(self): + assert ms.power(10, 2) == 100 + + def test_power_cubed(self): + assert ms.power(10, 3) == 1000 + + +class TestInversePower: + def test_root_non_number(self): + # Messages differed in python-2.6, python-2.7-3.5, and python-3.6+ + with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers:" + " (invalid literal for float\\(\\): a" + "|could not convert string to float: a" + "|could not convert string to float: 'a')"): + ms.inversepower(10, 'a') + + with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers: (a float is required|must be real number, not str)"): + ms.inversepower('a', 10) + + def test_square_root(self): + assert ms.inversepower(100) == 10 + assert ms.inversepower(100, 2) == 10 + + def test_cube_root(self): + assert ms.inversepower(27, 3) == 3 + + +class TestRekeyOnMember(): + # (Input data structure, member to rekey on, expected return) + VALID_ENTRIES = ( + ([{"proto": "eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}], + 'proto', + {'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}), + ({'eigrp': {"proto": "eigrp", "state": "enabled"}, 'ospf': {"proto": "ospf", "state": "enabled"}}, + 'proto', + {'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}), + ) + + # (Input data structure, member to rekey on, expected error message) + INVALID_ENTRIES = ( + # Fail when key is not found + (AnsibleFilterError, [{"proto": "eigrp", "state": "enabled"}], 'invalid_key', "Key invalid_key was not found"), + (AnsibleFilterError, {"eigrp": {"proto": "eigrp", "state": "enabled"}}, 'invalid_key', "Key invalid_key was not found"), + # Fail when key is duplicated + (AnsibleFilterError, [{"proto": "eigrp"}, {"proto": "ospf"}, {"proto": "ospf"}], + 'proto', 'Key ospf is not unique, cannot correctly turn into dict'), + # Fail when value is not a dict + (AnsibleFilterTypeError, ["string"], 'proto', "List item is not a valid dict"), + (AnsibleFilterTypeError, [123], 'proto', "List item is not a valid dict"), + (AnsibleFilterTypeError, [[{'proto': 1}]], 'proto', "List item is not a valid dict"), + # Fail when we do not send a dict or list + (AnsibleFilterTypeError, "string", 'proto', "Type is not a valid list, set, or dict"), + (AnsibleFilterTypeError, 123, 'proto', "Type is not a valid list, set, or dict"), + ) + + @pytest.mark.parametrize("list_original, key, expected", VALID_ENTRIES) + def test_rekey_on_member_success(self, list_original, key, expected): + assert ms.rekey_on_member(list_original, key) == expected + + @pytest.mark.parametrize("expected_exception_type, list_original, key, expected", INVALID_ENTRIES) + def test_fail_rekey_on_member(self, expected_exception_type, list_original, key, expected): + with pytest.raises(expected_exception_type) as err: + ms.rekey_on_member(list_original, key) + + assert err.value.message == expected + + def test_duplicate_strategy_overwrite(self): + list_original = ({'proto': 'eigrp', 'id': 1}, {'proto': 'ospf', 'id': 2}, {'proto': 'eigrp', 'id': 3}) + expected = {'eigrp': {'proto': 'eigrp', 'id': 3}, 'ospf': {'proto': 'ospf', 'id': 2}} + 
assert ms.rekey_on_member(list_original, 'proto', duplicates='overwrite') == expected diff --git a/test/units/plugins/inventory/__init__.py b/test/units/plugins/inventory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/inventory/test_constructed.py b/test/units/plugins/inventory/test_constructed.py new file mode 100644 index 00000000..6d521982 --- /dev/null +++ b/test/units/plugins/inventory/test_constructed.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- + +# Copyright 2019 Alan Rominger +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.errors import AnsibleParserError +from ansible.plugins.inventory.constructed import InventoryModule +from ansible.inventory.data import InventoryData +from ansible.template import Templar + + +@pytest.fixture() +def inventory_module(): + r = InventoryModule() + r.inventory = InventoryData() + r.templar = Templar(None) + return r + + +def test_group_by_value_only(inventory_module): + inventory_module.inventory.add_host('foohost') + inventory_module.inventory.set_variable('foohost', 'bar', 'my_group_name') + host = inventory_module.inventory.get_host('foohost') + keyed_groups = [ + { + 'prefix': '', + 'separator': '', + 'key': 'bar' + } + ] + inventory_module._add_host_to_keyed_groups( + keyed_groups, host.vars, host.name, strict=False + ) + assert 'my_group_name' in inventory_module.inventory.groups + group = inventory_module.inventory.groups['my_group_name'] + assert group.hosts == [host] + + +def test_keyed_group_separator(inventory_module): + inventory_module.inventory.add_host('farm') + inventory_module.inventory.set_variable('farm', 'farmer', 'mcdonald') + inventory_module.inventory.set_variable('farm', 'barn', {'cow': 'betsy'}) + host = inventory_module.inventory.get_host('farm') + keyed_groups = [ + { + 'prefix': 'farmer', + 'separator': '_old_', + 'key': 'farmer' + }, + { + 'separator': 'mmmmmmmmmm', + 'key': 'barn' + } + ] + inventory_module._add_host_to_keyed_groups( + keyed_groups, host.vars, host.name, strict=False + ) + for group_name in ('farmer_old_mcdonald', 'mmmmmmmmmmcowmmmmmmmmmmbetsy'): + assert group_name in inventory_module.inventory.groups + group = inventory_module.inventory.groups[group_name] + assert group.hosts == [host] + + +def test_keyed_group_empty_construction(inventory_module): + inventory_module.inventory.add_host('farm') + inventory_module.inventory.set_variable('farm', 'barn', {}) + host = inventory_module.inventory.get_host('farm') + keyed_groups = [ + { + 'separator': 'mmmmmmmmmm', + 'key': 'barn' + } + ] + inventory_module._add_host_to_keyed_groups( + keyed_groups, host.vars, host.name, strict=True + ) + assert host.groups == [] + + +def test_keyed_group_host_confusion(inventory_module): + inventory_module.inventory.add_host('cow') + inventory_module.inventory.add_group('cow') + 
host = inventory_module.inventory.get_host('cow')
+    host.vars['species'] = 'cow'
+    keyed_groups = [
+        {
+            'separator': '',
+            'prefix': '',
+            'key': 'species'
+        }
+    ]
+    inventory_module._add_host_to_keyed_groups(
+        keyed_groups, host.vars, host.name, strict=True
+    )
+    group = inventory_module.inventory.groups['cow']
+    # the pre-existing group 'cow' now contains the host 'cow'
+    assert group.hosts == [host]
+
+
+def test_keyed_parent_groups(inventory_module):
+    inventory_module.inventory.add_host('web1')
+    inventory_module.inventory.add_host('web2')
+    inventory_module.inventory.set_variable('web1', 'region', 'japan')
+    inventory_module.inventory.set_variable('web2', 'region', 'japan')
+    host1 = inventory_module.inventory.get_host('web1')
+    host2 = inventory_module.inventory.get_host('web2')
+    keyed_groups = [
+        {
+            'prefix': 'region',
+            'key': 'region',
+            'parent_group': 'region_list'
+        }
+    ]
+    for host in [host1, host2]:
+        inventory_module._add_host_to_keyed_groups(
+            keyed_groups, host.vars, host.name, strict=False
+        )
+    assert 'region_japan' in inventory_module.inventory.groups
+    assert 'region_list' in inventory_module.inventory.groups
+    region_group = inventory_module.inventory.groups['region_japan']
+    all_regions = inventory_module.inventory.groups['region_list']
+    assert all_regions.child_groups == [region_group]
+    assert region_group.hosts == [host1, host2]
+
+
+def test_parent_group_templating(inventory_module):
+    inventory_module.inventory.add_host('cow')
+    inventory_module.inventory.set_variable('cow', 'sound', 'mmmmmmmmmm')
+    inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
+    host = inventory_module.inventory.get_host('cow')
+    keyed_groups = [
+        {
+            'key': 'sound',
+            'prefix': 'sound',
+            'parent_group': '{{ nickname }}'
+        },
+        {
+            'key': 'nickname',
+            'prefix': '',
+            'separator': '',
+            'parent_group': 'nickname'  # statically-named parent group, conflicting with hostvar
+        },
+        {
+            'key': 'nickname',
+            'separator': '',
+            'parent_group': '{{ location | default("field") }}'
+        }
+    ]
+    inventory_module._add_host_to_keyed_groups(
+        keyed_groups, host.vars, host.name, strict=True
+    )
+    # first keyed group: "betsy" is a dynamically generated parent group name
+    betsys_group = inventory_module.inventory.groups['betsy']
+    assert [child.name for child in betsys_group.child_groups] == ['sound_mmmmmmmmmm']
+    # second keyed group: "nickname" is a statically-named root group
+    nicknames_group = inventory_module.inventory.groups['nickname']
+    assert [child.name for child in nicknames_group.child_groups] == ['betsy']
+    # the second keyed group actually generated the parent group of the first;
+    # assert that these are, in fact, the same object
+    assert nicknames_group.child_groups[0] == betsys_group
+    # the third keyed group gives "betsy" a second parent, "field"
+    locations_group = inventory_module.inventory.groups['field']
+    assert [child.name for child in locations_group.child_groups] == ['betsy']
+
+
+def test_parent_group_templating_error(inventory_module):
+    inventory_module.inventory.add_host('cow')
+    inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
+    host = inventory_module.inventory.get_host('cow')
+    keyed_groups = [
+        {
+            'key': 'nickname',
+            'separator': '',
+            'parent_group': '{{ location.barn-yard }}'
+        }
+    ]
+    with pytest.raises(AnsibleParserError) as err_message:
+        inventory_module._add_host_to_keyed_groups(
+            keyed_groups, host.vars, host.name, strict=True
+        )
+    assert 'Could not generate parent group' in str(err_message.value)
+    # an invalid parent group does not raise an exception with strict=False
+
inventory_module._add_host_to_keyed_groups( + keyed_groups, host.vars, host.name, strict=False + ) + # assert group was never added with invalid parent + assert 'betsy' not in inventory_module.inventory.groups diff --git a/test/units/plugins/inventory/test_inventory.py b/test/units/plugins/inventory/test_inventory.py new file mode 100644 index 00000000..66b5ec37 --- /dev/null +++ b/test/units/plugins/inventory/test_inventory.py @@ -0,0 +1,207 @@ +# Copyright 2015 Abhijit Menon-Sen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import string +import textwrap + +from ansible import constants as C +from units.compat import mock +from units.compat import unittest +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_text +from units.mock.path import mock_unfrackpath_noop + +from ansible.inventory.manager import InventoryManager, split_host_pattern + +from units.mock.loader import DictDataLoader + + +class TestInventory(unittest.TestCase): + + patterns = { + 'a': ['a'], + 'a, b': ['a', 'b'], + 'a , b': ['a', 'b'], + ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'], + '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'], + '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'], + '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'], + 'foo[1:2]': ['foo[1:2]'], + 'a::b': ['a::b'], + 'a:b': ['a', 'b'], + ' a : b ': ['a', 'b'], + 'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'], + 'a,,b': ['a', 'b'], + 'a, ,b,,c, ,': ['a', 'b', 'c'], + ',': [], + '': [], + } + + pattern_lists = [ + [['a'], ['a']], + [['a', 'b'], ['a', 'b']], + [['a, b'], ['a', 'b']], + [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'], + ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']] + ] + + # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ] + # a,b are the bounds of the subscript; x..z are the results of the subscript + # when applied to string.ascii_letters. 
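+    # For example, 'a[2:3]' should split into ('a', (2, 3)); applied to
+    # string.ascii_letters, that subscript selects ['c', 'd'] (both bounds
+    # are inclusive).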
+ + subscripts = { + 'a': [('a', None), list(string.ascii_letters)], + 'a[0]': [('a', (0, None)), ['a']], + 'a[1]': [('a', (1, None)), ['b']], + 'a[2:3]': [('a', (2, 3)), ['c', 'd']], + 'a[-1]': [('a', (-1, None)), ['Z']], + 'a[-2]': [('a', (-2, None)), ['Y']], + 'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']], + 'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']], + 'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])], + } + + ranges_to_expand = { + 'a[1:2]': ['a1', 'a2'], + 'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'], + 'a[a:b]': ['aa', 'ab'], + 'a[a:i:3]': ['aa', 'ad', 'ag'], + 'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'], + 'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'], + 'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'], + } + + def setUp(self): + fake_loader = DictDataLoader({}) + + self.i = InventoryManager(loader=fake_loader, sources=[None]) + + def test_split_patterns(self): + + for p in self.patterns: + r = self.patterns[p] + self.assertEqual(r, split_host_pattern(p)) + + for p, r in self.pattern_lists: + self.assertEqual(r, split_host_pattern(p)) + + def test_ranges(self): + + for s in self.subscripts: + r = self.subscripts[s] + self.assertEqual(r[0], self.i._split_subscript(s)) + self.assertEqual( + r[1], + self.i._apply_subscript( + list(string.ascii_letters), + r[0][1] + ) + ) + + +class TestInventoryPlugins(unittest.TestCase): + + def test_empty_inventory(self): + inventory = self._get_inventory('') + + self.assertIn('all', inventory.groups) + self.assertIn('ungrouped', inventory.groups) + self.assertFalse(inventory.groups['all'].get_hosts()) + self.assertFalse(inventory.groups['ungrouped'].get_hosts()) + + def test_ini(self): + self._test_default_groups(""" + host1 + host2 + host3 + [servers] + host3 + host4 + host5 + """) + + def test_ini_explicit_ungrouped(self): + self._test_default_groups(""" + [ungrouped] + host1 + host2 + host3 + [servers] + host3 + host4 + host5 + """) + + def test_ini_variables_stringify(self): + values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0] + + inventory_content = "host1 " + inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)]) + inventory = self._get_inventory(inventory_content) + + variables = inventory.get_host('host1').vars + for i in range(len(values)): + if isinstance(values[i], string_types): + self.assertIsInstance(variables['var%s' % i], string_types) + else: + self.assertIsInstance(variables['var%s' % i], type(values[i])) + + @mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop) + @mock.patch('os.path.exists', lambda x: True) + @mock.patch('os.access', lambda x, y: True) + def test_yaml_inventory(self, filename="test.yaml"): + inventory_content = {filename: textwrap.dedent("""\ + --- + all: + hosts: + test1: + test2: + """)} + C.INVENTORY_ENABLED = ['yaml'] + fake_loader = DictDataLoader(inventory_content) + im = InventoryManager(loader=fake_loader, sources=filename) + self.assertTrue(im._inventory.hosts) + self.assertIn('test1', im._inventory.hosts) + self.assertIn('test2', im._inventory.hosts) + self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts) + self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts) + self.assertEqual(len(im._inventory.groups['all'].hosts), 2) + self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts) + self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts) + self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2) + + 
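+    # Helpers for the tests above: _get_inventory builds an InventoryManager
+    # from literal inventory text via a DictDataLoader keyed on this file's
+    # path, and _test_default_groups checks the implicit 'all'/'ungrouped'
+    # group layout around it.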
def _get_inventory(self, inventory_content): + + fake_loader = DictDataLoader({__file__: inventory_content}) + + return InventoryManager(loader=fake_loader, sources=[__file__]) + + def _test_default_groups(self, inventory_content): + inventory = self._get_inventory(inventory_content) + + self.assertIn('all', inventory.groups) + self.assertIn('ungrouped', inventory.groups) + all_hosts = set(host.name for host in inventory.groups['all'].get_hosts()) + self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts) + ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts()) + self.assertEqual(set(['host1', 'host2']), ungrouped_hosts) + servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts()) + self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts) diff --git a/test/units/plugins/inventory/test_script.py b/test/units/plugins/inventory/test_script.py new file mode 100644 index 00000000..5f054813 --- /dev/null +++ b/test/units/plugins/inventory/test_script.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + +# Copyright 2017 Chris Meyers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.plugins.loader import PluginLoader +from units.compat import mock +from units.compat import unittest +from ansible.module_utils._text import to_bytes, to_native + + +class TestInventoryModule(unittest.TestCase): + + def setUp(self): + + class Inventory(): + cache = dict() + + class PopenResult(): + returncode = 0 + stdout = b"" + stderr = b"" + + def communicate(self): + return (self.stdout, self.stderr) + + self.popen_result = PopenResult() + self.inventory = Inventory() + self.loader = mock.MagicMock() + self.loader.load = mock.MagicMock() + + inv_loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins') + self.inventory_module = inv_loader.get('script') + self.inventory_module.set_options() + + def register_patch(name): + patcher = mock.patch(name) + self.addCleanup(patcher.stop) + return patcher.start() + + self.popen = register_patch('subprocess.Popen') + self.popen.return_value = self.popen_result + + self.BaseInventoryPlugin = register_patch('ansible.plugins.inventory.BaseInventoryPlugin') + self.BaseInventoryPlugin.get_cache_prefix.return_value = 'abc123' + + def test_parse_subprocess_path_not_found_fail(self): + self.popen.side_effect = OSError("dummy text") + + with pytest.raises(AnsibleError) as e: + self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py') + assert e.value.message == "problem running /foo/bar/foobar.py --list (dummy text)" + + def test_parse_subprocess_err_code_fail(self): + self.popen_result.stdout = 
to_bytes(u"fooébar", errors='surrogate_escape') + self.popen_result.stderr = to_bytes(u"dummyédata") + + self.popen_result.returncode = 1 + + with pytest.raises(AnsibleError) as e: + self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py') + assert e.value.message == to_native("Inventory script (/foo/bar/foobar.py) had an execution error: " + "dummyédata\n ") + + def test_parse_utf8_fail(self): + self.popen_result.returncode = 0 + self.popen_result.stderr = to_bytes("dummyédata") + self.loader.load.side_effect = TypeError('obj must be string') + + with pytest.raises(AnsibleError) as e: + self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py') + assert e.value.message == to_native("failed to parse executable inventory script results from " + "/foo/bar/foobar.py: obj must be string\ndummyédata\n") + + def test_parse_dict_fail(self): + self.popen_result.returncode = 0 + self.popen_result.stderr = to_bytes("dummyédata") + self.loader.load.return_value = 'i am not a dict' + + with pytest.raises(AnsibleError) as e: + self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py') + assert e.value.message == to_native("failed to parse executable inventory script results from " + "/foo/bar/foobar.py: needs to be a json dict\ndummyédata\n") diff --git a/test/units/plugins/loader_fixtures/__init__.py b/test/units/plugins/loader_fixtures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/loader_fixtures/import_fixture.py b/test/units/plugins/loader_fixtures/import_fixture.py new file mode 100644 index 00000000..81127332 --- /dev/null +++ b/test/units/plugins/loader_fixtures/import_fixture.py @@ -0,0 +1,9 @@ +# Nothing to see here, this file is just empty to support a imp.load_source +# without doing anything +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class test: + def __init__(self, *args, **kwargs): + pass diff --git a/test/units/plugins/lookup/__init__.py b/test/units/plugins/lookup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/lookup/test_env.py b/test/units/plugins/lookup/test_env.py new file mode 100644 index 00000000..5d9713fe --- /dev/null +++ b/test/units/plugins/lookup/test_env.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Abhay Kadam +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.plugins.loader import lookup_loader + + +@pytest.mark.parametrize('env_var,exp_value', [ + ('foo', 'bar'), + ('equation', 'a=b*100') +]) +def test_env_var_value(monkeypatch, env_var, exp_value): + monkeypatch.setattr('ansible.utils.py3compat.environ.get', lambda x, y: exp_value) + + env_lookup = lookup_loader.get('env') + retval = env_lookup.run([env_var], None) + assert retval == [exp_value] + + +@pytest.mark.parametrize('env_var,exp_value', [ + ('simple_var', 'alpha-β-gamma'), + ('the_var', 'ãnˈsiβle') +]) +def test_utf8_env_var_value(monkeypatch, env_var, exp_value): + monkeypatch.setattr('ansible.utils.py3compat.environ.get', lambda x, y: exp_value) + + env_lookup = lookup_loader.get('env') + retval = env_lookup.run([env_var], None) + assert retval == [exp_value] diff --git a/test/units/plugins/lookup/test_ini.py b/test/units/plugins/lookup/test_ini.py new file mode 100644 index 00000000..adf2bac2 
--- /dev/null +++ b/test/units/plugins/lookup/test_ini.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from ansible.plugins.lookup.ini import _parse_params + + +class TestINILookup(unittest.TestCase): + + # Currently there isn't a new-style + old_style_params_data = ( + # Simple case + dict( + term=u'keyA section=sectionA file=/path/to/file', + expected=[u'file=/path/to/file', u'keyA', u'section=sectionA'], + ), + dict( + term=u'keyB section=sectionB with space file=/path/with/embedded spaces and/file', + expected=[u'file=/path/with/embedded spaces and/file', u'keyB', u'section=sectionB with space'], + ), + dict( + term=u'keyC section=sectionC file=/path/with/equals/cn=com.ansible', + expected=[u'file=/path/with/equals/cn=com.ansible', u'keyC', u'section=sectionC'], + ), + dict( + term=u'keyD section=sectionD file=/path/with space and/equals/cn=com.ansible', + expected=[u'file=/path/with space and/equals/cn=com.ansible', u'keyD', u'section=sectionD'], + ), + dict( + term=u'keyE section=sectionE file=/path/with/unicode/くらとみ/file', + expected=[u'file=/path/with/unicode/くらとみ/file', u'keyE', u'section=sectionE'], + ), + dict( + term=u'keyF section=sectionF file=/path/with/utf 8 and spaces/くらとみ/file', + expected=[u'file=/path/with/utf 8 and spaces/くらとみ/file', u'keyF', u'section=sectionF'], + ), + ) + + def test_parse_parameters(self): + for testcase in self.old_style_params_data: + # print(testcase) + params = _parse_params(testcase['term']) + params.sort() + self.assertEqual(params, testcase['expected']) diff --git a/test/units/plugins/lookup/test_password.py b/test/units/plugins/lookup/test_password.py new file mode 100644 index 00000000..9871f4ab --- /dev/null +++ b/test/units/plugins/lookup/test_password.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
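+# The tests below exercise the 'password' lookup plugin piece by piece:
+# term/parameter parsing, candidate character generation, random password
+# creation, file content parsing and formatting, and the full lookup with
+# and without passlib installed.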
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + import passlib + from passlib.handlers import pbkdf2 +except ImportError: + passlib = None + pbkdf2 = None + +import pytest + +from units.mock.loader import DictDataLoader + +from units.compat import unittest +from units.compat.mock import mock_open, patch +from ansible.errors import AnsibleError +from ansible.module_utils.six import text_type +from ansible.module_utils.six.moves import builtins +from ansible.module_utils._text import to_bytes +from ansible.plugins.loader import PluginLoader +from ansible.plugins.lookup import password + + +DEFAULT_CHARS = sorted([u'ascii_letters', u'digits', u".,:-_"]) +DEFAULT_CANDIDATE_CHARS = u'.,:-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' + +# Currently there isn't a new-style +old_style_params_data = ( + # Simple case + dict( + term=u'/path/to/file', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + + # Special characters in path + dict( + term=u'/path/with/embedded spaces and/file', + filename=u'/path/with/embedded spaces and/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/with/equals/cn=com.ansible', + filename=u'/path/with/equals/cn=com.ansible', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/with/unicode/くらとみ/file', + filename=u'/path/with/unicode/くらとみ/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + # Mix several special chars + dict( + term=u'/path/with/utf 8 and spaces/くらとみ/file', + filename=u'/path/with/utf 8 and spaces/くらとみ/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/with/encoding=unicode/くらとみ/file', + filename=u'/path/with/encoding=unicode/くらとみ/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/with/encoding=unicode/くらとみ/and spaces file', + filename=u'/path/with/encoding=unicode/くらとみ/and spaces file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + + # Simple parameters + dict( + term=u'/path/to/file length=42', + filename=u'/path/to/file', + params=dict(length=42, encrypt=None, chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/to/file encrypt=pbkdf2_sha256', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt='pbkdf2_sha256', chars=DEFAULT_CHARS), + candidate_chars=DEFAULT_CANDIDATE_CHARS, + ), + dict( + term=u'/path/to/file chars=abcdefghijklmnop', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abcdefghijklmnop']), + candidate_chars=u'abcdefghijklmnop', + ), + dict( + term=u'/path/to/file chars=digits,abc,def', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc', u'def'])), + candidate_chars=u'abcdef0123456789', + ), + + # Including comma in chars + dict( + term=u'/path/to/file 
chars=abcdefghijklmnop,,digits', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'abcdefghijklmnop', u',', u'digits'])), + candidate_chars=u',abcdefghijklmnop0123456789', + ), + dict( + term=u'/path/to/file chars=,,', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u',']), + candidate_chars=u',', + ), + + # Including = in chars + dict( + term=u'/path/to/file chars=digits,=,,', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'=', u','])), + candidate_chars=u',=0123456789', + ), + dict( + term=u'/path/to/file chars=digits,abc=def', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc=def'])), + candidate_chars=u'abc=def0123456789', + ), + + # Including unicode in chars + dict( + term=u'/path/to/file chars=digits,くらとみ,,', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'くらとみ', u','])), + candidate_chars=u',0123456789くらとみ', + ), + # Including only unicode in chars + dict( + term=u'/path/to/file chars=くらとみ', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'くらとみ'])), + candidate_chars=u'くらとみ', + ), + + # Include ':' in path + dict( + term=u'/path/to/file_with:colon chars=ascii_letters,digits', + filename=u'/path/to/file_with:colon', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'ascii_letters', u'digits'])), + candidate_chars=u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', + ), + + # Including special chars in both path and chars + # Special characters in path + dict( + term=u'/path/with/embedded spaces and/file chars=abc=def', + filename=u'/path/with/embedded spaces and/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']), + candidate_chars=u'abc=def', + ), + dict( + term=u'/path/with/equals/cn=com.ansible chars=abc=def', + filename=u'/path/with/equals/cn=com.ansible', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']), + candidate_chars=u'abc=def', + ), + dict( + term=u'/path/with/unicode/くらとみ/file chars=くらとみ', + filename=u'/path/with/unicode/くらとみ/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), + candidate_chars=u'くらとみ', + ), +) + + +class TestParseParameters(unittest.TestCase): + def test(self): + for testcase in old_style_params_data: + filename, params = password._parse_parameters(testcase['term']) + params['chars'].sort() + self.assertEqual(filename, testcase['filename']) + self.assertEqual(params, testcase['params']) + + def test_unrecognized_value(self): + testcase = dict(term=u'/path/to/file chars=くらとみi sdfsdf', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), + candidate_chars=u'くらとみ') + self.assertRaises(AnsibleError, password._parse_parameters, testcase['term']) + + def test_invalid_params(self): + testcase = dict(term=u'/path/to/file chars=くらとみi somethign_invalid=123', + filename=u'/path/to/file', + params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), + candidate_chars=u'くらとみ') + self.assertRaises(AnsibleError, password._parse_parameters, testcase['term']) + + +class TestReadPasswordFile(unittest.TestCase): + def setUp(self): + self.os_path_exists = password.os.path.exists + + def tearDown(self): + 
password.os.path.exists = self.os_path_exists + + def test_no_password_file(self): + password.os.path.exists = lambda x: False + self.assertEqual(password._read_password_file(b'/nonexistent'), None) + + def test_with_password_file(self): + password.os.path.exists = lambda x: True + with patch.object(builtins, 'open', mock_open(read_data=b'Testing\n')) as m: + self.assertEqual(password._read_password_file(b'/etc/motd'), u'Testing') + + +class TestGenCandidateChars(unittest.TestCase): + def _assert_gen_candidate_chars(self, testcase): + expected_candidate_chars = testcase['candidate_chars'] + params = testcase['params'] + chars_spec = params['chars'] + res = password._gen_candidate_chars(chars_spec) + self.assertEqual(res, expected_candidate_chars) + + def test_gen_candidate_chars(self): + for testcase in old_style_params_data: + self._assert_gen_candidate_chars(testcase) + + +class TestRandomPassword(unittest.TestCase): + def _assert_valid_chars(self, res, chars): + for res_char in res: + self.assertIn(res_char, chars) + + def test_default(self): + res = password.random_password() + self.assertEqual(len(res), password.DEFAULT_LENGTH) + self.assertTrue(isinstance(res, text_type)) + self._assert_valid_chars(res, DEFAULT_CANDIDATE_CHARS) + + def test_zero_length(self): + res = password.random_password(length=0) + self.assertEqual(len(res), 0) + self.assertTrue(isinstance(res, text_type)) + self._assert_valid_chars(res, u',') + + def test_just_a_common(self): + res = password.random_password(length=1, chars=u',') + self.assertEqual(len(res), 1) + self.assertEqual(res, u',') + + def test_free_will(self): + # A Rush and Spinal Tap reference twofer + res = password.random_password(length=11, chars=u'a') + self.assertEqual(len(res), 11) + self.assertEqual(res, 'aaaaaaaaaaa') + self._assert_valid_chars(res, u'a') + + def test_unicode(self): + res = password.random_password(length=11, chars=u'くらとみ') + self._assert_valid_chars(res, u'くらとみ') + self.assertEqual(len(res), 11) + + def test_gen_password(self): + for testcase in old_style_params_data: + params = testcase['params'] + candidate_chars = testcase['candidate_chars'] + params_chars_spec = password._gen_candidate_chars(params['chars']) + password_string = password.random_password(length=params['length'], + chars=params_chars_spec) + self.assertEqual(len(password_string), + params['length'], + msg='generated password=%s has length (%s) instead of expected length (%s)' % + (password_string, len(password_string), params['length'])) + + for char in password_string: + self.assertIn(char, candidate_chars, + msg='%s not found in %s from chars spect %s' % + (char, candidate_chars, params['chars'])) + + +class TestParseContent(unittest.TestCase): + def test_empty_password_file(self): + plaintext_password, salt = password._parse_content(u'') + self.assertEqual(plaintext_password, u'') + self.assertEqual(salt, None) + + def test(self): + expected_content = u'12345678' + file_content = expected_content + plaintext_password, salt = password._parse_content(file_content) + self.assertEqual(plaintext_password, expected_content) + self.assertEqual(salt, None) + + def test_with_salt(self): + expected_content = u'12345678 salt=87654321' + file_content = expected_content + plaintext_password, salt = password._parse_content(file_content) + self.assertEqual(plaintext_password, u'12345678') + self.assertEqual(salt, u'87654321') + + +class TestFormatContent(unittest.TestCase): + def test_no_encrypt(self): + self.assertEqual( + 
password._format_content(password=u'hunter42', + salt=u'87654321', + encrypt=False), + u'hunter42 salt=87654321') + + def test_no_encrypt_no_salt(self): + self.assertEqual( + password._format_content(password=u'hunter42', + salt=None, + encrypt=None), + u'hunter42') + + def test_encrypt(self): + self.assertEqual( + password._format_content(password=u'hunter42', + salt=u'87654321', + encrypt='pbkdf2_sha256'), + u'hunter42 salt=87654321') + + def test_encrypt_no_salt(self): + self.assertRaises(AssertionError, password._format_content, u'hunter42', None, 'pbkdf2_sha256') + + +class TestWritePasswordFile(unittest.TestCase): + def setUp(self): + self.makedirs_safe = password.makedirs_safe + self.os_chmod = password.os.chmod + password.makedirs_safe = lambda path, mode: None + password.os.chmod = lambda path, mode: None + + def tearDown(self): + password.makedirs_safe = self.makedirs_safe + password.os.chmod = self.os_chmod + + def test_content_written(self): + + with patch.object(builtins, 'open', mock_open()) as m: + password._write_password_file(b'/this/is/a/test/caf\xc3\xa9', u'Testing Café') + + m.assert_called_once_with(b'/this/is/a/test/caf\xc3\xa9', 'wb') + m().write.assert_called_once_with(u'Testing Café\n'.encode('utf-8')) + + +class BaseTestLookupModule(unittest.TestCase): + def setUp(self): + self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'}) + self.password_lookup = password.LookupModule(loader=self.fake_loader) + self.os_path_exists = password.os.path.exists + self.os_open = password.os.open + password.os.open = lambda path, flag: None + self.os_close = password.os.close + password.os.close = lambda fd: None + self.os_remove = password.os.remove + password.os.remove = lambda path: None + self.makedirs_safe = password.makedirs_safe + password.makedirs_safe = lambda path, mode: None + + def tearDown(self): + password.os.path.exists = self.os_path_exists + password.os.open = self.os_open + password.os.close = self.os_close + password.os.remove = self.os_remove + password.makedirs_safe = self.makedirs_safe + + +class TestLookupModuleWithoutPasslib(BaseTestLookupModule): + # note: stacked @patch decorators hand their mocks to the test bottom-up, + # so the innermost patch arrives as the first mock argument + @patch.object(PluginLoader, '_get_paths') + @patch('ansible.plugins.lookup.password._write_password_file') + def test_no_encrypt(self, mock_write_file, mock_get_paths): + mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] + + results = self.password_lookup.run([u'/path/to/somewhere'], None) + + # FIXME: assert something useful + for result in results: + assert len(result) == password.DEFAULT_LENGTH + assert isinstance(result, text_type) + + @patch.object(PluginLoader, '_get_paths') + @patch('ansible.plugins.lookup.password._write_password_file') + def test_password_already_created_no_encrypt(self, mock_write_file, mock_get_paths): + mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] + password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') + + with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: + results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) + + for result in results: + self.assertEqual(result, u'hunter42') + + @patch.object(PluginLoader, '_get_paths') + @patch('ansible.plugins.lookup.password._write_password_file') + def test_only_a(self, mock_write_file, mock_get_paths): + mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] + + results = self.password_lookup.run([u'/path/to/somewhere chars=a'], None) + for result in results: +
self.assertEqual(result, u'a' * password.DEFAULT_LENGTH) + + @patch('time.sleep') + def test_lock_been_held(self, mock_sleep): + # pretend the lock file is here + password.os.path.exists = lambda x: True + try: + with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: + # should time out here + results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) + self.fail("Lookup didn't time out when the lock was already held") + except AnsibleError: + pass + + def test_lock_not_been_held(self): + # pretend there is a password file but no lock this time + password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') + try: + with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: + # should not time out here + results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) + except AnsibleError: + self.fail('Lookup timed out when the lock was free') + + for result in results: + self.assertEqual(result, u'hunter42') + + +@pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests') +class TestLookupModuleWithPasslib(BaseTestLookupModule): + def setUp(self): + super(TestLookupModuleWithPasslib, self).setUp() + + # Different releases of passlib default to a different number of rounds + self.sha256 = passlib.registry.get_crypt_handler('pbkdf2_sha256') + sha256_for_tests = pbkdf2.create_pbkdf2_hash("sha256", 32, 20000) + passlib.registry.register_crypt_handler(sha256_for_tests, force=True) + + def tearDown(self): + super(TestLookupModuleWithPasslib, self).tearDown() + + passlib.registry.register_crypt_handler(self.sha256, force=True) + + @patch.object(PluginLoader, '_get_paths') + @patch('ansible.plugins.lookup.password._write_password_file') + def test_encrypt(self, mock_write_file, mock_get_paths): + mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] + + results = self.password_lookup.run([u'/path/to/somewhere encrypt=pbkdf2_sha256'], None) + + # pbkdf2 format plus hash + expected_password_length = 76 + + for result in results: + self.assertEqual(len(result), expected_password_length) + # result should split into 5 parts on '$' + str_parts = result.split('$', 5) + + # verify the result is parseable by passlib + crypt_parts = passlib.hash.pbkdf2_sha256.parsehash(result) + + # verify it used the right algo type + self.assertEqual(str_parts[1], 'pbkdf2-sha256') + + self.assertEqual(len(str_parts), 5) + + # verify the string and parsehash agree on the number of rounds + self.assertEqual(int(str_parts[2]), crypt_parts['rounds']) + self.assertIsInstance(result, text_type) + + @patch.object(PluginLoader, '_get_paths') + @patch('ansible.plugins.lookup.password._write_password_file') + def test_password_already_created_encrypt(self, mock_write_file, mock_get_paths): + mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] + password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') + + with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: + results = self.password_lookup.run([u'/path/to/somewhere chars=anything encrypt=pbkdf2_sha256'], None) + for result in results: + self.assertEqual(result, u'$pbkdf2-sha256$20000$ODc2NTQzMjE$Uikde0cv0BKaRaAXMrUQB.zvG4GmnjClwjghwIRf2gU') diff --git a/test/units/plugins/shell/__init__.py b/test/units/plugins/shell/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/shell/test_cmd.py
b/test/units/plugins/shell/test_cmd.py new file mode 100644 index 00000000..4c1a654b --- /dev/null +++ b/test/units/plugins/shell/test_cmd.py @@ -0,0 +1,19 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.plugins.shell.cmd import ShellModule + + +@pytest.mark.parametrize('s, expected', [ + ['arg1', 'arg1'], + [None, '""'], + ['arg1 and 2', '^"arg1 and 2^"'], + ['malicious argument\\"&whoami', '^"malicious argument\\\\^"^&whoami^"'], + ['C:\\temp\\some ^%file% > nul', '^"C:\\temp\\some ^^^%file^% ^> nul^"'] +]) +def test_quote_args(s, expected): + cmd = ShellModule() + actual = cmd.quote(s) + assert actual == expected diff --git a/test/units/plugins/shell/test_powershell.py b/test/units/plugins/shell/test_powershell.py new file mode 100644 index 00000000..c94baabb --- /dev/null +++ b/test/units/plugins/shell/test_powershell.py @@ -0,0 +1,83 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.shell.powershell import _parse_clixml, ShellModule + + +def test_parse_clixml_empty(): + empty = b'#< CLIXML\r\n' + expected = b'' + actual = _parse_clixml(empty) + assert actual == expected + + +def test_parse_clixml_with_progress(): + progress = b'#< CLIXML\r\n' \ + b'System.Management.Automation.PSCustomObjectSystem.Object' \ + b'1Preparing modules for first use.0' \ + b'-1-1Completed-1 ' + expected = b'' + actual = _parse_clixml(progress) + assert actual == expected + + +def test_parse_clixml_single_stream(): + single_stream = b'#< CLIXML\r\n' \ + b'fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_' \ + b'the spelling of the name, or if a path was included._x000D__x000A_' \ + b'At line:1 char:1_x000D__x000A_' \ + b'+ fake cmdlet_x000D__x000A_+ ~~~~_x000D__x000A_' \ + b' + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_' \ + b' + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_ _x000D__x000A_' \ + b'' + expected = b"fake : The term 'fake' is not recognized as the name of a cmdlet. Check \r\n" \ + b"the spelling of the name, or if a path was included.\r\n" \ + b"At line:1 char:1\r\n" \ + b"+ fake cmdlet\r\n" \ + b"+ ~~~~\r\n" \ + b" + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException\r\n" \ + b" + FullyQualifiedErrorId : CommandNotFoundException\r\n " + actual = _parse_clixml(single_stream) + assert actual == expected + + +def test_parse_clixml_multiple_streams(): + multiple_stream = b'#< CLIXML\r\n' \ + b'fake : The term \'fake\' is not recognized as the name of a cmdlet. 
Check _x000D__x000A_' \ + b'the spelling of the name, or if a path was included._x000D__x000A_' \ + b'At line:1 char:1_x000D__x000A_' \ + b'+ fake cmdlet_x000D__x000A_+ ~~~~_x000D__x000A_' \ + b' + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_' \ + b' + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_ _x000D__x000A_' \ + b'hi info' \ + b'' + expected = b"hi info" + actual = _parse_clixml(multiple_stream, stream="Info") + assert actual == expected + + +def test_parse_clixml_multiple_elements(): + multiple_elements = b'#< CLIXML\r\n#< CLIXML\r\n' \ + b'System.Management.Automation.PSCustomObjectSystem.Object' \ + b'1Preparing modules for first use.0' \ + b'-1-1Completed-1 ' \ + b'Error 1' \ + b'' \ + b'System.Management.Automation.PSCustomObjectSystem.Object' \ + b'1Preparing modules for first use.0' \ + b'-1-1Completed-1 ' \ + b'2' \ + b'Preparing modules for first use.0' \ + b'-1-1Completed-1 ' \ + b'Error 2' + expected = b"Error 1\r\nError 2" + actual = _parse_clixml(multiple_elements) + assert actual == expected + + +def test_join_path_unc(): + pwsh = ShellModule() + unc_path_parts = ['\\\\host\\share\\dir1\\\\dir2\\', '\\dir3/dir4', 'dir5', 'dir6\\'] + expected = '\\\\host\\share\\dir1\\dir2\\dir3\\dir4\\dir5\\dir6' + actual = pwsh.join_path(*unc_path_parts) + assert actual == expected diff --git a/test/units/plugins/strategy/__init__.py b/test/units/plugins/strategy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/plugins/strategy/test_linear.py b/test/units/plugins/strategy/test_linear.py new file mode 100644 index 00000000..74887030 --- /dev/null +++ b/test/units/plugins/strategy/test_linear.py @@ -0,0 +1,177 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from units.compat import unittest +from units.compat.mock import patch, MagicMock + +from ansible.executor.play_iterator import PlayIterator +from ansible.playbook import Playbook +from ansible.playbook.play_context import PlayContext +from ansible.plugins.strategy.linear import StrategyModule +from ansible.executor.task_queue_manager import TaskQueueManager + +from units.mock.loader import DictDataLoader +from units.mock.path import mock_unfrackpath_noop + + +class TestStrategyLinear(unittest.TestCase): + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_noop(self): + fake_loader = DictDataLoader({ + "test_play.yml": """ + - hosts: all + gather_facts: no + tasks: + - block: + - block: + - name: task1 + debug: msg='task1' + failed_when: inventory_hostname == 'host01' + + - name: task2 + debug: msg='task2' + + rescue: + - name: rescue1 + debug: msg='rescue1' + + - name: rescue2 + debug: msg='rescue2' + """, + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + inventory = MagicMock() + inventory.hosts = {} + hosts = [] + for i in range(0, 2): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + hosts.append(host) + inventory.hosts[host.name] = host + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + mock_var_manager._fact_cache['host00'] = dict() + + 
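# What this test pins down: the linear strategy keeps hosts in lockstep, so at
# each step a host receives either its real next task or a "meta: noop"
# placeholder while other hosts work through a divergent branch (here, the
# rescue section). A rough sketch of that selection idea, not the actual
# StrategyModule internals:
def lockstep_steps(host_tasks):
    # host_tasks: dict mapping host name -> list of remaining task names
    while any(host_tasks.values()):
        # every host gets an entry each step; idle hosts get a noop
        yield [(host, tasks.pop(0) if tasks else 'meta: noop')
               for host, tasks in host_tasks.items()]
# The assertions below walk exactly this kind of sequence for host00/host01.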
play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + tqm = TaskQueueManager( + inventory=inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + passwords=None, + forks=5, + ) + tqm._initialize_processes(3) + strategy = StrategyModule(tqm) + strategy._hosts_cache = [h.name for h in hosts] + strategy._hosts_cache_all = [h.name for h in hosts] + + # implicit meta: flush_handlers + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'meta') + self.assertEqual(host2_task.action, 'meta') + + # debug: task1, debug: task1 + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'debug') + self.assertEqual(host2_task.action, 'debug') + self.assertEqual(host1_task.name, 'task1') + self.assertEqual(host2_task.name, 'task1') + + # mark the second host failed + itr.mark_host_failed(hosts[1]) + + # debug: task2, meta: noop + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'debug') + self.assertEqual(host2_task.action, 'meta') + self.assertEqual(host1_task.name, 'task2') + self.assertEqual(host2_task.name, '') + + # meta: noop, debug: rescue1 + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'meta') + self.assertEqual(host2_task.action, 'debug') + self.assertEqual(host1_task.name, '') + self.assertEqual(host2_task.name, 'rescue1') + + # meta: noop, debug: rescue2 + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'meta') + self.assertEqual(host2_task.action, 'debug') + self.assertEqual(host1_task.name, '') + self.assertEqual(host2_task.name, 'rescue2') + + # implicit meta: flush_handlers + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'meta') + self.assertEqual(host2_task.action, 'meta') + + # implicit meta: flush_handlers + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNotNone(host1_task) + self.assertIsNotNone(host2_task) + self.assertEqual(host1_task.action, 'meta') + self.assertEqual(host2_task.action, 'meta') + + # end of 
iteration + hosts_left = strategy.get_hosts_left(itr) + hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr) + host1_task = hosts_tasks[0][1] + host2_task = hosts_tasks[1][1] + self.assertIsNone(host1_task) + self.assertIsNone(host2_task) diff --git a/test/units/plugins/strategy/test_strategy.py b/test/units/plugins/strategy/test_strategy.py new file mode 100644 index 00000000..9a2574d2 --- /dev/null +++ b/test/units/plugins/strategy/test_strategy.py @@ -0,0 +1,546 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.mock.loader import DictDataLoader +from copy import deepcopy +import uuid + +from units.compat import unittest +from units.compat.mock import patch, MagicMock +from ansible.executor.process.worker import WorkerProcess +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.executor.task_result import TaskResult +from ansible.inventory.host import Host +from ansible.module_utils.six.moves import queue as Queue +from ansible.playbook.handler import Handler +from ansible.plugins.strategy import StrategyBase + + +class TestStrategyBase(unittest.TestCase): + + def test_strategy_base_init(self): + queue_items = [] + + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + def _queue_put(item, *args, **kwargs): + queue_items.append(item) + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_queue.put.side_effect = _queue_put + + mock_tqm = MagicMock(TaskQueueManager) + mock_tqm._final_q = mock_queue + mock_tqm._workers = [] + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base.cleanup() + + def test_strategy_base_run(self): + queue_items = [] + + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + def _queue_put(item, *args, **kwargs): + queue_items.append(item) + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_queue.put.side_effect = _queue_put + + mock_tqm = MagicMock(TaskQueueManager) + mock_tqm._final_q = mock_queue + mock_tqm._stats = MagicMock() + mock_tqm.send_callback.return_value = None + + for attr in ('RUN_OK', 'RUN_ERROR', 'RUN_FAILED_HOSTS', 'RUN_UNREACHABLE_HOSTS'): + setattr(mock_tqm, attr, getattr(TaskQueueManager, attr)) + + mock_iterator = MagicMock() + mock_iterator._play = MagicMock() + mock_iterator._play.handlers = [] + + mock_play_context = MagicMock() + + mock_tqm._failed_hosts = dict() + mock_tqm._unreachable_hosts = dict() + mock_tqm._workers = [] + strategy_base = 
StrategyBase(tqm=mock_tqm) + + mock_host = MagicMock() + mock_host.name = 'host1' + + self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), mock_tqm.RUN_OK) + self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=TaskQueueManager.RUN_ERROR), mock_tqm.RUN_ERROR) + mock_tqm._failed_hosts = dict(host1=True) + mock_iterator.get_failed_hosts.return_value = [mock_host] + self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_FAILED_HOSTS) + mock_tqm._unreachable_hosts = dict(host1=True) + mock_iterator.get_failed_hosts.return_value = [] + self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS) + strategy_base.cleanup() + + def test_strategy_base_get_hosts(self): + queue_items = [] + + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + def _queue_put(item, *args, **kwargs): + queue_items.append(item) + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_queue.put.side_effect = _queue_put + + mock_hosts = [] + for i in range(0, 5): + mock_host = MagicMock() + mock_host.name = "host%02d" % (i + 1) + mock_host.has_hostkey = True + mock_hosts.append(mock_host) + + mock_hosts_names = [h.name for h in mock_hosts] + + mock_inventory = MagicMock() + mock_inventory.get_hosts.return_value = mock_hosts + + mock_tqm = MagicMock() + mock_tqm._final_q = mock_queue + mock_tqm.get_inventory.return_value = mock_inventory + + mock_play = MagicMock() + mock_play.hosts = ["host%02d" % (i + 1) for i in range(0, 5)] + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._hosts_cache = strategy_base._hosts_cache_all = mock_hosts_names + + mock_tqm._failed_hosts = [] + mock_tqm._unreachable_hosts = [] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts]) + + mock_tqm._failed_hosts = ["host01"] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[1:]]) + self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0].name]) + + mock_tqm._unreachable_hosts = ["host02"] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[2:]]) + strategy_base.cleanup() + + @patch.object(WorkerProcess, 'run') + def test_strategy_base_queue_task(self, mock_worker): + def fake_run(self): + return + + mock_worker.run.side_effect = fake_run + + fake_loader = DictDataLoader() + mock_var_manager = MagicMock() + mock_host = MagicMock() + mock_host.get_vars.return_value = dict() + mock_host.has_hostkey = True + mock_inventory = MagicMock() + mock_inventory.get.return_value = mock_host + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + passwords=None, + forks=3, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() + + mock_task = MagicMock() + mock_task._uuid = 'abcd' + mock_task.throttle = 0 + + try: + strategy_base = StrategyBase(tqm=tqm) + strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 1) + strategy_base._queue_task(host=mock_host, 
task=mock_task, task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 2) + self.assertEqual(strategy_base._pending_results, 2) + strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 0) + self.assertEqual(strategy_base._pending_results, 3) + finally: + tqm.cleanup() + + def test_strategy_base_process_pending_results(self): + mock_tqm = MagicMock() + mock_tqm._terminated = False + mock_tqm._failed_hosts = dict() + mock_tqm._unreachable_hosts = dict() + mock_tqm.send_callback.return_value = None + + queue_items = [] + + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + def _queue_put(item, *args, **kwargs): + queue_items.append(item) + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_queue.put.side_effect = _queue_put + mock_tqm._final_q = mock_queue + + mock_tqm._stats = MagicMock() + mock_tqm._stats.increment.return_value = None + + mock_play = MagicMock() + + mock_host = MagicMock() + mock_host.name = 'test01' + mock_host.vars = dict() + mock_host.get_vars.return_value = dict() + mock_host.has_hostkey = True + + mock_task = MagicMock() + mock_task._role = None + mock_task._parent = None + mock_task.ignore_errors = False + mock_task.ignore_unreachable = False + mock_task._uuid = uuid.uuid4() + mock_task.loop = None + mock_task.copy.return_value = mock_task + + mock_handler_task = Handler() + mock_handler_task.name = 'test handler' + mock_handler_task.action = 'foo' + mock_handler_task._parent = None + mock_handler_task._uuid = 'xxxxxxxxxxxxx' + + mock_iterator = MagicMock() + mock_iterator._play = mock_play + mock_iterator.mark_host_failed.return_value = None + mock_iterator.get_next_task_for_host.return_value = (None, None) + + mock_handler_block = MagicMock() + mock_handler_block.block = [mock_handler_task] + mock_handler_block.rescue = [] + mock_handler_block.always = [] + mock_play.handlers = [mock_handler_block] + + mock_group = MagicMock() + mock_group.add_host.return_value = None + + def _get_host(host_name): + if host_name == 'test01': + return mock_host + return None + + def _get_group(group_name): + if group_name in ('all', 'foo'): + return mock_group + return None + + mock_inventory = MagicMock() + mock_inventory._hosts_cache = dict() + mock_inventory.hosts.return_value = mock_host + mock_inventory.get_host.side_effect = _get_host + mock_inventory.get_group.side_effect = _get_group + mock_inventory.clear_pattern_cache.return_value = None + mock_inventory.get_host_vars.return_value = {} + mock_inventory.hosts.get.return_value = mock_host + + mock_var_mgr = MagicMock() + mock_var_mgr.set_host_variable.return_value = None + mock_var_mgr.set_host_facts.return_value = None + mock_var_mgr.get_vars.return_value = dict() + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._inventory = mock_inventory + strategy_base._variable_manager = mock_var_mgr + strategy_base._blocked_hosts = dict() + + def _has_dead_workers(): + return False + + strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + + task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True)) + 
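# The bookkeeping exercised from here on: every queued TaskResult is matched
# back to its origin via the (host_name, task_uuid) key in _queued_task_cache;
# draining one result decrements _pending_results and unblocks the host. A toy
# model of that accounting (simplified, not the real _process_pending_results):
def drain_one(cache, blocked, pending, host_name, task_uuid):
    entry = cache.pop((host_name, task_uuid))  # KeyError would mean a stray result
    blocked.pop(host_name, None)               # host may be scheduled again
    return pending - 1, entry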
queue_items.append(task_result) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + + mock_queued_task_cache = { + (mock_host.name, mock_task._uuid): { + 'task': mock_task, + 'host': mock_host, + 'task_vars': {}, + 'play_context': {}, + } + } + + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}') + queue_items.append(task_result) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + mock_iterator.is_failed.return_value = True + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + # self.assertIn('test01', mock_tqm._failed_hosts) + # del mock_tqm._failed_hosts['test01'] + mock_iterator.is_failed.return_value = False + + task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}') + queue_items.append(task_result) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + self.assertIn('test01', mock_tqm._unreachable_hosts) + del mock_tqm._unreachable_hosts['test01'] + + task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}') + queue_items.append(task_result) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo'])))) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo')))) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + 
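# Results are more than pass/fail: a payload carrying 'add_host' or 'add_group'
# makes the strategy mutate the shared inventory while results are drained,
# which is why mock_inventory.get_group and mock_group.add_host are stubbed
# above. Roughly (an illustrative reduction, not the actual handler code):
def apply_inventory_side_effects(result_data, inventory):
    if 'add_host' in result_data:
        inventory.add_host(result_data['add_host']['host_name'])
    if 'add_group' in result_data:
        inventory.add_group(result_data['add_group']['group_name'])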
self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler']))) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache) + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + self.assertTrue(mock_handler_task.is_host_notified(mock_host)) + + # queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar')) + # results = strategy_base._process_pending_results(iterator=mock_iterator) + # self.assertEqual(len(results), 0) + # self.assertEqual(strategy_base._pending_results, 1) + + # queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict())) + # results = strategy_base._process_pending_results(iterator=mock_iterator) + # self.assertEqual(len(results), 0) + # self.assertEqual(strategy_base._pending_results, 1) + + # queue_items.append(('bad')) + # self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator) + strategy_base.cleanup() + + def test_strategy_base_load_included_file(self): + fake_loader = DictDataLoader({ + "test.yml": """ + - debug: msg='foo' + """, + "bad.yml": """ + """, + }) + + queue_items = [] + + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + def _queue_put(item, *args, **kwargs): + queue_items.append(item) + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_queue.put.side_effect = _queue_put + + mock_tqm = MagicMock() + mock_tqm._final_q = mock_queue + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._loader = fake_loader + strategy_base.cleanup() + + mock_play = MagicMock() + + mock_block = MagicMock() + mock_block._play = mock_play + mock_block.vars = dict() + + mock_task = MagicMock() + mock_task._block = mock_block + mock_task._role = None + mock_task._parent = None + + mock_iterator = MagicMock() + mock_iterator.mark_host_failed.return_value = None + + mock_inc_file = MagicMock() + mock_inc_file._task = mock_task + + mock_inc_file._filename = "test.yml" + res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) + + mock_inc_file._filename = "bad.yml" + res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) + self.assertEqual(res, []) + + @patch.object(WorkerProcess, 'run') + def test_strategy_base_run_handlers(self, mock_worker): + def fake_run(*args): + return + mock_worker.side_effect = fake_run + mock_play_context = MagicMock() + + mock_handler_task = Handler() + mock_handler_task.action = 'foo' + mock_handler_task.cached_name = False + mock_handler_task.name = "test handler" + mock_handler_task.listen = [] + mock_handler_task._role = None + mock_handler_task._parent = None + mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx' + + mock_handler = MagicMock() + mock_handler.block = [mock_handler_task] + mock_handler.flag_for_host.return_value = False + + mock_play = MagicMock() + mock_play.handlers = [mock_handler] + + mock_host = MagicMock(Host) + 
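# run_handlers, driven below, walks play.handlers and fires each handler once
# per host that notified it (compare the is_host_notified assertion earlier).
# The selection loop reduced to its core (not the actual implementation):
def iter_notified_handlers(play, hosts):
    for block in play.handlers:
        for handler in block.block:
            for host in hosts:
                if handler.is_host_notified(host):
                    yield handler, host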
mock_host.name = "test01" + mock_host.has_hostkey = True + + mock_inventory = MagicMock() + mock_inventory.get_hosts.return_value = [mock_host] + mock_inventory.get.return_value = mock_host + mock_inventory.get_host.return_value = mock_host + + mock_var_mgr = MagicMock() + mock_var_mgr.get_vars.return_value = dict() + + mock_iterator = MagicMock() + mock_iterator._play = mock_play + + fake_loader = DictDataLoader() + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_mgr, + loader=fake_loader, + passwords=None, + forks=5, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() + + try: + strategy_base = StrategyBase(tqm=tqm) + + strategy_base._inventory = mock_inventory + + task_result = TaskResult(mock_host.name, mock_handler_task._uuid, dict(changed=False)) + strategy_base._queued_task_cache = dict() + strategy_base._queued_task_cache[(mock_host.name, mock_handler_task._uuid)] = { + 'task': mock_handler_task, + 'host': mock_host, + 'task_vars': {}, + 'play_context': mock_play_context + } + tqm._final_q.put(task_result) + + result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + finally: + strategy_base.cleanup() + tqm.cleanup() diff --git a/test/units/plugins/test_plugins.py b/test/units/plugins/test_plugins.py new file mode 100644 index 00000000..c9d80cda --- /dev/null +++ b/test/units/plugins/test_plugins.py @@ -0,0 +1,134 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
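# The PluginLoader tests that follow center on path handling and duplicate
# suppression: _load_module_source() and all() keep only the first plugin file
# seen per basename across the configured paths. The dedup idea in miniature
# (an illustrative helper, not PluginLoader's real code):
import os

def first_wins(files):
    seen, kept = set(), []
    for path in files:
        name = os.path.splitext(os.path.basename(path))[0]
        if name not in seen:  # later duplicates are ignored
            seen.add(name)
            kept.append(path)
    return kept

# first_wins(['/a/import_fixture.py', '/b/import_fixture.py'])
# -> ['/a/import_fixture.py']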
+ + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from units.compat import unittest +from units.compat.builtins import BUILTINS +from units.compat.mock import patch, MagicMock +from ansible.plugins.loader import PluginLoader, PluginPathContext + + +class TestErrors(unittest.TestCase): + + @patch.object(PluginLoader, '_get_paths') + def test_print_paths(self, mock_method): + mock_method.return_value = ['/path/one', '/path/two', '/path/three'] + pl = PluginLoader('foo', 'foo', '', 'test_plugins') + paths = pl.print_paths() + expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three']) + self.assertEqual(paths, expected_paths) + + def test_plugins__get_package_paths_no_package(self): + pl = PluginLoader('test', '', 'test', 'test_plugin') + self.assertEqual(pl._get_package_paths(), []) + + def test_plugins__get_package_paths_with_package(self): + # the _get_package_paths() call uses __import__ to load a + # python library, and then uses the __file__ attribute of + # the result for that to get the library path, so we mock + # that here and patch the builtin to use our mocked result + foo = MagicMock() + bar = MagicMock() + bam = MagicMock() + bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py' + bar.bam = bam + foo.return_value.bar = bar + pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin') + with patch('{0}.__import__'.format(BUILTINS), foo): + self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam']) + + def test_plugins__get_paths(self): + pl = PluginLoader('test', '', 'test', 'test_plugin') + pl._paths = [PluginPathContext('/path/one', False), + PluginPathContext('/path/two', True)] + self.assertEqual(pl._get_paths(), ['/path/one', '/path/two']) + + # NOT YET WORKING + # def fake_glob(path): + # if path == 'test/*': + # return ['test/foo', 'test/bar', 'test/bam'] + # elif path == 'test/*/*' + # m._paths = None + # mock_glob = MagicMock() + # mock_glob.return_value = [] + # with patch('glob.glob', mock_glob): + # pass + + def assertPluginLoaderConfigBecomes(self, arg, expected): + pl = PluginLoader('test', '', arg, 'test_plugin') + self.assertEqual(pl.config, expected) + + def test_plugin__init_config_list(self): + config = ['/one', '/two'] + self.assertPluginLoaderConfigBecomes(config, config) + + def test_plugin__init_config_str(self): + self.assertPluginLoaderConfigBecomes('test', ['test']) + + def test_plugin__init_config_none(self): + self.assertPluginLoaderConfigBecomes(None, []) + + def test__load_module_source_no_duplicate_names(self): + ''' + This test simulates importing 2 plugins with the same name, + and validating that the import is short circuited if a file with the same name + has already been imported + ''' + + fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures') + + pl = PluginLoader('test', '', 'test', 'test_plugin') + one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py')) + # This line wouldn't even succeed if we didn't short circuit on finding a duplicate name + two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py') + + self.assertEqual(one, two) + + @patch('ansible.plugins.loader.glob') + @patch.object(PluginLoader, '_get_paths') + def test_all_no_duplicate_names(self, gp_mock, glob_mock): + ''' + This test goes along with ``test__load_module_source_no_duplicate_names`` + and ensures that we ignore duplicate imports on multiple paths + ''' + + fixture_path = 
os.path.join(os.path.dirname(__file__), 'loader_fixtures') + + gp_mock.return_value = [ + fixture_path, + '/path/to' + ] + + glob_mock.glob.side_effect = [ + [os.path.join(fixture_path, 'import_fixture.py')], + ['/path/to/import_fixture.py'] + ] + + pl = PluginLoader('test', '', 'test', 'test_plugin') + # Aside from needing ``list()`` so we can do a len, ``PluginLoader.all`` returns a generator + # so ``list()`` actually causes ``PluginLoader.all`` to run. + plugins = list(pl.all()) + self.assertEqual(len(plugins), 1) + + self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache) + self.assertNotIn('/path/to/import_fixture.py', pl._module_cache) diff --git a/test/units/regex/test_invalid_var_names.py b/test/units/regex/test_invalid_var_names.py new file mode 100644 index 00000000..d47e68d3 --- /dev/null +++ b/test/units/regex/test_invalid_var_names.py @@ -0,0 +1,27 @@ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest + +from ansible import constants as C + + +test_cases = (('not-valid', ['-'], 'not_valid'), ('not!valid@either', ['!', '@'], 'not_valid_either'), ('1_nor_This', ['1'], '__nor_This')) + + +class TestInvalidVars(unittest.TestCase): + + def test_positive_matches(self): + + for name, invalid, sanitized in test_cases: + self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), invalid) + + def test_negative_matches(self): + for name in ('this_is_valid', 'Also_1_valid', 'noproblem'): + self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), []) + + def test_get_setting(self): + + for name, invalid, sanitized in test_cases: + self.assertEqual(C.INVALID_VARIABLE_NAMES.sub('_', name), sanitized) diff --git a/test/units/requirements.txt b/test/units/requirements.txt new file mode 100644 index 00000000..153500e3 --- /dev/null +++ b/test/units/requirements.txt @@ -0,0 +1,6 @@ +pycrypto +passlib +pywinrm +pytz +unittest2 ; python_version < '2.7' +pexpect diff --git a/test/units/template/__init__.py b/test/units/template/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/template/test_native_concat.py b/test/units/template/test_native_concat.py new file mode 100644 index 00000000..db85a73b --- /dev/null +++ b/test/units/template/test_native_concat.py @@ -0,0 +1,28 @@ +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible import constants as C +from ansible.errors import AnsibleUndefinedVariable + +# need to mock DEFAULT_JINJA2_NATIVE here so native modules are imported +# correctly within the template module +C.DEFAULT_JINJA2_NATIVE = True +from ansible.template import Templar + +from units.mock.loader import DictDataLoader + + +# https://github.com/ansible/ansible/issues/52158 +def test_undefined_variable(): + fake_loader = DictDataLoader({}) + variables = {} + templar = Templar(loader=fake_loader, variables=variables) + + with pytest.raises(AnsibleUndefinedVariable): + templar.template("{{ missing }}") diff --git a/test/units/template/test_safe_eval.py b/test/units/template/test_safe_eval.py new file mode 100644 index 00000000..89ff8a0e --- /dev/null +++ b/test/units/template/test_safe_eval.py @@ -0,0 +1,44 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free 
software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +from collections import defaultdict + +from units.compat import unittest +from ansible.template.safe_eval import safe_eval + + +class TestSafeEval(unittest.TestCase): + + def test_safe_eval_usage(self): + # test safe eval calls with different possible types for the + # locals dictionary, to ensure we don't run into problems like + # ansible/ansible/issues/12206 again + for locals_vars in (dict(), defaultdict(dict)): + self.assertEqual(safe_eval('True', locals=locals_vars), True) + self.assertEqual(safe_eval('False', locals=locals_vars), False) + self.assertEqual(safe_eval('0', locals=locals_vars), 0) + self.assertEqual(safe_eval('[]', locals=locals_vars), []) + self.assertEqual(safe_eval('{}', locals=locals_vars), {}) + + @unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals") + def test_set_literals(self): + self.assertEqual(safe_eval('{0}'), set([0])) diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py new file mode 100644 index 00000000..dd6985ce --- /dev/null +++ b/test/units/template/test_templar.py @@ -0,0 +1,446 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
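# The Templar tests that follow share one fixture pattern: build a Templar over
# a DictDataLoader plus a plain variables dict, then render strings through
# template(). Minimal standalone usage, with the same calls the tests make:
from ansible.template import Templar
from units.mock.loader import DictDataLoader

templar = Templar(loader=DictDataLoader({}),
                  variables={'foo': 'bar', 'bam': '{{foo}}'})
assert templar.template('{{foo}}') == 'bar'                  # direct render
assert templar.template('bam', convert_bare=True) == 'bar'   # bare-name render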
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from jinja2.runtime import Context + +from units.compat import unittest +from units.compat.mock import patch + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleUndefinedVariable +from ansible.module_utils.six import string_types +from ansible.template import Templar, AnsibleContext, AnsibleEnvironment, AnsibleUndefined +from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var +from units.mock.loader import DictDataLoader + + +class BaseTemplar(object): + def setUp(self): + self.test_vars = dict( + foo="bar", + bam="{{foo}}", + num=1, + var_true=True, + var_false=False, + var_dict=dict(a="b"), + bad_dict="{a='b'", + var_list=[1], + recursive="{{recursive}}", + some_var="blip", + some_static_var="static_blip", + some_keyword="{{ foo }}", + some_unsafe_var=wrap_var("unsafe_blip"), + some_static_unsafe_var=wrap_var("static_unsafe_blip"), + some_unsafe_keyword=wrap_var("{{ foo }}"), + str_with_error="{{ 'str' | from_json }}", + ) + self.fake_loader = DictDataLoader({ + "/path/to/my_file.txt": "foo\n", + }) + self.templar = Templar(loader=self.fake_loader, variables=self.test_vars) + self._ansible_context = AnsibleContext(self.templar.environment, {}, {}, {}) + + def is_unsafe(self, obj): + return self._ansible_context._is_unsafe(obj) + + +# class used for testing arbitrary objects passed to template +class SomeClass(object): + foo = 'bar' + + def __init__(self): + self.blip = 'blip' + + +class SomeUnsafeClass(AnsibleUnsafe): + def __init__(self): + super(SomeUnsafeClass, self).__init__() + self.blip = 'unsafe blip' + + +class TestTemplarTemplate(BaseTemplar, unittest.TestCase): + def test_lookup_jinja_dict_key_in_static_vars(self): + res = self.templar.template("{'some_static_var': '{{ some_var }}'}", + static_vars=['some_static_var']) + # self.assertEqual(res['{{ a_keyword }}'], "blip") + print(res) + + def test_is_possibly_template_true(self): + tests = [ + '{{ foo }}', + '{% foo %}', + '{# foo #}', + '{# {{ foo }} #}', + '{# {{ nothing }} {# #}', + '{# {{ nothing }} {# #} #}', + '{% raw %}{{ foo }}{% endraw %}', + '{{', + '{%', + '{#', + '{% raw', + ] + for test in tests: + self.assertTrue(self.templar.is_possibly_template(test)) + + def test_is_possibly_template_false(self): + tests = [ + '{', + '%', + '#', + 'foo', + '}}', + '%}', + 'raw %}', + '#}', + ] + for test in tests: + self.assertFalse(self.templar.is_possibly_template(test)) + + def test_is_possible_template(self): + """This test ensures that a broken template still gets templated""" + # Purposefully invalid jinja + self.assertRaises(AnsibleError, self.templar.template, '{{ foo|default(False)) }}') + + def test_is_template_true(self): + tests = [ + '{{ foo }}', + '{% foo %}', + '{# foo #}', + '{# {{ foo }} #}', + '{# {{ nothing }} {# #}', + '{# {{ nothing }} {# #} #}', + '{% raw %}{{ foo }}{% endraw %}', + ] + for test in tests: + self.assertTrue(self.templar.is_template(test)) + + def test_is_template_false(self): + tests = [ + 'foo', + '{{ foo', + '{% foo', + '{# foo', + '{{ foo %}', + '{{ foo #}', + '{% foo }}', + '{% foo #}', + '{# foo %}', + '{# foo }}', + '{{ foo {{', + '{% raw %}{% foo %}', + ] + for test in tests: + self.assertFalse(self.templar.is_template(test)) + + def test_is_template_raw_string(self): + res = self.templar.is_template('foo') + self.assertFalse(res) + + def test_is_template_none(self): + res = self.templar.is_template(None) + 
self.assertFalse(res) + + def test_template_convert_bare_string(self): + res = self.templar.template('foo', convert_bare=True) + self.assertEqual(res, 'bar') + + def test_template_convert_bare_nested(self): + res = self.templar.template('bam', convert_bare=True) + self.assertEqual(res, 'bar') + + def test_template_convert_bare_unsafe(self): + res = self.templar.template('some_unsafe_var', convert_bare=True) + self.assertEqual(res, 'unsafe_blip') + # self.assertIsInstance(res, AnsibleUnsafe) + self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res) + + def test_template_convert_bare_filter(self): + res = self.templar.template('bam|capitalize', convert_bare=True) + self.assertEqual(res, 'Bar') + + def test_template_convert_bare_filter_unsafe(self): + res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True) + self.assertEqual(res, 'Unsafe_blip') + # self.assertIsInstance(res, AnsibleUnsafe) + self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res) + + def test_template_convert_data(self): + res = self.templar.template('{{foo}}', convert_data=True) + self.assertTrue(res) + self.assertEqual(res, 'bar') + + @patch('ansible.template.safe_eval', side_effect=AnsibleError) + def test_template_convert_data_template_in_data(self, mock_safe_eval): + res = self.templar.template('{{bam}}', convert_data=True) + self.assertTrue(res) + self.assertEqual(res, 'bar') + + def test_template_convert_data_bare(self): + res = self.templar.template('bam', convert_data=True) + self.assertTrue(res) + self.assertEqual(res, 'bam') + + def test_template_convert_data_to_json(self): + res = self.templar.template('{{bam|to_json}}', convert_data=True) + self.assertTrue(res) + self.assertEqual(res, '"bar"') + + def test_template_convert_data_convert_bare_data_bare(self): + res = self.templar.template('bam', convert_data=True, convert_bare=True) + self.assertTrue(res) + self.assertEqual(res, 'bar') + + def test_template_unsafe_non_string(self): + unsafe_obj = AnsibleUnsafe() + res = self.templar.template(unsafe_obj) + self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res) + + def test_template_unsafe_non_string_subclass(self): + unsafe_obj = SomeUnsafeClass() + res = self.templar.template(unsafe_obj) + self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res) + + def test_weird(self): + data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7''' + self.assertRaisesRegexp(AnsibleError, + 'template error while templating string', + self.templar.template, + data) + + def test_template_with_error(self): + """Check that AnsibleError is raised, fail if an unhandled exception is raised""" + self.assertRaises(AnsibleError, self.templar.template, "{{ str_with_error }}") + + +class TestTemplarMisc(BaseTemplar, unittest.TestCase): + def test_templar_simple(self): + + templar = self.templar + # test some basic templating + self.assertEqual(templar.template("{{foo}}"), "bar") + self.assertEqual(templar.template("{{foo}}\n"), "bar\n") + self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n") + self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar") + self.assertEqual(templar.template("{{bam}}"), "bar") + self.assertEqual(templar.template("{{num}}"), 1) + 
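# Note what the surrounding assertions establish: output that parses as a
# Python literal comes back as the native type (1, True, dict(a="b"), [1]),
# while output that does not parse, such as "{a='b'", stays a plain string.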
self.assertEqual(templar.template("{{var_true}}"), True) + self.assertEqual(templar.template("{{var_false}}"), False) + self.assertEqual(templar.template("{{var_dict}}"), dict(a="b")) + self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'") + self.assertEqual(templar.template("{{var_list}}"), [1]) + self.assertEqual(templar.template(1, convert_bare=True), 1) + + # force errors + self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}") + self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}") + self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}") + self.assertRaises(AnsibleError, templar.template, "{{recursive}}") + self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}") + + # test with fail_on_undefined=False + self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}") + + # test setting available_variables + templar.available_variables = dict(foo="bam") + self.assertEqual(templar.template("{{foo}}"), "bam") + # variables must be a dict() for available_variables setter + # FIXME Use assertRaises() as a context manager (added in 2.7) once we do not run tests on Python 2.6 anymore. + try: + templar.available_variables = "foo=bam" + except AssertionError: + pass + except Exception as e: + self.fail(e) + + def test_templar_escape_backslashes(self): + # Rule of thumb: If escape backslashes is True you should end up with + # the same number of backslashes as when you started. + self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar") + self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar") + self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar") + self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar") + self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t") + self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t") + self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t") + self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t") + self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t") + self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t") + + def test_template_jinja2_extensions(self): + fake_loader = DictDataLoader({}) + templar = Templar(loader=fake_loader) + + old_exts = C.DEFAULT_JINJA2_EXTENSIONS + try: + C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar" + self.assertEqual(templar._get_extensions(), ['foo', 'bar']) + finally: + C.DEFAULT_JINJA2_EXTENSIONS = old_exts + + +class TestTemplarLookup(BaseTemplar, unittest.TestCase): + def test_lookup_missing_plugin(self): + self.assertRaisesRegexp(AnsibleError, + r'lookup plugin \(not_a_real_lookup_plugin\) not found', + self.templar._lookup, + 'not_a_real_lookup_plugin', + 'an_arg', a_keyword_arg='a_keyword_arg_value') + + def test_lookup_list(self): + res = self.templar._lookup('list', 'an_arg', 'another_arg') + self.assertEqual(res, 'an_arg,another_arg') + + def test_lookup_jinja_undefined(self): + self.assertRaisesRegexp(AnsibleUndefinedVariable, + "'an_undefined_jinja_var' is undefined", + self.templar._lookup, + 'list', '{{ an_undefined_jinja_var }}') + + def test_lookup_jinja_defined(self): + res = self.templar._lookup('list', 
'{{ some_var }}') + self.assertTrue(self.is_unsafe(res)) + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_dict_string_passed(self): + self.assertRaisesRegexp(AnsibleError, + "with_dict expects a dict", + self.templar._lookup, + 'dict', + '{{ some_var }}') + + def test_lookup_jinja_dict_list_passed(self): + self.assertRaisesRegexp(AnsibleError, + "with_dict expects a dict", + self.templar._lookup, + 'dict', + ['foo', 'bar']) + + def test_lookup_jinja_kwargs(self): + res = self.templar._lookup('list', 'blip', random_keyword='12345') + self.assertTrue(self.is_unsafe(res)) + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_list_wantlist(self): + res = self.templar._lookup('list', '{{ some_var }}', wantlist=True) + self.assertEqual(res, ["blip"]) + + def test_lookup_jinja_list_wantlist_undefined(self): + self.assertRaisesRegexp(AnsibleUndefinedVariable, + "'some_undefined_var' is undefined", + self.templar._lookup, + 'list', + '{{ some_undefined_var }}', + wantlist=True) + + def test_lookup_jinja_list_wantlist_unsafe(self): + res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True) + for lookup_result in res: + self.assertTrue(self.is_unsafe(lookup_result)) + # self.assertIsInstance(lookup_result, AnsibleUnsafe) + + # Should this be an AnsibleUnsafe + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_dict(self): + res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'}) + self.assertEqual(res['{{ a_keyword }}'], "blip") + # TODO: Should this be an AnsibleUnsafe + # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe) + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_dict_unsafe(self): + res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'}) + self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}'])) + # self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe) + # TODO: Should this be an AnsibleUnsafe + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_dict_unsafe_value(self): + res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'}) + self.assertTrue(self.is_unsafe(res['{{ a_keyword }}'])) + # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe) + # TODO: Should this be an AnsibleUnsafe + # self.assertIsInstance(res, AnsibleUnsafe) + + def test_lookup_jinja_none(self): + res = self.templar._lookup('list', None) + self.assertIsNone(res) + + +class TestAnsibleContext(BaseTemplar, unittest.TestCase): + def _context(self, variables=None): + variables = variables or {} + + env = AnsibleEnvironment() + context = AnsibleContext(env, parent={}, name='some_context', + blocks={}) + + for key, value in variables.items(): + context.vars[key] = value + + return context + + def test(self): + context = self._context() + self.assertIsInstance(context, AnsibleContext) + self.assertIsInstance(context, Context) + + def test_resolve_unsafe(self): + context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')}) + res = context.resolve('some_unsafe_key') + # self.assertIsInstance(res, AnsibleUnsafe) + self.assertTrue(self.is_unsafe(res), + 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res) + + def test_resolve_unsafe_list(self): + context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]}) + res = context.resolve('some_unsafe_key') + # self.assertIsInstance(res[0], AnsibleUnsafe) + 
self.assertTrue(self.is_unsafe(res), + 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res) + + def test_resolve_unsafe_dict(self): + context = self._context(variables={'some_unsafe_key': + {'an_unsafe_dict': wrap_var('some unsafe string 1')} + }) + res = context.resolve('some_unsafe_key') + self.assertTrue(self.is_unsafe(res['an_unsafe_dict']), + 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict']) + + def test_resolve(self): + context = self._context(variables={'some_key': 'some_string'}) + res = context.resolve('some_key') + self.assertEqual(res, 'some_string') + # self.assertNotIsInstance(res, AnsibleUnsafe) + self.assertFalse(self.is_unsafe(res), + 'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res) + + def test_resolve_none(self): + context = self._context(variables={'some_key': None}) + res = context.resolve('some_key') + self.assertEqual(res, None) + # self.assertNotIsInstance(res, AnsibleUnsafe) + self.assertFalse(self.is_unsafe(res), + 'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res) + + def test_is_unsafe(self): + context = self._context() + self.assertFalse(context._is_unsafe(AnsibleUndefined())) diff --git a/test/units/template/test_template_utilities.py b/test/units/template/test_template_utilities.py new file mode 100644 index 00000000..1044895f --- /dev/null +++ b/test/units/template/test_template_utilities.py @@ -0,0 +1,117 @@ +# (c) 2015 Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import jinja2 +from units.compat import unittest + +from ansible.template import AnsibleUndefined, _escape_backslashes, _count_newlines_from_end + +# These are internal utility functions only needed for templating. 
They're +# algorithmic so good candidates for unittesting by themselves + + +class TestBackslashEscape(unittest.TestCase): + + test_data = ( + # Test backslashes in a filter arg are double escaped + dict( + template=u"{{ 'test2 %s' | format('\\1') }}", + intermediate=u"{{ 'test2 %s' | format('\\\\1') }}", + expectation=u"test2 \\1", + args=dict() + ), + # Test backslashes inside the jinja2 var itself are double + # escaped + dict( + template=u"Test 2\\3: {{ '\\1 %s' | format('\\2') }}", + intermediate=u"Test 2\\3: {{ '\\\\1 %s' | format('\\\\2') }}", + expectation=u"Test 2\\3: \\1 \\2", + args=dict() + ), + # Test backslashes outside of the jinja2 var are not double + # escaped + dict( + template=u"Test 2\\3: {{ 'test2 %s' | format('\\1') }}; \\done", + intermediate=u"Test 2\\3: {{ 'test2 %s' | format('\\\\1') }}; \\done", + expectation=u"Test 2\\3: test2 \\1; \\done", + args=dict() + ), + # Test backslashes in a variable sent to a filter are handled + dict( + template=u"{{ 'test2 %s' | format(var1) }}", + intermediate=u"{{ 'test2 %s' | format(var1) }}", + expectation=u"test2 \\1", + args=dict(var1=u'\\1') + ), + # Test backslashes in a variable expanded by jinja2 are double + # escaped + dict( + template=u"Test 2\\3: {{ var1 | format('\\2') }}", + intermediate=u"Test 2\\3: {{ var1 | format('\\\\2') }}", + expectation=u"Test 2\\3: \\1 \\2", + args=dict(var1=u'\\1 %s') + ), + ) + + def setUp(self): + self.env = jinja2.Environment() + + def test_backslash_escaping(self): + + for test in self.test_data: + intermediate = _escape_backslashes(test['template'], self.env) + self.assertEqual(intermediate, test['intermediate']) + template = jinja2.Template(intermediate) + args = test['args'] + self.assertEqual(template.render(**args), test['expectation']) + + +class TestCountNewlines(unittest.TestCase): + + def test_zero_length_string(self): + self.assertEqual(_count_newlines_from_end(u''), 0) + + def test_short_string(self): + self.assertEqual(_count_newlines_from_end(u'The quick\n'), 1) + + def test_one_newline(self): + self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n'), 1) + + def test_multiple_newlines(self): + self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n\n\n'), 3) + + def test_zero_newlines(self): + self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000), 0) + + def test_all_newlines(self): + self.assertEqual(_count_newlines_from_end(u'\n' * 10), 10) + + def test_mostly_newlines(self): + self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' + u'\n' * 1000), 1000) + + +class TestAnsibleUndefined(unittest.TestCase): + def test_getattr(self): + val = AnsibleUndefined() + + self.assertIs(getattr(val, 'foo'), val) + + self.assertRaises(AttributeError, getattr, val, '__UNSAFE__') diff --git a/test/units/template/test_vars.py b/test/units/template/test_vars.py new file mode 100644 index 00000000..74e67839 --- /dev/null +++ b/test/units/template/test_vars.py @@ -0,0 +1,81 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat import unittest +from units.compat.mock import MagicMock + +from ansible.template.vars import AnsibleJ2Vars + + +class TestVars(unittest.TestCase): + def setUp(self): + self.mock_templar = MagicMock(name='mock_templar') + + def test(self): + ajvars = AnsibleJ2Vars(None, None) + print(ajvars) + + def test_globals_empty_2_8(self): + ajvars = AnsibleJ2Vars(self.mock_templar, {}) + res28 = self._dict_jinja28(ajvars) + self.assertIsInstance(res28, dict) + + def test_globals_empty_2_9(self): + ajvars = AnsibleJ2Vars(self.mock_templar, {}) + res29 = self._dict_jinja29(ajvars) + self.assertIsInstance(res29, dict) + + def _assert_globals(self, res): + self.assertIsInstance(res, dict) + self.assertIn('foo', res) + self.assertEqual(res['foo'], 'bar') + + def test_globals_2_8(self): + ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]}) + res28 = self._dict_jinja28(ajvars) + self._assert_globals(res28) + + def test_globals_2_9(self): + ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]}) + res29 = self._dict_jinja29(ajvars) + self._assert_globals(res29) + + def _dicts(self, ajvars): + print(ajvars) + res28 = self._dict_jinja28(ajvars) + res29 = self._dict_jinja29(ajvars) + # res28_other = self._dict_jinja28(ajvars, {'other_key': 'other_value'}) + # other = {'other_key': 'other_value'} + # res29_other = self._dict_jinja29(ajvars, *other) + print('res28: %s' % res28) + print('res29: %s' % res29) + # print('res28_other: %s' % res28_other) + # print('res29_other: %s' % res29_other) + # return (res28, res29, res28_other, res29_other) + # assert ajvars == res28 + # assert ajvars == res29 + return (res28, res29) + + def _dict_jinja28(self, *args, **kwargs): + return dict(*args, **kwargs) + + def _dict_jinja29(self, the_vars): + return dict(the_vars) diff --git a/test/units/test_constants.py b/test/units/test_constants.py new file mode 100644 index 00000000..4cf2f7f9 --- /dev/null +++ b/test/units/test_constants.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# (c) 2017 Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
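A note on the TestVars cases above: both conversion styles exercised there (_dict_jinja28, i.e. dict(*args, **kwargs), and _dict_jinja29, i.e. dict(the_vars)) only work if the vars object implements the full mapping protocol. A minimal sketch of that protocol, using a hypothetical DemoVars class rather than the real AnsibleJ2Vars:

from collections.abc import Mapping  # collections.Mapping on Python 2

class DemoVars(Mapping):
    # stand-in for a lazy template-vars object, for illustration only
    def __init__(self, data):
        self._data = data

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

dv = DemoVars({'foo': 'bar', 'blip': [1, 2, 3]})
assert dict(dv) == {'foo': 'bar', 'blip': [1, 2, 3]}  # the dict(the_vars) style
assert dict(dv, other='x')['other'] == 'x'            # the dict(*args, **kwargs) style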
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pwd +import os + +import pytest + +from ansible import constants +from ansible.module_utils.six import StringIO +from ansible.module_utils.six.moves import configparser +from ansible.module_utils._text import to_text + + +@pytest.fixture +def cfgparser(): + CFGDATA = StringIO(""" +[defaults] +defaults_one = 'data_defaults_one' + +[level1] +level1_one = 'data_level1_one' + """) + p = configparser.ConfigParser() + p.readfp(CFGDATA) + return p + + +@pytest.fixture +def user(): + user = {} + user['uid'] = os.geteuid() + + pwd_entry = pwd.getpwuid(user['uid']) + user['username'] = pwd_entry.pw_name + user['home'] = pwd_entry.pw_dir + + return user + + +@pytest.fixture +def cfg_file(): + data = '/ansible/test/cfg/path' + old_cfg_file = constants.CONFIG_FILE + constants.CONFIG_FILE = os.path.join(data, 'ansible.cfg') + yield data + + constants.CONFIG_FILE = old_cfg_file + + +@pytest.fixture +def null_cfg_file(): + old_cfg_file = constants.CONFIG_FILE + del constants.CONFIG_FILE + yield + + constants.CONFIG_FILE = old_cfg_file + + +@pytest.fixture +def cwd(): + data = '/ansible/test/cwd/' + old_cwd = os.getcwd + os.getcwd = lambda: data + + old_cwdu = None + if hasattr(os, 'getcwdu'): + old_cwdu = os.getcwdu + os.getcwdu = lambda: to_text(data) + + yield data + + os.getcwd = old_cwd + if hasattr(os, 'getcwdu'): + os.getcwdu = old_cwdu + + +class TestMkBoolean: + def test_bools(self): + assert constants.mk_boolean(True) is True + assert constants.mk_boolean(False) is False + + def test_none(self): + assert constants.mk_boolean(None) is False + + def test_numbers(self): + assert constants.mk_boolean(1) is True + assert constants.mk_boolean(0) is False + assert constants.mk_boolean(0.0) is False + +# Current mk_boolean doesn't consider these to be true values +# def test_other_numbers(self): +# assert constants.mk_boolean(2) is True +# assert constants.mk_boolean(-1) is True +# assert constants.mk_boolean(0.1) is True + + def test_strings(self): + assert constants.mk_boolean("true") is True + assert constants.mk_boolean("TRUE") is True + assert constants.mk_boolean("t") is True + assert constants.mk_boolean("yes") is True + assert constants.mk_boolean("y") is True + assert constants.mk_boolean("on") is True diff --git a/test/units/test_context.py b/test/units/test_context.py new file mode 100644 index 00000000..24e2376d --- /dev/null +++ b/test/units/test_context.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, Toshio Kuratomi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible import context + + +class FakeOptions: + pass + + +def test_set_global_context(): + options = FakeOptions() + options.tags = [u'production', u'webservers'] + options.check_mode = True + options.start_at_task = u'Start with くらとみ' + + expected = frozenset((('tags', (u'production', u'webservers')), + ('check_mode', True), + ('start_at_task', u'Start with くらとみ'))) + + context._init_global_context(options) + assert frozenset(context.CLIARGS.items()) == expected diff --git a/test/units/utils/__init__.py b/test/units/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/__init__.py b/test/units/utils/collection_loader/__init__.py new file mode 
100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py new file mode 100644 index 00000000..4041a338 --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py @@ -0,0 +1,4 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +raise Exception('this module should never be loaded') diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml new file mode 100644 index 00000000..f2e2fdec --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml @@ -0,0 +1,4 @@ +plugin_routing: + modules: + rerouted_module: + redirect: ansible.builtin.ping diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py new file mode 100644 index 00000000..9d30580f --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py @@ -0,0 +1,8 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ..module_utils.my_util import question + + +def action_code(): + return "hello from my_action.py" diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py new file mode 100644 index 00000000..35e1381b --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py @@ -0,0 +1,4 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from .my_util import question diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py new file mode 100644 index 00000000..c431c34c --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py @@ -0,0 +1,6 @@ +# WARNING: Changing line numbers of code in this file will break collection tests that use tracing to check paths and line numbers. +# Also, do not import division from __future__ as this will break detection of __future__ inheritance on Python 2. 
+ + +def question(): + return 3 / 2 diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py new file mode 100644 index 00000000..6d697034 --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py @@ -0,0 +1,5 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +raise Exception('this should never run') diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py new file mode 100644 index 00000000..99320a0c --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py @@ -0,0 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def module_code(): + return "hello from amodule.py" diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py new file mode 100644 index 00000000..6068ac1a --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py @@ -0,0 +1,5 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +raise Exception('this code should never execute') diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py new file mode 100644 index 00000000..6068ac1a --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py @@ -0,0 +1,5 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +raise Exception('this code should never execute') diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py new file mode 100644 index 00000000..6068ac1a --- /dev/null +++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py @@ -0,0 +1,5 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +raise Exception('this code should never execute') diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py new file mode 100644 index 00000000..6068ac1a --- /dev/null +++ 
b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py @@ -0,0 +1,5 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +raise Exception('this code should never execute') diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/test/units/utils/collection_loader/test_collection_loader.py b/test/units/utils/collection_loader/test_collection_loader.py new file mode 100644 index 00000000..6488188c --- /dev/null +++ b/test/units/utils/collection_loader/test_collection_loader.py @@ -0,0 +1,834 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pkgutil +import pytest +import re +import sys + +from ansible.module_utils.six import PY3, string_types +from ansible.module_utils.compat.importlib import import_module +from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef +from ansible.utils.collection_loader._collection_finder import ( + _AnsibleCollectionFinder, _AnsibleCollectionLoader, _AnsibleCollectionNSPkgLoader, _AnsibleCollectionPkgLoader, + _AnsibleCollectionPkgLoaderBase, _AnsibleCollectionRootPkgLoader, _AnsiblePathHookFinder, + _get_collection_name_from_path, _get_collection_role_path, _get_collection_metadata, _iter_modules_impl +) +from ansible.utils.collection_loader._collection_config import _EventSource +from units.compat.mock import MagicMock, NonCallableMagicMock, patch + + +# fixture to ensure we always clean up the import stuff when we're done +@pytest.fixture(autouse=True, scope='function') +def teardown(*args, **kwargs): + yield + reset_collections_loader_state() + +# BEGIN STANDALONE TESTS - these exercise behaviors of the individual components without the import machinery + + +def test_finder_setup(): + # ensure scalar path is listified + f = _AnsibleCollectionFinder(paths='/bogus/bogus') + assert isinstance(f._n_collection_paths, list) + + # ensure sys.path paths that have an ansible_collections dir are added to the end of the collections paths + with patch.object(sys, 'path', ['/bogus', default_test_collection_paths[1], '/morebogus', default_test_collection_paths[0]]): + f = _AnsibleCollectionFinder(paths=['/explicit', '/other']) + assert f._n_collection_paths == ['/explicit', '/other', default_test_collection_paths[1], default_test_collection_paths[0]] + + configured_paths = ['/bogus'] + playbook_paths = ['/playbookdir'] + f = _AnsibleCollectionFinder(paths=configured_paths) + assert f._n_collection_paths == configured_paths + f.set_playbook_paths(playbook_paths) + assert f._n_collection_paths == 
extend_paths(playbook_paths, 'collections') + configured_paths + + # ensure scalar playbook_paths gets listified + f.set_playbook_paths(playbook_paths[0]) + assert f._n_collection_paths == extend_paths(playbook_paths, 'collections') + configured_paths + + +def test_finder_not_interested(): + f = get_default_finder() + assert f.find_module('nothanks') is None + assert f.find_module('nothanks.sub', path=['/bogus/dir']) is None + + +def test_finder_ns(): + # ensure we can still load ansible_collections and ansible_collections.ansible when they don't exist on disk + f = _AnsibleCollectionFinder(paths=['/bogus/bogus']) + loader = f.find_module('ansible_collections') + assert isinstance(loader, _AnsibleCollectionRootPkgLoader) + + loader = f.find_module('ansible_collections.ansible', path=['/bogus/bogus']) + assert isinstance(loader, _AnsibleCollectionNSPkgLoader) + + f = get_default_finder() + loader = f.find_module('ansible_collections') + assert isinstance(loader, _AnsibleCollectionRootPkgLoader) + + # path is not allowed for top-level + with pytest.raises(ValueError): + f.find_module('ansible_collections', path=['whatever']) + + # path is required for subpackages + with pytest.raises(ValueError): + f.find_module('ansible_collections.whatever', path=None) + + paths = [os.path.join(p, 'ansible_collections/nonexistns') for p in default_test_collection_paths] + + # test missing + loader = f.find_module('ansible_collections.nonexistns', paths) + assert loader is None + + +# keep these up top to make sure the loader install/remove are working, since we rely on them heavily in the tests +def test_loader_remove(): + fake_mp = [MagicMock(), _AnsibleCollectionFinder(), MagicMock(), _AnsibleCollectionFinder()] + fake_ph = [MagicMock().m1, MagicMock().m2, _AnsibleCollectionFinder()._ansible_collection_path_hook, NonCallableMagicMock] + # must nest until 2.6 compilation is totally donezo + with patch.object(sys, 'meta_path', fake_mp): + with patch.object(sys, 'path_hooks', fake_ph): + _AnsibleCollectionFinder()._remove() + assert len(sys.meta_path) == 2 + # no AnsibleCollectionFinders on the meta path after remove is called + assert all((not isinstance(mpf, _AnsibleCollectionFinder) for mpf in sys.meta_path)) + assert len(sys.path_hooks) == 3 + # none of the remaining path hooks should point at an AnsibleCollectionFinder + assert all((not isinstance(ph.__self__, _AnsibleCollectionFinder) for ph in sys.path_hooks if hasattr(ph, '__self__'))) + assert AnsibleCollectionConfig.collection_finder is None + + +def test_loader_install(): + fake_mp = [MagicMock(), _AnsibleCollectionFinder(), MagicMock(), _AnsibleCollectionFinder()] + fake_ph = [MagicMock().m1, MagicMock().m2, _AnsibleCollectionFinder()._ansible_collection_path_hook, NonCallableMagicMock] + # must nest until 2.6 compilation is totally donezo + with patch.object(sys, 'meta_path', fake_mp): + with patch.object(sys, 'path_hooks', fake_ph): + f = _AnsibleCollectionFinder() + f._install() + assert len(sys.meta_path) == 3 # should have removed the existing ACFs and installed a new one + assert sys.meta_path[0] is f # at the front + # the rest of the meta_path should not be AnsibleCollectionFinders + assert all((not isinstance(mpf, _AnsibleCollectionFinder) for mpf in sys.meta_path[1:])) + assert len(sys.path_hooks) == 4 # should have removed the existing ACF path hooks and installed a new one + # the first path hook should be ours, make sure it's pointing at the right instance + assert hasattr(sys.path_hooks[0], '__self__') and 
sys.path_hooks[0].__self__ is f + # the rest of the path_hooks should not point at an AnsibleCollectionFinder + assert all((not isinstance(ph.__self__, _AnsibleCollectionFinder) for ph in sys.path_hooks[1:] if hasattr(ph, '__self__'))) + assert AnsibleCollectionConfig.collection_finder is f + with pytest.raises(ValueError): + AnsibleCollectionConfig.collection_finder = f + + +def test_finder_coll(): + f = get_default_finder() + + tests = [ + {'name': 'ansible_collections.testns.testcoll', 'test_paths': [default_test_collection_paths]}, + {'name': 'ansible_collections.ansible.builtin', 'test_paths': [['/bogus'], default_test_collection_paths]}, + ] + # ensure finder works for legit paths and bogus paths + for test_dict in tests: + # splat the dict values to our locals + globals().update(test_dict) + parent_pkg = name.rpartition('.')[0] + for paths in test_paths: + paths = [os.path.join(p, parent_pkg.replace('.', '/')) for p in paths] + loader = f.find_module(name, path=paths) + assert isinstance(loader, _AnsibleCollectionPkgLoader) + + +def test_root_loader_not_interested(): + with pytest.raises(ImportError): + _AnsibleCollectionRootPkgLoader('not_ansible_collections_toplevel', path_list=[]) + + with pytest.raises(ImportError): + _AnsibleCollectionRootPkgLoader('ansible_collections.somens', path_list=['/bogus']) + + +def test_root_loader(): + name = 'ansible_collections' + # ensure this works even when ansible_collections doesn't exist on disk + for paths in [], default_test_collection_paths: + if name in sys.modules: + del sys.modules[name] + loader = _AnsibleCollectionRootPkgLoader(name, paths) + assert repr(loader).startswith('_AnsibleCollectionRootPkgLoader(path=') + module = loader.load_module(name) + assert module.__name__ == name + assert module.__path__ == [p for p in extend_paths(paths, name) if os.path.isdir(p)] + # even if the dir exists somewhere, this loader doesn't support get_data, so make __file__ a non-file + assert module.__file__ == '' + assert module.__package__ == name + assert sys.modules.get(name) == module + + +def test_nspkg_loader_not_interested(): + with pytest.raises(ImportError): + _AnsibleCollectionNSPkgLoader('not_ansible_collections_toplevel.something', path_list=[]) + + with pytest.raises(ImportError): + _AnsibleCollectionNSPkgLoader('ansible_collections.somens.somecoll', path_list=[]) + + +def test_nspkg_loader_load_module(): + # ensure the loader behaves on the toplevel and ansible packages for both legit and missing/bogus paths + for name in ['ansible_collections.ansible', 'ansible_collections.testns']: + parent_pkg = name.partition('.')[0] + module_to_load = name.rpartition('.')[2] + paths = extend_paths(default_test_collection_paths, parent_pkg) + existing_child_paths = [p for p in extend_paths(paths, module_to_load) if os.path.exists(p)] + if name in sys.modules: + del sys.modules[name] + loader = _AnsibleCollectionNSPkgLoader(name, path_list=paths) + assert repr(loader).startswith('_AnsibleCollectionNSPkgLoader(path=') + module = loader.load_module(name) + assert module.__name__ == name + assert isinstance(module.__loader__, _AnsibleCollectionNSPkgLoader) + assert module.__path__ == existing_child_paths + assert module.__package__ == name + assert module.__file__ == '' + assert sys.modules.get(name) == module + + +def test_collpkg_loader_not_interested(): + with pytest.raises(ImportError): + _AnsibleCollectionPkgLoader('not_ansible_collections', path_list=[]) + + with pytest.raises(ImportError): + _AnsibleCollectionPkgLoader('ansible_collections.ns', 
path_list=['/bogus/bogus']) + + +def test_collpkg_loader_load_module(): + reset_collections_loader_state() + with patch('ansible.utils.collection_loader.AnsibleCollectionConfig') as p: + for name in ['ansible_collections.ansible.builtin', 'ansible_collections.testns.testcoll']: + parent_pkg = name.rpartition('.')[0] + module_to_load = name.rpartition('.')[2] + paths = extend_paths(default_test_collection_paths, parent_pkg) + existing_child_paths = [p for p in extend_paths(paths, module_to_load) if os.path.exists(p)] + is_builtin = 'ansible.builtin' in name + if name in sys.modules: + del sys.modules[name] + loader = _AnsibleCollectionPkgLoader(name, path_list=paths) + assert repr(loader).startswith('_AnsibleCollectionPkgLoader(path=') + module = loader.load_module(name) + assert module.__name__ == name + assert isinstance(module.__loader__, _AnsibleCollectionPkgLoader) + if is_builtin: + assert module.__path__ == [] + else: + assert module.__path__ == [existing_child_paths[0]] + + assert module.__package__ == name + if is_builtin: + assert module.__file__ == '' + else: + assert module.__file__.endswith('__synthetic__') and os.path.isdir(os.path.dirname(module.__file__)) + assert sys.modules.get(name) == module + + assert hasattr(module, '_collection_meta') and isinstance(module._collection_meta, dict) + + # FIXME: validate _collection_meta contents match what's on disk (or not) + + # if the module has metadata, try loading it with busted metadata + if module._collection_meta: + _collection_finder = import_module('ansible.utils.collection_loader._collection_finder') + with patch.object(_collection_finder, '_meta_yml_to_dict', side_effect=Exception('bang')): + with pytest.raises(Exception) as ex: + _AnsibleCollectionPkgLoader(name, path_list=paths).load_module(name) + assert 'error parsing collection metadata' in str(ex.value) + + +def test_coll_loader(): + with patch('ansible.utils.collection_loader.AnsibleCollectionConfig'): + with pytest.raises(ValueError): + # not a collection + _AnsibleCollectionLoader('ansible_collections') + + with pytest.raises(ValueError): + # bogus paths + _AnsibleCollectionLoader('ansible_collections.testns.testcoll', path_list=[]) + + # FIXME: more + + +def test_path_hook_setup(): + with patch.object(sys, 'path_hooks', []): + found_hook = None + pathhook_exc = None + try: + found_hook = _AnsiblePathHookFinder._get_filefinder_path_hook() + except Exception as phe: + pathhook_exc = phe + + if PY3: + assert str(pathhook_exc) == 'need exactly one FileFinder import hook (found 0)' + else: + assert found_hook is None + + assert repr(_AnsiblePathHookFinder(object(), '/bogus/path')) == "_AnsiblePathHookFinder(path='/bogus/path')" + + +def test_path_hook_importerror(): + # ensure that AnsiblePathHookFinder.find_module swallows ImportError from path hook delegation on Py3, eg if the delegated + # path hook gets passed a file on sys.path (python36.zip) + reset_collections_loader_state() + path_to_a_file = os.path.join(default_test_collection_paths[0], 'ansible_collections/testns/testcoll/plugins/action/my_action.py') + # it's a bug if the following pops an ImportError... 
+ assert _AnsiblePathHookFinder(_AnsibleCollectionFinder(), path_to_a_file).find_module('foo.bar.my_action') is None + + +def test_new_or_existing_module(): + module_name = 'blar.test.module' + pkg_name = module_name.rpartition('.')[0] + + # create new module case + nuke_module_prefix(module_name) + with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name, __package__=pkg_name) as new_module: + # the module we just created should now exist in sys.modules + assert sys.modules.get(module_name) is new_module + assert new_module.__name__ == module_name + + # the module should stick since we didn't raise an exception in the contextmgr + assert sys.modules.get(module_name) is new_module + + # reuse existing module case + with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name, __attr1__=42, blar='yo') as existing_module: + assert sys.modules.get(module_name) is new_module # should be the same module we created earlier + assert hasattr(existing_module, '__package__') and existing_module.__package__ == pkg_name + assert hasattr(existing_module, '__attr1__') and existing_module.__attr1__ == 42 + assert hasattr(existing_module, 'blar') and existing_module.blar == 'yo' + + # exception during update existing shouldn't zap existing module from sys.modules + with pytest.raises(ValueError) as ve: + with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name) as existing_module: + err_to_raise = ValueError('bang') + raise err_to_raise + # make sure we got our error + assert ve.value is err_to_raise + # and that the module still exists + assert sys.modules.get(module_name) is existing_module + + # test module removal after exception during creation + nuke_module_prefix(module_name) + with pytest.raises(ValueError) as ve: + with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name) as new_module: + err_to_raise = ValueError('bang') + raise err_to_raise + # make sure we got our error + assert ve.value is err_to_raise + # and that the module was removed + assert sys.modules.get(module_name) is None + + +def test_iter_modules_impl(): + modules_trailer = 'ansible_collections/testns/testcoll/plugins' + modules_pkg_prefix = modules_trailer.replace('/', '.') + '.' + modules_path = os.path.join(default_test_collection_paths[0], modules_trailer) + modules = list(_iter_modules_impl([modules_path], modules_pkg_prefix)) + + assert modules + assert set([('ansible_collections.testns.testcoll.plugins.action', True), + ('ansible_collections.testns.testcoll.plugins.module_utils', True), + ('ansible_collections.testns.testcoll.plugins.modules', True)]) == set(modules) + + modules_trailer = 'ansible_collections/testns/testcoll/plugins/modules' + modules_pkg_prefix = modules_trailer.replace('/', '.') + '.' 
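+    # second pass: point the scan at the modules/ package itself; amodule.py should come back as a single non-package module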
+ modules_path = os.path.join(default_test_collection_paths[0], modules_trailer) + modules = list(_iter_modules_impl([modules_path], modules_pkg_prefix)) + + assert modules + assert len(modules) == 1 + assert modules[0][0] == 'ansible_collections.testns.testcoll.plugins.modules.amodule' # name + assert modules[0][1] is False # is_pkg + + # FIXME: more + + +# BEGIN IN-CIRCUIT TESTS - these exercise behaviors of the loader when wired up to the import machinery + + +def test_import_from_collection(monkeypatch): + collection_root = os.path.join(os.path.dirname(__file__), 'fixtures', 'collections') + collection_path = os.path.join(collection_root, 'ansible_collections/testns/testcoll/plugins/module_utils/my_util.py') + + # THIS IS UNSTABLE UNDER A DEBUGGER + # the trace we're expecting to be generated when running the code below: + # answer = question() + expected_trace_log = [ + (collection_path, 5, 'call'), + (collection_path, 6, 'line'), + (collection_path, 6, 'return'), + ] + + # define the collection root before any ansible code has been loaded + # otherwise config will have already been loaded and changing the environment will have no effect + monkeypatch.setenv('ANSIBLE_COLLECTIONS_PATH', collection_root) + + finder = _AnsibleCollectionFinder(paths=[collection_root]) + reset_collections_loader_state(finder) + + from ansible_collections.testns.testcoll.plugins.module_utils.my_util import question + + original_trace_function = sys.gettrace() + trace_log = [] + + if original_trace_function: + # enable tracing while preserving the existing trace function (coverage) + def my_trace_function(frame, event, arg): + trace_log.append((frame.f_code.co_filename, frame.f_lineno, event)) + + # the original trace function expects to have itself set as the trace function + sys.settrace(original_trace_function) + # call the original trace function + original_trace_function(frame, event, arg) + # restore our trace function + sys.settrace(my_trace_function) + + return my_trace_function + else: + # no existing trace function, so our trace function is much simpler + def my_trace_function(frame, event, arg): + trace_log.append((frame.f_code.co_filename, frame.f_lineno, event)) + + return my_trace_function + + sys.settrace(my_trace_function) + + try: + # run a minimal amount of code while the trace is running + # adding more code here, including use of a context manager, will add more to our trace + answer = question() + finally: + sys.settrace(original_trace_function) + + # make sure 'import ... as ...' works on builtin synthetic collections + # the following import is not supported (it tries to find module_utils in ansible.plugins) + # import ansible_collections.ansible.builtin.plugins.module_utils as c1 + import ansible_collections.ansible.builtin.plugins.action as c2 + import ansible_collections.ansible.builtin.plugins as c3 + import ansible_collections.ansible.builtin as c4 + import ansible_collections.ansible as c5 + import ansible_collections as c6 + + # make sure 'import ...' 
works on builtin synthetic collections + import ansible_collections.ansible.builtin.plugins.module_utils + + import ansible_collections.ansible.builtin.plugins.action + assert ansible_collections.ansible.builtin.plugins.action == c3.action == c2 + + import ansible_collections.ansible.builtin.plugins + assert ansible_collections.ansible.builtin.plugins == c4.plugins == c3 + + import ansible_collections.ansible.builtin + assert ansible_collections.ansible.builtin == c5.builtin == c4 + + import ansible_collections.ansible + assert ansible_collections.ansible == c6.ansible == c5 + + import ansible_collections + assert ansible_collections == c6 + + # make sure 'from ... import ...' works on builtin synthetic collections + from ansible_collections.ansible import builtin + from ansible_collections.ansible.builtin import plugins + assert builtin.plugins == plugins + + from ansible_collections.ansible.builtin.plugins import action + from ansible_collections.ansible.builtin.plugins.action import command + assert action.command == command + + from ansible_collections.ansible.builtin.plugins.module_utils import basic + from ansible_collections.ansible.builtin.plugins.module_utils.basic import AnsibleModule + assert basic.AnsibleModule == AnsibleModule + + # make sure relative imports work from collections code + # these require __package__ to be set correctly + import ansible_collections.testns.testcoll.plugins.module_utils.my_other_util + import ansible_collections.testns.testcoll.plugins.action.my_action + + # verify that code loaded from a collection does not inherit __future__ statements from the collection loader + if sys.version_info[0] == 2: + # if the collection code inherits the division future feature from the collection loader this will fail + assert answer == 1 + else: + assert answer == 1.5 + + # verify that the filename and line number reported by the trace is correct + # this makes sure that collection loading preserves file paths and line numbers + assert trace_log == expected_trace_log + + +def test_eventsource(): + es = _EventSource() + # fire when empty should succeed + es.fire(42) + handler1 = MagicMock() + handler2 = MagicMock() + es += handler1 + es.fire(99, my_kwarg='blah') + handler1.assert_called_with(99, my_kwarg='blah') + es += handler2 + es.fire(123, foo='bar') + handler1.assert_called_with(123, foo='bar') + handler2.assert_called_with(123, foo='bar') + es -= handler2 + handler1.reset_mock() + handler2.reset_mock() + es.fire(123, foo='bar') + handler1.assert_called_with(123, foo='bar') + handler2.assert_not_called() + es -= handler1 + handler1.reset_mock() + es.fire('blah', kwarg=None) + handler1.assert_not_called() + handler2.assert_not_called() + es -= handler1 # should succeed silently + handler_bang = MagicMock(side_effect=Exception('bang')) + es += handler_bang + with pytest.raises(Exception) as ex: + es.fire(123) + assert 'bang' in str(ex.value) + handler_bang.assert_called_with(123) + with pytest.raises(ValueError): + es += 42 + + +def test_on_collection_load(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + load_handler = MagicMock() + AnsibleCollectionConfig.on_collection_load += load_handler + + m = import_module('ansible_collections.testns.testcoll') + load_handler.assert_called_once_with(collection_name='testns.testcoll', collection_path=os.path.dirname(m.__file__)) + + _meta = _get_collection_metadata('testns.testcoll') + assert _meta + # FIXME: compare to disk + + finder = get_default_finder() + 
reset_collections_loader_state(finder) + + AnsibleCollectionConfig.on_collection_load += MagicMock(side_effect=Exception('bang')) + with pytest.raises(Exception) as ex: + import_module('ansible_collections.testns.testcoll') + assert 'bang' in str(ex.value) + + +def test_default_collection_config(): + finder = get_default_finder() + reset_collections_loader_state(finder) + assert AnsibleCollectionConfig.default_collection is None + AnsibleCollectionConfig.default_collection = 'foo.bar' + assert AnsibleCollectionConfig.default_collection == 'foo.bar' + with pytest.raises(ValueError): + AnsibleCollectionConfig.default_collection = 'bar.baz' + + +def test_default_collection_detection(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + # we're clearly not under a collection path + assert _get_collection_name_from_path('/') is None + + # something that looks like a collection path but isn't importable by our finder + assert _get_collection_name_from_path('/foo/ansible_collections/bogusns/boguscoll/bar') is None + + # legit, at the top of the collection + live_collection_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections/ansible_collections/testns/testcoll') + assert _get_collection_name_from_path(live_collection_path) == 'testns.testcoll' + + # legit, deeper inside the collection + live_collection_deep_path = os.path.join(live_collection_path, 'plugins/modules') + assert _get_collection_name_from_path(live_collection_deep_path) == 'testns.testcoll' + + # this one should be hidden by the real testns.testcoll, so should not resolve + masked_collection_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections_masked/ansible_collections/testns/testcoll') + assert _get_collection_name_from_path(masked_collection_path) is None + + +@pytest.mark.parametrize( + 'role_name,collection_list,expected_collection_name,expected_path_suffix', + [ + ('some_role', ['testns.testcoll', 'ansible.bogus'], 'testns.testcoll', 'testns/testcoll/roles/some_role'), + ('testns.testcoll.some_role', ['ansible.bogus', 'testns.testcoll'], 'testns.testcoll', 'testns/testcoll/roles/some_role'), + ('testns.testcoll.some_role', [], 'testns.testcoll', 'testns/testcoll/roles/some_role'), + ('testns.testcoll.some_role', None, 'testns.testcoll', 'testns/testcoll/roles/some_role'), + ('some_role', [], None, None), + ('some_role', None, None, None), + ]) +def test_collection_role_name_location(role_name, collection_list, expected_collection_name, expected_path_suffix): + finder = get_default_finder() + reset_collections_loader_state(finder) + + expected_path = None + if expected_path_suffix: + expected_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections/ansible_collections', expected_path_suffix) + + found = _get_collection_role_path(role_name, collection_list) + + if found: + assert found[0] == role_name.rpartition('.')[2] + assert found[1] == expected_path + assert found[2] == expected_collection_name + else: + assert expected_collection_name is None and expected_path_suffix is None + + +def test_bogus_imports(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + # ensure ImportError on known-bogus imports + bogus_imports = ['bogus_toplevel', 'ansible_collections.bogusns', 'ansible_collections.testns.boguscoll', + 'ansible_collections.testns.testcoll.bogussub', 'ansible_collections.ansible.builtin.bogussub'] + for bogus_import in bogus_imports: + with pytest.raises(ImportError): + import_module(bogus_import) + + +def 
test_empty_vs_no_code(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + from ansible_collections.testns import testcoll # synthetic package with no code on disk + from ansible_collections.testns.testcoll.plugins import module_utils # real package with empty code file + + # ensure synthetic packages have no code object at all (prevent bogus coverage entries) + assert testcoll.__loader__.get_source(testcoll.__name__) is None + assert testcoll.__loader__.get_code(testcoll.__name__) is None + + # ensure empty package inits do have a code object + assert module_utils.__loader__.get_source(module_utils.__name__) == b'' + assert module_utils.__loader__.get_code(module_utils.__name__) is not None + + +def test_finder_playbook_paths(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + import ansible_collections + import ansible_collections.ansible + import ansible_collections.testns + + # ensure the package modules look like we expect + assert hasattr(ansible_collections, '__path__') and len(ansible_collections.__path__) > 0 + assert hasattr(ansible_collections.ansible, '__path__') and len(ansible_collections.ansible.__path__) > 0 + assert hasattr(ansible_collections.testns, '__path__') and len(ansible_collections.testns.__path__) > 0 + + # these shouldn't be visible yet, since we haven't added the playbook dir + with pytest.raises(ImportError): + import ansible_collections.ansible.playbook_adj_other + + with pytest.raises(ImportError): + import ansible_collections.testns.playbook_adj_other + + assert AnsibleCollectionConfig.playbook_paths == [] + playbook_path_fixture_dir = os.path.join(os.path.dirname(__file__), 'fixtures/playbook_path') + + # configure the playbook paths + AnsibleCollectionConfig.playbook_paths = [playbook_path_fixture_dir] + + # playbook paths go to the front of the line + assert AnsibleCollectionConfig.collection_paths[0] == os.path.join(playbook_path_fixture_dir, 'collections') + + # playbook paths should be updated on the existing root ansible_collections path, as well as on the 'ansible' namespace (but no others!) 
+ assert ansible_collections.__path__[0] == os.path.join(playbook_path_fixture_dir, 'collections/ansible_collections') + assert ansible_collections.ansible.__path__[0] == os.path.join(playbook_path_fixture_dir, 'collections/ansible_collections/ansible') + assert all('playbook_path' not in p for p in ansible_collections.testns.__path__) + + # should succeed since we fixed up the package path + import ansible_collections.ansible.playbook_adj_other + # should succeed since we didn't import freshns before hacking in the path + import ansible_collections.freshns.playbook_adj_other + # should fail since we've already imported something from this path and didn't fix up its package path + with pytest.raises(ImportError): + import ansible_collections.testns.playbook_adj_other + + +def test_toplevel_iter_modules(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + modules = list(pkgutil.iter_modules(default_test_collection_paths, '')) + assert len(modules) == 1 + assert modules[0][1] == 'ansible_collections' + + +def test_iter_modules_namespaces(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + paths = extend_paths(default_test_collection_paths, 'ansible_collections') + modules = list(pkgutil.iter_modules(paths, 'ansible_collections.')) + assert len(modules) == 2 + assert all(m[2] is True for m in modules) + assert all(isinstance(m[0], _AnsiblePathHookFinder) for m in modules) + assert set(['ansible_collections.testns', 'ansible_collections.ansible']) == set(m[1] for m in modules) + + +def test_collection_get_data(): + finder = get_default_finder() + reset_collections_loader_state(finder) + + # something that's there + d = pkgutil.get_data('ansible_collections.testns.testcoll', 'plugins/action/my_action.py') + assert b'hello from my_action.py' in d + + # something that's not there + d = pkgutil.get_data('ansible_collections.testns.testcoll', 'bogus/bogus') + assert d is None + + with pytest.raises(ValueError): + plugins_pkg = import_module('ansible_collections.ansible.builtin') + assert not os.path.exists(os.path.dirname(plugins_pkg.__file__)) + d = pkgutil.get_data('ansible_collections.ansible.builtin', 'plugins/connection/local.py') + + +@pytest.mark.parametrize( + 'ref,ref_type,expected_collection,expected_subdirs,expected_resource,expected_python_pkg_name', + [ + ('ns.coll.myaction', 'action', 'ns.coll', '', 'myaction', 'ansible_collections.ns.coll.plugins.action'), + ('ns.coll.subdir1.subdir2.myaction', 'action', 'ns.coll', 'subdir1.subdir2', 'myaction', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'), + ('ns.coll.myrole', 'role', 'ns.coll', '', 'myrole', 'ansible_collections.ns.coll.roles.myrole'), + ('ns.coll.subdir1.subdir2.myrole', 'role', 'ns.coll', 'subdir1.subdir2', 'myrole', 'ansible_collections.ns.coll.roles.subdir1.subdir2.myrole'), + ]) +def test_fqcr_parsing_valid(ref, ref_type, expected_collection, + expected_subdirs, expected_resource, expected_python_pkg_name): + assert AnsibleCollectionRef.is_valid_fqcr(ref, ref_type) + + r = AnsibleCollectionRef.from_fqcr(ref, ref_type) + assert r.collection == expected_collection + assert r.subdirs == expected_subdirs + assert r.resource == expected_resource + assert r.n_python_package_name == expected_python_pkg_name + + r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type) + assert r.collection == expected_collection + assert r.subdirs == expected_subdirs + assert r.resource == expected_resource + assert r.n_python_package_name == expected_python_pkg_name + + 
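The parametrized cases above pin down how a fully qualified collection reference decomposes: the first two dot-separated segments name the collection, the last segment is the resource, and anything in between is a nested subdir chain. A minimal sketch of that splitting rule (split_fqcr is a hypothetical helper for illustration only; the real AnsibleCollectionRef.from_fqcr also validates each segment and derives the python package from ref_type):

def split_fqcr(ref):
    # 'ns.coll.subdir1.subdir2.myaction' -> ('ns.coll', 'subdir1.subdir2', 'myaction')
    parts = ref.split('.')
    collection = '.'.join(parts[:2])  # namespace.collection
    resource = parts[-1]              # the plugin or role name
    subdirs = '.'.join(parts[2:-1])   # '' when there is no nesting
    return collection, subdirs, resource

assert split_fqcr('ns.coll.myaction') == ('ns.coll', '', 'myaction')
assert split_fqcr('ns.coll.subdir1.subdir2.myrole') == ('ns.coll', 'subdir1.subdir2', 'myrole')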
+@pytest.mark.parametrize( + 'ref,ref_type,expected_error_type,expected_error_expression', + [ + ('no_dots_at_all_action', 'action', ValueError, 'is not a valid collection reference'), + ('no_nscoll.myaction', 'action', ValueError, 'is not a valid collection reference'), + ('ns.coll.myaction', 'bogus', ValueError, 'invalid collection ref_type'), + ]) +def test_fqcr_parsing_invalid(ref, ref_type, expected_error_type, expected_error_expression): + assert not AnsibleCollectionRef.is_valid_fqcr(ref, ref_type) + + with pytest.raises(expected_error_type) as curerr: + AnsibleCollectionRef.from_fqcr(ref, ref_type) + + assert re.search(expected_error_expression, str(curerr.value)) + + r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type) + assert r is None + + +@pytest.mark.parametrize( + 'name,subdirs,resource,ref_type,python_pkg_name', + [ + ('ns.coll', None, 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments'), + ('ns.coll', 'subdir1', 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments.subdir1'), + ('ns.coll', 'subdir1.subdir2', 'res', 'action', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'), + ]) +def test_collectionref_components_valid(name, subdirs, resource, ref_type, python_pkg_name): + x = AnsibleCollectionRef(name, subdirs, resource, ref_type) + + assert x.collection == name + if subdirs: + assert x.subdirs == subdirs + else: + assert x.subdirs == '' + + assert x.resource == resource + assert x.ref_type == ref_type + assert x.n_python_package_name == python_pkg_name + + +@pytest.mark.parametrize( + 'dirname,expected_result', + [ + ('become_plugins', 'become'), + ('cache_plugins', 'cache'), + ('connection_plugins', 'connection'), + ('library', 'modules'), + ('filter_plugins', 'filter'), + ('bogus_plugins', ValueError), + (None, ValueError) + ] +) +def test_legacy_plugin_dir_to_plugin_type(dirname, expected_result): + if isinstance(expected_result, string_types): + assert AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(dirname) == expected_result + else: + with pytest.raises(expected_result): + AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(dirname) + + +@pytest.mark.parametrize( + 'name,subdirs,resource,ref_type,expected_error_type,expected_error_expression', + [ + ('bad_ns', '', 'resource', 'action', ValueError, 'invalid collection name'), + ('ns.coll.', '', 'resource', 'action', ValueError, 'invalid collection name'), + ('ns.coll', 'badsubdir#', 'resource', 'action', ValueError, 'invalid subdirs entry'), + ('ns.coll', 'badsubdir.', 'resource', 'action', ValueError, 'invalid subdirs entry'), + ('ns.coll', '.badsubdir', 'resource', 'action', ValueError, 'invalid subdirs entry'), + ('ns.coll', '', 'resource', 'bogus', ValueError, 'invalid collection ref_type'), + ]) +def test_collectionref_components_invalid(name, subdirs, resource, ref_type, expected_error_type, expected_error_expression): + with pytest.raises(expected_error_type) as curerr: + AnsibleCollectionRef(name, subdirs, resource, ref_type) + + assert re.search(expected_error_expression, str(curerr.value)) + + +# BEGIN TEST SUPPORT + +default_test_collection_paths = [ + os.path.join(os.path.dirname(__file__), 'fixtures', 'collections'), + os.path.join(os.path.dirname(__file__), 'fixtures', 'collections_masked'), + '/bogus/bogussub' +] + + +def get_default_finder(): + return _AnsibleCollectionFinder(paths=default_test_collection_paths) + + +def extend_paths(path_list, suffix): + suffix = suffix.replace('.', '/') + return [os.path.join(p, suffix) for p in 
path_list] + + +def nuke_module_prefix(prefix): + for module_to_nuke in [m for m in sys.modules if m.startswith(prefix)]: + sys.modules.pop(module_to_nuke) + + +def reset_collections_loader_state(metapath_finder=None): + _AnsibleCollectionFinder._remove() + + nuke_module_prefix('ansible_collections') + nuke_module_prefix('ansible.modules') + nuke_module_prefix('ansible.plugins') + + # FIXME: better to move this someplace else that gets cleaned up automatically? + _AnsibleCollectionLoader._redirected_package_map = {} + + AnsibleCollectionConfig._default_collection = None + AnsibleCollectionConfig._on_collection_load = _EventSource() + + if metapath_finder: + metapath_finder._install() diff --git a/test/units/utils/display/test_display.py b/test/units/utils/display/test_display.py new file mode 100644 index 00000000..cdeb4966 --- /dev/null +++ b/test/units/utils/display/test_display.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible.utils.display import Display + + +def test_display_basic_message(capsys, mocker): + # Disable logging + mocker.patch('ansible.utils.display.logger', return_value=None) + + d = Display() + d.display(u'Some displayed message') + out, err = capsys.readouterr() + assert out == 'Some displayed message\n' + assert err == '' diff --git a/test/units/utils/display/test_logger.py b/test/units/utils/display/test_logger.py new file mode 100644 index 00000000..ed69393b --- /dev/null +++ b/test/units/utils/display/test_logger.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import logging +import sys + + +def test_logger(): + ''' + Avoid CVE-2019-14846 as 3rd party libs will disclose secrets when + logging is set to DEBUG + ''' + + # clear loaded modules to have an unadulterated test.
+ for loaded in list(sys.modules.keys()): + if 'ansible' in loaded: + del sys.modules[loaded] + + # force logger to exist via config + from ansible import constants as C + C.DEFAULT_LOG_PATH = '/dev/null' + + # initialize logger + from ansible.utils.display import logger + + assert logger.root.level != logging.DEBUG diff --git a/test/units/utils/display/test_warning.py b/test/units/utils/display/test_warning.py new file mode 100644 index 00000000..be63c348 --- /dev/null +++ b/test/units/utils/display/test_warning.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import pytest + +from ansible.utils.display import Display + + +@pytest.fixture +def warning_message(): + warning_message = 'bad things will happen' + expected_warning_message = '[WARNING]: {0}\n'.format(warning_message) + return warning_message, expected_warning_message + + +def test_warning(capsys, mocker, warning_message): + warning_message, expected_warning_message = warning_message + + mocker.patch('ansible.utils.color.ANSIBLE_COLOR', True) + mocker.patch('ansible.utils.color.parsecolor', return_value=u'1;35') # value for 'bright purple' + + d = Display() + d.warning(warning_message) + out, err = capsys.readouterr() + assert d._warns == {expected_warning_message: 1} + assert err == '\x1b[1;35m{0}\x1b[0m\n'.format(expected_warning_message.rstrip('\n')) + + +def test_warning_no_color(capsys, mocker, warning_message): + warning_message, expected_warning_message = warning_message + + mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False) + + d = Display() + d.warning(warning_message) + out, err = capsys.readouterr() + assert d._warns == {expected_warning_message: 1} + assert err == expected_warning_message diff --git a/test/units/utils/test_cleanup_tmp_file.py b/test/units/utils/test_cleanup_tmp_file.py new file mode 100644 index 00000000..2a44a55b --- /dev/null +++ b/test/units/utils/test_cleanup_tmp_file.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import pytest +import tempfile + +from ansible.utils.path import cleanup_tmp_file + + +def raise_error(): + raise OSError + + +def test_cleanup_tmp_file_file(): + tmp_fd, tmp = tempfile.mkstemp() + cleanup_tmp_file(tmp) + + assert not os.path.exists(tmp) + + +def test_cleanup_tmp_file_dir(): + tmp = tempfile.mkdtemp() + cleanup_tmp_file(tmp) + + assert not os.path.exists(tmp) + + +def test_cleanup_tmp_file_nonexistant(): + assert None is cleanup_tmp_file('nope') + + +def test_cleanup_tmp_file_failure(mocker): + tmp = tempfile.mkdtemp() + with pytest.raises(Exception): + mocker.patch('shutil.rmtree', side_effect=raise_error()) + cleanup_tmp_file(tmp) + + +def test_cleanup_tmp_file_failure_warning(mocker, capsys): + tmp = tempfile.mkdtemp() + with pytest.raises(Exception): + mocker.patch('shutil.rmtree', side_effect=raise_error()) + cleanup_tmp_file(tmp, warn=True) diff --git a/test/units/utils/test_context_objects.py b/test/units/utils/test_context_objects.py new file mode 100644 index 00000000..c56a41d0 --- /dev/null +++ b/test/units/utils/test_context_objects.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2018, 
Toshio Kuratomi +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import argparse + +import pytest + +from ansible.module_utils.common.collections import ImmutableDict +from ansible.utils import context_objects as co + + +MAKE_IMMUTABLE_DATA = ((u'くらとみ', u'くらとみ'), + (42, 42), + ({u'café': u'くらとみ'}, ImmutableDict({u'café': u'くらとみ'})), + ([1, u'café', u'くらとみ'], (1, u'café', u'くらとみ')), + (set((1, u'café', u'くらとみ')), frozenset((1, u'café', u'くらとみ'))), + ({u'café': [1, set(u'ñ')]}, + ImmutableDict({u'café': (1, frozenset(u'ñ'))})), + ([set((1, 2)), {u'くらとみ': 3}], + (frozenset((1, 2)), ImmutableDict({u'くらとみ': 3}))), + ) + + +@pytest.mark.parametrize('data, expected', MAKE_IMMUTABLE_DATA) +def test_make_immutable(data, expected): + assert co._make_immutable(data) == expected + + +def test_cliargs_from_dict(): + old_dict = {'tags': [u'production', u'webservers'], + 'check_mode': True, + 'start_at_task': u'Start with くらとみ'} + expected = frozenset((('tags', (u'production', u'webservers')), + ('check_mode', True), + ('start_at_task', u'Start with くらとみ'))) + + assert frozenset(co.CLIArgs(old_dict).items()) == expected + + +def test_cliargs(): + class FakeOptions: + pass + options = FakeOptions() + options.tags = [u'production', u'webservers'] + options.check_mode = True + options.start_at_task = u'Start with くらとみ' + + expected = frozenset((('tags', (u'production', u'webservers')), + ('check_mode', True), + ('start_at_task', u'Start with くらとみ'))) + + assert frozenset(co.CLIArgs.from_options(options).items()) == expected + + +def test_cliargs_argparse(): + parser = argparse.ArgumentParser(description='Process some integers.') + parser.add_argument('integers', metavar='N', type=int, nargs='+', + help='an integer for the accumulator') + parser.add_argument('--sum', dest='accumulate', action='store_const', + const=sum, default=max, + help='sum the integers (default: find the max)') + args = parser.parse_args([u'--sum', u'1', u'2']) + + expected = frozenset((('accumulate', sum), ('integers', (1, 2)))) + + assert frozenset(co.CLIArgs.from_options(args).items()) == expected diff --git a/test/units/utils/test_encrypt.py b/test/units/utils/test_encrypt.py new file mode 100644 index 00000000..2cbe828a --- /dev/null +++ b/test/units/utils/test_encrypt.py @@ -0,0 +1,168 @@ +# (c) 2018, Matthias Fuchs +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
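+# These tests exercise both hashing backends: PasslibHash when passlib is
+# installed, and the stdlib crypt fallback (CryptHash) otherwise. The
+# passlib_off context manager below flips encrypt.PASSLIB_AVAILABLE so the
+# fallback path can be tested even on systems where passlib is present.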
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +import pytest + +from ansible.errors import AnsibleError, AnsibleFilterError +from ansible.plugins.filter.core import get_encrypted_password +from ansible.utils import encrypt + + +class passlib_off(object): + def __init__(self): + self.orig = encrypt.PASSLIB_AVAILABLE + + def __enter__(self): + encrypt.PASSLIB_AVAILABLE = False + return self + + def __exit__(self, exception_type, exception_value, traceback): + encrypt.PASSLIB_AVAILABLE = self.orig + + +def assert_hash(expected, secret, algorithm, **settings): + + if encrypt.PASSLIB_AVAILABLE: + assert encrypt.passlib_or_crypt(secret, algorithm, **settings) == expected + assert encrypt.PasslibHash(algorithm).hash(secret, **settings) == expected + else: + assert encrypt.passlib_or_crypt(secret, algorithm, **settings) == expected + with pytest.raises(AnsibleError) as excinfo: + encrypt.PasslibHash(algorithm).hash(secret, **settings) + assert excinfo.value.args[0] == "passlib must be installed to hash with '%s'" % algorithm + + +@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib') +def test_encrypt_with_rounds_no_passlib(): + with passlib_off(): + assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7", + secret="123", algorithm="sha256_crypt", salt="12345678", rounds=5000) + assert_hash("$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/", + secret="123", algorithm="sha256_crypt", salt="12345678", rounds=10000) + assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.", + secret="123", algorithm="sha512_crypt", salt="12345678", rounds=5000) + + +# If passlib is not installed, this is identical to the test_encrypt_with_rounds_no_passlib() test
+@pytest.mark.skipif(not encrypt.PASSLIB_AVAILABLE, reason='passlib must be installed to run this test') +def test_encrypt_with_rounds(): + assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7", + secret="123", algorithm="sha256_crypt", salt="12345678", rounds=5000) + assert_hash("$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/", + secret="123", algorithm="sha256_crypt", salt="12345678", rounds=10000) + assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.", + secret="123", algorithm="sha512_crypt", salt="12345678", rounds=5000) + + +@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib') +def test_encrypt_default_rounds_no_passlib(): + with passlib_off(): + assert_hash("$1$12345678$tRy4cXc3kmcfRZVj4iFXr/", + secret="123", algorithm="md5_crypt", salt="12345678") + assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7", + secret="123", algorithm="sha256_crypt", salt="12345678") + assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.", + secret="123", algorithm="sha512_crypt", salt="12345678") + + assert encrypt.CryptHash("md5_crypt").hash("123") + + +# If passlib is not installed,
this is identical to the test_encrypt_default_rounds_no_passlib() test +@pytest.mark.skipif(not encrypt.PASSLIB_AVAILABLE, reason='passlib must be installed to run this test') +def test_encrypt_default_rounds(): + assert_hash("$1$12345678$tRy4cXc3kmcfRZVj4iFXr/", + secret="123", algorithm="md5_crypt", salt="12345678") + assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7", + secret="123", algorithm="sha256_crypt", salt="12345678") + assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.", + secret="123", algorithm="sha512_crypt", salt="12345678") + + assert encrypt.PasslibHash("md5_crypt").hash("123") + + +@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib') +def test_password_hash_filter_no_passlib(): + with passlib_off(): + assert not encrypt.PASSLIB_AVAILABLE + assert get_encrypted_password("123", "md5", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/" + + with pytest.raises(AnsibleFilterError): + get_encrypted_password("123", "crypt16", salt="12") + + +def test_password_hash_filter_passlib(): + if not encrypt.PASSLIB_AVAILABLE: + pytest.skip("passlib not available") + + with pytest.raises(AnsibleFilterError): + get_encrypted_password("123", "sha257", salt="12345678") + + # Uses 5000 rounds by default for sha256 matching crypt behaviour + assert get_encrypted_password("123", "sha256", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7" + assert get_encrypted_password("123", "sha256", salt="12345678", rounds=5000) == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7" + + assert (get_encrypted_password("123", "sha256", salt="12345678", rounds=10000) == + "$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/") + + assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=6000) == + "$6$rounds=6000$12345678$l/fC67BdJwZrJ7qneKGP1b6PcatfBr0dI7W6JLBrsv8P1wnv/0pu4WJsWq5p6WiXgZ2gt9Aoir3MeORJxg4.Z/") + + assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=5000) == + "$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.") + + assert get_encrypted_password("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM" + + # Try algorithm that uses a raw salt + assert get_encrypted_password("123", "pbkdf2_sha256") + + +@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib') +def test_do_encrypt_no_passlib(): + with passlib_off(): + assert not encrypt.PASSLIB_AVAILABLE + assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/" + + with pytest.raises(AnsibleError): + encrypt.do_encrypt("123", "crypt16", salt="12") + + +def test_do_encrypt_passlib(): + if not encrypt.PASSLIB_AVAILABLE: + pytest.skip("passlib not available") + + with pytest.raises(AnsibleError): + encrypt.do_encrypt("123", "sha257_crypt", salt="12345678") + + # Uses 5000 rounds by default for sha256 matching crypt behaviour. 
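+ # (format note: the expected strings follow the modular-crypt layout
+ # "$<id>$rounds=<n>$<salt>$<digest>"; per the SHA-crypt spec, sha256_crypt
+ # and sha512_crypt default to 5000 rounds and omit the "rounds=" field
+ # when that default is used, so only non-default counts such as 10000 or
+ # 6000 show up in the hashes above)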
+ assert encrypt.do_encrypt("123", "sha256_crypt", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7" + + assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/" + + assert encrypt.do_encrypt("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM" + + +def test_random_salt(): + res = encrypt.random_salt() + expected_salt_candidate_chars = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./' + assert len(res) == 8 + for res_char in res: + assert res_char in expected_salt_candidate_chars diff --git a/test/units/utils/test_helpers.py b/test/units/utils/test_helpers.py new file mode 100644 index 00000000..ec37b39b --- /dev/null +++ b/test/units/utils/test_helpers.py @@ -0,0 +1,34 @@ +# (c) 2015, Marius Gedminas +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import unittest + +from ansible.utils.helpers import pct_to_int + + +class TestHelpers(unittest.TestCase): + + def test_pct_to_int(self): + self.assertEqual(pct_to_int(1, 100), 1) + self.assertEqual(pct_to_int(-1, 100), -1) + self.assertEqual(pct_to_int("1%", 10), 1) + self.assertEqual(pct_to_int("1%", 10, 0), 0) + self.assertEqual(pct_to_int("1", 100), 1) + self.assertEqual(pct_to_int("10%", 100), 10) diff --git a/test/units/utils/test_isidentifier.py b/test/units/utils/test_isidentifier.py new file mode 100644 index 00000000..de6de642 --- /dev/null +++ b/test/units/utils/test_isidentifier.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.utils.vars import isidentifier + + +# Originally posted at: http://stackoverflow.com/a/29586366 + + +@pytest.mark.parametrize( + "identifier", [ + "foo", "foo1_23", + ] +) +def test_valid_identifier(identifier): + assert isidentifier(identifier) + + +@pytest.mark.parametrize( + "identifier", [ + "pass", "foo ", " foo", "1234", "1234abc", "", " ", "foo bar", "no-dashed-names-for-you", + ] +) +def test_invalid_identifier(identifier): + assert not isidentifier(identifier) + + +def test_keywords_not_in_PY2(): + """In Python 2 ("True", "False", "None") are not keywords. The isidentifier + method ensures that those are treated as keywords on both Python 2 and 3. + """ + assert not isidentifier("True") + assert not isidentifier("False") + assert not isidentifier("None") + + +def test_non_ascii(): + """In Python 3 non-ascii characters are allowed as opposed to Python 2. The + isidentifier method ensures that those are treated as invalid on both + Python 2 and 3.
+ """ + assert not isidentifier("křížek") diff --git a/test/units/utils/test_plugin_docs.py b/test/units/utils/test_plugin_docs.py new file mode 100644 index 00000000..ff973b1e --- /dev/null +++ b/test/units/utils/test_plugin_docs.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# (c) 2020 Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import copy + +import pytest + +from ansible.utils.plugin_docs import ( + add_collection_to_versions_and_dates, +) + + +ADD_TESTS = [ + ( + # Module options + True, + False, + { + 'author': 'x', + 'version_added': '1.0.0', + 'deprecated': { + 'removed_in': '2.0.0', + }, + 'options': { + 'test': { + 'description': '', + 'type': 'str', + 'version_added': '1.1.0', + 'deprecated': { + # should not be touched since this isn't a plugin + 'removed_in': '2.0.0', + }, + 'env': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'version': '2.0.0', + }, + }, + ], + 'ini': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'version': '2.0.0', + }, + }, + ], + 'vars': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'removed_at_date': '2020-01-01', + }, + }, + ], + }, + 'subtest': { + 'description': '', + 'type': 'dict', + 'deprecated': { + # should not be touched since this isn't a plugin + 'version': '2.0.0', + }, + 'suboptions': { + 'suboption': { + 'description': '', + 'type': 'int', + 'version_added': '1.2.0', + } + }, + } + }, + }, + { + 'author': 'x', + 'version_added': '1.0.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + 'removed_in': '2.0.0', + 'removed_from_collection': 'foo.bar', + }, + 'options': { + 'test': { + 'description': '', + 'type': 'str', + 'version_added': '1.1.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + # should not be touched since this isn't a plugin + 'removed_in': '2.0.0', + }, + 'env': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'version': '2.0.0', + }, + }, + ], + 'ini': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'version': '2.0.0', + }, + }, + ], + 'vars': [ + # should not be touched since this isn't a plugin + { + 'version_added': '1.3.0', + 'deprecated': { + 'removed_at_date': '2020-01-01', + }, + }, + ], + }, + 'subtest': { + 'description': '', + 'type': 'dict', + 'deprecated': { + # should not be touched since this isn't a plugin + 'version': '2.0.0', + }, + 'suboptions': { + 'suboption': { + 'description': '', + 'type': 'int', + 'version_added': '1.2.0', + 'version_added_collection': 'foo.bar', + } + }, + } + }, + }, + ), + ( + # Module options + True, + False, + { + 'author': 'x', + 'deprecated': { + 'removed_at_date': '2020-01-01', + }, + }, + { + 'author': 'x', + 'deprecated': { + 'removed_at_date': '2020-01-01', + 'removed_from_collection': 'foo.bar', + }, + }, + ), + ( + # Plugin options + False, + False, + { + 'author': 'x', + 'version_added': '1.0.0', + 'deprecated': { + 'removed_in': '2.0.0', + }, + 'options': { + 'test': { + 'description': '', + 'type': 'str', + 'version_added': '1.1.0', + 'deprecated': { + # should not be touched since this is the wrong name + 'removed_in': '2.0.0', + }, + 'env': [ + { + 'version_added': '1.3.0', + 'deprecated': { + 
'version': '2.0.0', + }, + }, + ], + 'ini': [ + { + 'version_added': '1.3.0', + 'deprecated': { + 'version': '2.0.0', + }, + }, + ], + 'vars': [ + { + 'version_added': '1.3.0', + 'deprecated': { + 'removed_at_date': '2020-01-01', + }, + }, + ], + }, + 'subtest': { + 'description': '', + 'type': 'dict', + 'deprecated': { + 'version': '2.0.0', + }, + 'suboptions': { + 'suboption': { + 'description': '', + 'type': 'int', + 'version_added': '1.2.0', + } + }, + } + }, + }, + { + 'author': 'x', + 'version_added': '1.0.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + 'removed_in': '2.0.0', + 'removed_from_collection': 'foo.bar', + }, + 'options': { + 'test': { + 'description': '', + 'type': 'str', + 'version_added': '1.1.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + # should not be touched since this is the wrong name + 'removed_in': '2.0.0', + }, + 'env': [ + { + 'version_added': '1.3.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + 'version': '2.0.0', + 'collection_name': 'foo.bar', + }, + }, + ], + 'ini': [ + { + 'version_added': '1.3.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + 'version': '2.0.0', + 'collection_name': 'foo.bar', + }, + }, + ], + 'vars': [ + { + 'version_added': '1.3.0', + 'version_added_collection': 'foo.bar', + 'deprecated': { + 'removed_at_date': '2020-01-01', + 'collection_name': 'foo.bar', + }, + }, + ], + }, + 'subtest': { + 'description': '', + 'type': 'dict', + 'deprecated': { + 'version': '2.0.0', + 'collection_name': 'foo.bar', + }, + 'suboptions': { + 'suboption': { + 'description': '', + 'type': 'int', + 'version_added': '1.2.0', + 'version_added_collection': 'foo.bar', + } + }, + } + }, + }, + ), + ( + # Return values + True, # this value is ignored + True, + { + 'rv1': { + 'version_added': '1.0.0', + 'type': 'dict', + 'contains': { + 'srv1': { + 'version_added': '1.1.0', + }, + 'srv2': { + }, + } + }, + }, + { + 'rv1': { + 'version_added': '1.0.0', + 'version_added_collection': 'foo.bar', + 'type': 'dict', + 'contains': { + 'srv1': { + 'version_added': '1.1.0', + 'version_added_collection': 'foo.bar', + }, + 'srv2': { + }, + } + }, + }, + ), +] + + +@pytest.mark.parametrize('is_module,return_docs,fragment,expected_fragment', ADD_TESTS) +def test_add(is_module, return_docs, fragment, expected_fragment): + fragment_copy = copy.deepcopy(fragment) + add_collection_to_versions_and_dates(fragment_copy, 'foo.bar', is_module, return_docs) + assert fragment_copy == expected_fragment diff --git a/test/units/utils/test_shlex.py b/test/units/utils/test_shlex.py new file mode 100644 index 00000000..e13d302d --- /dev/null +++ b/test/units/utils/test_shlex.py @@ -0,0 +1,41 @@ +# (c) 2015, Marius Gedminas +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
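+# shlex_split here is ansible's thin wrapper around the stdlib shlex.split
+# (on Python 2 it additionally round-trips through bytes); keyword arguments
+# such as comments=True are passed straight through to shlex.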
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import unittest + +from ansible.utils.shlex import shlex_split + + +class TestSplit(unittest.TestCase): + + def test_trivial(self): + self.assertEqual(shlex_split("a b c"), ["a", "b", "c"]) + + def test_unicode(self): + self.assertEqual(shlex_split(u"a b \u010D"), [u"a", u"b", u"\u010D"]) + + def test_quoted(self): + self.assertEqual(shlex_split('"a b" c'), ["a b", "c"]) + + def test_comments(self): + self.assertEqual(shlex_split('"a b" c # d', comments=True), ["a b", "c"]) + + def test_error(self): + self.assertRaises(ValueError, shlex_split, 'a "b') diff --git a/test/units/utils/test_unsafe_proxy.py b/test/units/utils/test_unsafe_proxy.py new file mode 100644 index 00000000..205c0c65 --- /dev/null +++ b/test/units/utils/test_unsafe_proxy.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.six import PY3 +from ansible.utils.unsafe_proxy import AnsibleUnsafe, AnsibleUnsafeBytes, AnsibleUnsafeText, wrap_var + + +def test_wrap_var_text(): + assert isinstance(wrap_var(u'foo'), AnsibleUnsafeText) + + +def test_wrap_var_bytes(): + assert isinstance(wrap_var(b'foo'), AnsibleUnsafeBytes) + + +def test_wrap_var_string(): + if PY3: + assert isinstance(wrap_var('foo'), AnsibleUnsafeText) + else: + assert isinstance(wrap_var('foo'), AnsibleUnsafeBytes) + + +def test_wrap_var_dict(): + assert isinstance(wrap_var(dict(foo='bar')), dict) + assert not isinstance(wrap_var(dict(foo='bar')), AnsibleUnsafe) + assert isinstance(wrap_var(dict(foo=u'bar'))['foo'], AnsibleUnsafeText) + + +def test_wrap_var_dict_None(): + assert wrap_var(dict(foo=None))['foo'] is None + assert not isinstance(wrap_var(dict(foo=None))['foo'], AnsibleUnsafe) + + +def test_wrap_var_list(): + assert isinstance(wrap_var(['foo']), list) + assert not isinstance(wrap_var(['foo']), AnsibleUnsafe) + assert isinstance(wrap_var([u'foo'])[0], AnsibleUnsafeText) + + +def test_wrap_var_list_None(): + assert wrap_var([None])[0] is None + assert not isinstance(wrap_var([None])[0], AnsibleUnsafe) + + +def test_wrap_var_set(): + assert isinstance(wrap_var(set(['foo'])), set) + assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe) + for item in wrap_var(set([u'foo'])): + assert isinstance(item, AnsibleUnsafeText) + + +def test_wrap_var_set_None(): + for item in wrap_var(set([None])): + assert item is None + assert not isinstance(item, AnsibleUnsafe) + + +def test_wrap_var_tuple(): + assert isinstance(wrap_var(('foo',)), tuple) + assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe) + assert isinstance(wrap_var(('foo',))[0], AnsibleUnsafe) + + +def test_wrap_var_tuple_None(): + assert wrap_var((None,))[0] is None + assert not isinstance(wrap_var((None,))[0], AnsibleUnsafe) + + +def test_wrap_var_None(): + assert wrap_var(None) is None + assert not isinstance(wrap_var(None), AnsibleUnsafe) + + +def test_wrap_var_unsafe_text(): + assert isinstance(wrap_var(AnsibleUnsafeText(u'foo')), AnsibleUnsafeText) + + +def test_wrap_var_unsafe_bytes(): + assert isinstance(wrap_var(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeBytes) + + +def test_wrap_var_no_ref(): + thing = { + 'foo': { + 'bar': 'baz' + }, + 'bar': ['baz', 'qux'], + 'baz': ('qux',), + 'none': None, + 'text': 'text', + } + wrapped_thing = wrap_var(thing) + assert thing is not
wrapped_thing + assert thing['foo'] is not wrapped_thing['foo'] + assert thing['bar'][0] is not wrapped_thing['bar'][0] + assert thing['baz'][0] is not wrapped_thing['baz'][0] + assert thing['none'] is wrapped_thing['none'] # None is passed through unwrapped + assert thing['text'] is not wrapped_thing['text'] + + +def test_AnsibleUnsafeText(): + assert isinstance(AnsibleUnsafeText(u'foo'), AnsibleUnsafe) + + +def test_AnsibleUnsafeBytes(): + assert isinstance(AnsibleUnsafeBytes(b'foo'), AnsibleUnsafe) diff --git a/test/units/utils/test_vars.py b/test/units/utils/test_vars.py new file mode 100644 index 00000000..c92ce4b6 --- /dev/null +++ b/test/units/utils/test_vars.py @@ -0,0 +1,282 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from collections import defaultdict + +from units.compat import mock, unittest +from ansible.errors import AnsibleError +from ansible.utils.vars import combine_vars, merge_hash + + +class TestVariableUtils(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + combine_vars_merge_data = ( + dict( + a=dict(a=1), + b=dict(b=2), + result=dict(a=1, b=2), + ), + dict( + a=dict(a=1, c=dict(foo='bar')), + b=dict(b=2, c=dict(baz='bam')), + result=dict(a=1, b=2, c=dict(foo='bar', baz='bam')) + ), + dict( + a=defaultdict(a=1, c=defaultdict(foo='bar')), + b=dict(b=2, c=dict(baz='bam')), + result=defaultdict(a=1, b=2, c=defaultdict(foo='bar', baz='bam')) + ), + ) + combine_vars_replace_data = ( + dict( + a=dict(a=1), + b=dict(b=2), + result=dict(a=1, b=2) + ), + dict( + a=dict(a=1, c=dict(foo='bar')), + b=dict(b=2, c=dict(baz='bam')), + result=dict(a=1, b=2, c=dict(baz='bam')) + ), + dict( + a=defaultdict(a=1, c=dict(foo='bar')), + b=dict(b=2, c=defaultdict(baz='bam')), + result=defaultdict(a=1, b=2, c=defaultdict(baz='bam')) + ), + ) + + def test_combine_vars_improper_args(self): + with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'): + with self.assertRaises(AnsibleError): + combine_vars([1, 2, 3], dict(a=1)) + with self.assertRaises(AnsibleError): + combine_vars(dict(a=1), [1, 2, 3]) + + with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'): + with self.assertRaises(AnsibleError): + combine_vars([1, 2, 3], dict(a=1)) + with self.assertRaises(AnsibleError): + combine_vars(dict(a=1), [1, 2, 3]) + + def test_combine_vars_replace(self): + with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'): + for test in self.combine_vars_replace_data: + self.assertEqual(combine_vars(test['a'], test['b']), test['result']) + + def test_combine_vars_merge(self): + with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'): + for test in self.combine_vars_merge_data: + self.assertEqual(combine_vars(test['a'], test['b']), test['result']) + + merge_hash_data = { + "low_prio": { + "a": { + "a'": { + "x":
"low_value", + "y": "low_value", + "list": ["low_value"] + } + }, + "b": [1, 1, 2, 3] + }, + "high_prio": { + "a": { + "a'": { + "y": "high_value", + "z": "high_value", + "list": ["high_value"] + } + }, + "b": [3, 4, 4, {"5": "value"}] + } + } + + def test_merge_hash_simple(self): + for test in self.combine_vars_merge_data: + self.assertEqual(merge_hash(test['a'], test['b']), test['result']) + + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["high_value"] + } + }, + "b": high['b'] + } + self.assertEqual(merge_hash(low, high), expected) + + def test_merge_hash_non_recursive_and_list_replace(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = high + self.assertEqual(merge_hash(low, high, False, 'replace'), expected) + + def test_merge_hash_non_recursive_and_list_keep(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": high['a'], + "b": low['b'] + } + self.assertEqual(merge_hash(low, high, False, 'keep'), expected) + + def test_merge_hash_non_recursive_and_list_append(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": high['a'], + "b": low['b'] + high['b'] + } + self.assertEqual(merge_hash(low, high, False, 'append'), expected) + + def test_merge_hash_non_recursive_and_list_prepend(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": high['a'], + "b": high['b'] + low['b'] + } + self.assertEqual(merge_hash(low, high, False, 'prepend'), expected) + + def test_merge_hash_non_recursive_and_list_append_rp(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": high['a'], + "b": [1, 1, 2] + high['b'] + } + self.assertEqual(merge_hash(low, high, False, 'append_rp'), expected) + + def test_merge_hash_non_recursive_and_list_prepend_rp(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": high['a'], + "b": high['b'] + [1, 1, 2] + } + self.assertEqual(merge_hash(low, high, False, 'prepend_rp'), expected) + + def test_merge_hash_recursive_and_list_replace(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["high_value"] + } + }, + "b": high['b'] + } + self.assertEqual(merge_hash(low, high, True, 'replace'), expected) + + def test_merge_hash_recursive_and_list_keep(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["low_value"] + } + }, + "b": low['b'] + } + self.assertEqual(merge_hash(low, high, True, 'keep'), expected) + + def test_merge_hash_recursive_and_list_append(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["low_value", "high_value"] + } + }, + "b": low['b'] + high['b'] + } + self.assertEqual(merge_hash(low, high, True, 'append'), expected) + + def test_merge_hash_recursive_and_list_prepend(self): + low = self.merge_hash_data['low_prio'] + high = 
self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["high_value", "low_value"] + } + }, + "b": high['b'] + low['b'] + } + self.assertEqual(merge_hash(low, high, True, 'prepend'), expected) + + def test_merge_hash_recursive_and_list_append_rp(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["low_value", "high_value"] + } + }, + "b": [1, 1, 2] + high['b'] + } + self.assertEqual(merge_hash(low, high, True, 'append_rp'), expected) + + def test_merge_hash_recursive_and_list_prepend_rp(self): + low = self.merge_hash_data['low_prio'] + high = self.merge_hash_data['high_prio'] + expected = { + "a": { + "a'": { + "x": "low_value", + "y": "high_value", + "z": "high_value", + "list": ["high_value", "low_value"] + } + }, + "b": high['b'] + [1, 1, 2] + } + self.assertEqual(merge_hash(low, high, True, 'prepend_rp'), expected) diff --git a/test/units/utils/test_version.py b/test/units/utils/test_version.py new file mode 100644 index 00000000..7d04c112 --- /dev/null +++ b/test/units/utils/test_version.py @@ -0,0 +1,335 @@ +# -*- coding: utf-8 -*- +# (c) 2020 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from distutils.version import LooseVersion, StrictVersion + +import pytest + +from ansible.utils.version import _Alpha, _Numeric, SemanticVersion + + +EQ = [ + ('1.0.0', '1.0.0', True), + ('1.0.0', '1.0.0-beta', False), + ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', False), + ('1.0.0-beta+build', '1.0.0-beta+build', True), + ('1.0.0-beta+build1', '1.0.0-beta+build2', True), + ('1.0.0-beta+a', '1.0.0-alpha+bar', False), +] + +NE = [ + ('1.0.0', '1.0.0', False), + ('1.0.0', '1.0.0-beta', True), + ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', True), + ('1.0.0-beta+build', '1.0.0-beta+build', False), + ('1.0.0-beta+a', '1.0.0-alpha+bar', True), +] + +LT = [ + ('1.0.0', '2.0.0', True), + ('1.0.0-beta', '2.0.0-alpha', True), + ('1.0.0-alpha', '2.0.0-beta', True), + ('1.0.0-alpha', '1.0.0', True), + ('1.0.0-beta', '1.0.0-alpha3', False), + ('1.0.0+foo', '1.0.0-alpha', False), + ('1.0.0-beta.1', '1.0.0-beta.a', True), + ('1.0.0-beta+a', '1.0.0-alpha+bar', False), +] + +GT = [ + ('1.0.0', '2.0.0', False), + ('1.0.0-beta', '2.0.0-alpha', False), + ('1.0.0-alpha', '2.0.0-beta', False), + ('1.0.0-alpha', '1.0.0', False), + ('1.0.0-beta', '1.0.0-alpha3', True), + ('1.0.0+foo', '1.0.0-alpha', True), + ('1.0.0-beta.1', '1.0.0-beta.a', False), + ('1.0.0-beta+a', '1.0.0-alpha+bar', True), +] + +LE = [ + ('1.0.0', '1.0.0', True), + ('1.0.0', '2.0.0', True), + ('1.0.0-alpha', '1.0.0-beta', True), + ('1.0.0-beta', '1.0.0-alpha', False), +] + +GE = [ + ('1.0.0', '1.0.0', True), + ('1.0.0', '2.0.0', False), + ('1.0.0-alpha', '1.0.0-beta', False), + ('1.0.0-beta', '1.0.0-alpha', True), +] + +VALID = [ + "0.0.4", + "1.2.3", + "10.20.30", + "1.1.2-prerelease+meta", + "1.1.2+meta", + "1.1.2+meta-valid", + "1.0.0-alpha", + "1.0.0-beta", + "1.0.0-alpha.beta", + "1.0.0-alpha.beta.1", + "1.0.0-alpha.1", + "1.0.0-alpha0.valid", + "1.0.0-alpha.0valid", + "1.0.0-alpha-a.b-c-somethinglong+build.1-aef.1-its-okay", + "1.0.0-rc.1+build.1", + "2.0.0-rc.1+build.123", + "1.2.3-beta", + "10.2.3-DEV-SNAPSHOT", + "1.2.3-SNAPSHOT-123", + "1.0.0", + "2.0.0", + 
"1.1.7", + "2.0.0+build.1848", + "2.0.1-alpha.1227", + "1.0.0-alpha+beta", + "1.2.3----RC-SNAPSHOT.12.9.1--.12+788", + "1.2.3----R-S.12.9.1--.12+meta", + "1.2.3----RC-SNAPSHOT.12.9.1--.12", + "1.0.0+0.build.1-rc.10000aaa-kk-0.1", + "99999999999999999999999.999999999999999999.99999999999999999", + "1.0.0-0A.is.legal", +] + +INVALID = [ + "1", + "1.2", + "1.2.3-0123", + "1.2.3-0123.0123", + "1.1.2+.123", + "+invalid", + "-invalid", + "-invalid+invalid", + "-invalid.01", + "alpha", + "alpha.beta", + "alpha.beta.1", + "alpha.1", + "alpha+beta", + "alpha_beta", + "alpha.", + "alpha..", + "beta", + "1.0.0-alpha_beta", + "-alpha.", + "1.0.0-alpha..", + "1.0.0-alpha..1", + "1.0.0-alpha...1", + "1.0.0-alpha....1", + "1.0.0-alpha.....1", + "1.0.0-alpha......1", + "1.0.0-alpha.......1", + "01.1.1", + "1.01.1", + "1.1.01", + "1.2", + "1.2.3.DEV", + "1.2-SNAPSHOT", + "1.2.31.2.3----RC-SNAPSHOT.12.09.1--..12+788", + "1.2-RC-SNAPSHOT", + "-1.0.3-gamma+b7718", + "+justmeta", + "9.8.7+meta+meta", + "9.8.7-whatever+meta+meta", +] + +PRERELEASE = [ + ('1.0.0-alpha', True), + ('1.0.0-alpha.1', True), + ('1.0.0-0.3.7', True), + ('1.0.0-x.7.z.92', True), + ('0.1.2', False), + ('0.1.2+bob', False), + ('1.0.0', False), +] + +STABLE = [ + ('1.0.0-alpha', False), + ('1.0.0-alpha.1', False), + ('1.0.0-0.3.7', False), + ('1.0.0-x.7.z.92', False), + ('0.1.2', False), + ('0.1.2+bob', False), + ('1.0.0', True), + ('1.0.0+bob', True), +] + +LOOSE_VERSION = [ + (LooseVersion('1'), SemanticVersion('1.0.0')), + (LooseVersion('1-alpha'), SemanticVersion('1.0.0-alpha')), + (LooseVersion('1.0.0-alpha+build'), SemanticVersion('1.0.0-alpha+build')), +] + +LOOSE_VERSION_INVALID = [ + LooseVersion('1.a.3'), + LooseVersion(), + 'bar', + StrictVersion('1.2.3'), +] + + +def test_semanticversion_none(): + assert SemanticVersion().major is None + + +@pytest.mark.parametrize('left,right,expected', EQ) +def test_eq(left, right, expected): + assert (SemanticVersion(left) == SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('left,right,expected', NE) +def test_ne(left, right, expected): + assert (SemanticVersion(left) != SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('left,right,expected', LT) +def test_lt(left, right, expected): + assert (SemanticVersion(left) < SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('left,right,expected', LE) +def test_le(left, right, expected): + assert (SemanticVersion(left) <= SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('left,right,expected', GT) +def test_gt(left, right, expected): + assert (SemanticVersion(left) > SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('left,right,expected', GE) +def test_ge(left, right, expected): + assert (SemanticVersion(left) >= SemanticVersion(right)) is expected + + +@pytest.mark.parametrize('value', VALID) +def test_valid(value): + SemanticVersion(value) + + +@pytest.mark.parametrize('value', INVALID) +def test_invalid(value): + pytest.raises(ValueError, SemanticVersion, value) + + +def test_example_precedence(): + # https://semver.org/#spec-item-11 + sv = SemanticVersion + assert sv('1.0.0') < sv('2.0.0') < sv('2.1.0') < sv('2.1.1') + assert sv('1.0.0-alpha') < sv('1.0.0') + assert sv('1.0.0-alpha') < sv('1.0.0-alpha.1') < sv('1.0.0-alpha.beta') + assert sv('1.0.0-beta') < sv('1.0.0-beta.2') < sv('1.0.0-beta.11') < sv('1.0.0-rc.1') < sv('1.0.0') + + +@pytest.mark.parametrize('value,expected', PRERELEASE) +def test_prerelease(value, expected): + assert 
SemanticVersion(value).is_prerelease is expected + + +@pytest.mark.parametrize('value,expected', STABLE) +def test_stable(value, expected): + assert SemanticVersion(value).is_stable is expected + + +@pytest.mark.parametrize('value,expected', LOOSE_VERSION) +def test_from_loose_version(value, expected): + assert SemanticVersion.from_loose_version(value) == expected + + +@pytest.mark.parametrize('value', LOOSE_VERSION_INVALID) +def test_from_loose_version_invalid(value): + pytest.raises((AttributeError, ValueError), SemanticVersion.from_loose_version, value) + + +def test_comparison_with_string(): + assert SemanticVersion('1.0.0') > '0.1.0' + + +def test_alpha(): + assert _Alpha('a') == _Alpha('a') + assert _Alpha('a') == 'a' + assert _Alpha('a') != _Alpha('b') + assert _Alpha('a') != 1 + assert _Alpha('a') < _Alpha('b') + assert _Alpha('a') < 'c' + assert _Alpha('a') > _Numeric(1) + with pytest.raises(ValueError): + _Alpha('a') < None + assert _Alpha('a') <= _Alpha('a') + assert _Alpha('a') <= _Alpha('b') + assert _Alpha('b') >= _Alpha('a') + assert _Alpha('b') >= _Alpha('b') + + # The following 3*6 tests check that all comparison operators perform + # as expected. DO NOT remove any of them, or reformulate them (to remove + # the explicit `not`)! + + assert _Alpha('a') == _Alpha('a') + assert not _Alpha('a') != _Alpha('a') # pylint: disable=unneeded-not + assert not _Alpha('a') < _Alpha('a') # pylint: disable=unneeded-not + assert _Alpha('a') <= _Alpha('a') + assert not _Alpha('a') > _Alpha('a') # pylint: disable=unneeded-not + assert _Alpha('a') >= _Alpha('a') + + assert not _Alpha('a') == _Alpha('b') # pylint: disable=unneeded-not + assert _Alpha('a') != _Alpha('b') + assert _Alpha('a') < _Alpha('b') + assert _Alpha('a') <= _Alpha('b') + assert not _Alpha('a') > _Alpha('b') # pylint: disable=unneeded-not + assert not _Alpha('a') >= _Alpha('b') # pylint: disable=unneeded-not + + assert not _Alpha('b') == _Alpha('a') # pylint: disable=unneeded-not + assert _Alpha('b') != _Alpha('a') + assert not _Alpha('b') < _Alpha('a') # pylint: disable=unneeded-not + assert not _Alpha('b') <= _Alpha('a') # pylint: disable=unneeded-not + assert _Alpha('b') > _Alpha('a') + assert _Alpha('b') >= _Alpha('a') + + +def test_numeric(): + assert _Numeric(1) == _Numeric(1) + assert _Numeric(1) == 1 + assert _Numeric(1) != _Numeric(2) + assert _Numeric(1) != 'a' + assert _Numeric(1) < _Numeric(2) + assert _Numeric(1) < 3 + assert _Numeric(1) < _Alpha('b') + with pytest.raises(ValueError): + _Numeric(1) < None + assert _Numeric(1) <= _Numeric(1) + assert _Numeric(1) <= _Numeric(2) + assert _Numeric(2) >= _Numeric(1) + assert _Numeric(2) >= _Numeric(2) + + # The following 3*6 tests check that all comparison operators perform + # as expected. DO NOT remove any of them, or reformulate them (to remove + # the explicit `not`)! 
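+ # (rationale: Python dispatches each rich comparison to its own dunder
+ # method, so `not (a < b)` and `a >= b` exercise different code paths;
+ # both the operator and its negation have to be asserted explicitly)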
+ + assert _Numeric(1) == _Numeric(1) + assert not _Numeric(1) != _Numeric(1) # pylint: disable=unneeded-not + assert not _Numeric(1) < _Numeric(1) # pylint: disable=unneeded-not + assert _Numeric(1) <= _Numeric(1) + assert not _Numeric(1) > _Numeric(1) # pylint: disable=unneeded-not + assert _Numeric(1) >= _Numeric(1) + + assert not _Numeric(1) == _Numeric(2) # pylint: disable=unneeded-not + assert _Numeric(1) != _Numeric(2) + assert _Numeric(1) < _Numeric(2) + assert _Numeric(1) <= _Numeric(2) + assert not _Numeric(1) > _Numeric(2) # pylint: disable=unneeded-not + assert not _Numeric(1) >= _Numeric(2) # pylint: disable=unneeded-not + + assert not _Numeric(2) == _Numeric(1) # pylint: disable=unneeded-not + assert _Numeric(2) != _Numeric(1) + assert not _Numeric(2) < _Numeric(1) # pylint: disable=unneeded-not + assert not _Numeric(2) <= _Numeric(1) # pylint: disable=unneeded-not + assert _Numeric(2) > _Numeric(1) + assert _Numeric(2) >= _Numeric(1) diff --git a/test/units/vars/__init__.py b/test/units/vars/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/units/vars/test_module_response_deepcopy.py b/test/units/vars/test_module_response_deepcopy.py new file mode 100644 index 00000000..78f9de0e --- /dev/null +++ b/test/units/vars/test_module_response_deepcopy.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.vars.clean import module_response_deepcopy + +import pytest + + +def test_module_response_deepcopy_basic(): + x = 42 + y = module_response_deepcopy(x) + assert y == x + + +def test_module_response_deepcopy_atomic(): + tests = [None, 42, 2**100, 3.14, True, False, 1j, + "hello", u"hello\u1234"] + for x in tests: + assert module_response_deepcopy(x) is x + + +def test_module_response_deepcopy_list(): + x = [[1, 2], 3] + y = module_response_deepcopy(x) + assert y == x + assert x is not y + assert x[0] is not y[0] + + +def test_module_response_deepcopy_empty_tuple(): + x = () + y = module_response_deepcopy(x) + assert x is y + + +@pytest.mark.skip(reason='No current support for this situation') +def test_module_response_deepcopy_tuple(): + x = ([1, 2], 3) + y = module_response_deepcopy(x) + assert y == x + assert x is not y + assert x[0] is not y[0] + + +def test_module_response_deepcopy_tuple_of_immutables(): + x = ((1, 2), 3) + y = module_response_deepcopy(x) + assert x is y + + +def test_module_response_deepcopy_dict(): + x = {"foo": [1, 2], "bar": 3} + y = module_response_deepcopy(x) + assert y == x + assert x is not y + assert x["foo"] is not y["foo"] diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py new file mode 100644 index 00000000..65a79286 --- /dev/null +++ b/test/units/vars/test_variable_manager.py @@ -0,0 +1,307 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from units.compat import unittest +from units.compat.mock import MagicMock, patch +from ansible.inventory.manager import InventoryManager +from ansible.module_utils.six import iteritems +from ansible.playbook.play import Play + + +from units.mock.loader import DictDataLoader +from units.mock.path import mock_unfrackpath_noop + +from ansible.vars.manager import VariableManager + + +class TestVariableManager(unittest.TestCase): + + def test_basic_manager(self): + fake_loader = DictDataLoader({}) + + mock_inventory = MagicMock() + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + variables = v.get_vars(use_cache=False) + + # Check var manager expected values, never check: ['omit', 'vars'] + # FIXME: add the following ['ansible_version', 'ansible_playbook_python', 'groups'] + for varname, value in (('playbook_dir', os.path.abspath('.')), ): + self.assertEqual(variables[varname], value) + + def test_variable_manager_extra_vars(self): + fake_loader = DictDataLoader({}) + + extra_vars = dict(a=1, b=2, c=3) + mock_inventory = MagicMock() + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + + # override internal extra_vars loading + v._extra_vars = extra_vars + + myvars = v.get_vars(use_cache=False) + for (key, val) in iteritems(extra_vars): + self.assertEqual(myvars.get(key), val) + + def test_variable_manager_options_vars(self): + fake_loader = DictDataLoader({}) + + options_vars = dict(a=1, b=2, c=3) + mock_inventory = MagicMock() + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + + # override internal options_vars loading + v._extra_vars = options_vars + + myvars = v.get_vars(use_cache=False) + for (key, val) in iteritems(options_vars): + self.assertEqual(myvars.get(key), val) + + def test_variable_manager_play_vars(self): + fake_loader = DictDataLoader({}) + + mock_play = MagicMock() + mock_play.get_vars.return_value = dict(foo="bar") + mock_play.get_roles.return_value = [] + mock_play.get_vars_files.return_value = [] + + mock_inventory = MagicMock() + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar") + + def test_variable_manager_play_vars_files(self): + fake_loader = DictDataLoader({ + __file__: """ + foo: bar + """ + }) + + mock_play = MagicMock() + mock_play.get_vars.return_value = dict() + mock_play.get_roles.return_value = [] + mock_play.get_vars_files.return_value = [__file__] + + mock_inventory = MagicMock() + v = VariableManager(inventory=mock_inventory, loader=fake_loader) + self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar") + + def test_variable_manager_task_vars(self): + # FIXME: BCS make this work + return + + # pylint: disable=unreachable + fake_loader = DictDataLoader({}) + + mock_task = MagicMock() + mock_task._role = None + mock_task.loop = None + mock_task.get_vars.return_value = dict(foo="bar") + mock_task.get_include_params.return_value = dict() + + mock_all = MagicMock() + mock_all.get_vars.return_value = {} + mock_all.get_file_vars.return_value = {} + + mock_host = MagicMock() + mock_host.get.name.return_value = 'test01' + mock_host.get_vars.return_value = {} + mock_host.get_host_vars.return_value = {} + + mock_inventory = MagicMock() +
mock_inventory.hosts.get.return_value = mock_host + mock_inventory.hosts.get.name.return_value = 'test01' + mock_inventory.get_host.return_value = mock_host + mock_inventory.groups.__getitem__.return_value = mock_all + + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + self.assertEqual(v.get_vars(task=mock_task, use_cache=False).get("foo"), "bar") + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_variable_manager_precedence(self): + # FIXME: this needs to be redone as dataloader is not the automatic source of data anymore + return + + # pylint: disable=unreachable + ''' + Tests complex variations and combinations of get_vars() with different + objects to modify the context under which variables are merged. + ''' + # FIXME: BCS make this work + # return True + + mock_inventory = MagicMock() + + inventory1_filedata = """ + [group2:children] + group1 + + [group1] + host1 host_var=host_var_from_inventory_host1 + + [group1:vars] + group_var = group_var_from_inventory_group1 + + [group2:vars] + group_var = group_var_from_inventory_group2 + """ + + fake_loader = DictDataLoader({ + # inventory1 + '/etc/ansible/inventory1': inventory1_filedata, + # role defaults_only1 + '/etc/ansible/roles/defaults_only1/defaults/main.yml': """ + default_var: "default_var_from_defaults_only1" + host_var: "host_var_from_defaults_only1" + group_var: "group_var_from_defaults_only1" + group_var_all: "group_var_all_from_defaults_only1" + extra_var: "extra_var_from_defaults_only1" + """, + '/etc/ansible/roles/defaults_only1/tasks/main.yml': """ + - debug: msg="here i am" + """, + + # role defaults_only2 + '/etc/ansible/roles/defaults_only2/defaults/main.yml': """ + default_var: "default_var_from_defaults_only2" + host_var: "host_var_from_defaults_only2" + group_var: "group_var_from_defaults_only2" + group_var_all: "group_var_all_from_defaults_only2" + extra_var: "extra_var_from_defaults_only2" + """, + }) + + inv1 = InventoryManager(loader=fake_loader, sources=['/etc/ansible/inventory1']) + v = VariableManager(inventory=mock_inventory, loader=fake_loader) + + play1 = Play.load(dict( + hosts=['all'], + roles=['defaults_only1', 'defaults_only2'], + ), loader=fake_loader, variable_manager=v) + + # first we assert that the defaults as viewed as a whole are the merged results + # of the defaults from each role, with the last role defined "winning" when + # there is a variable naming conflict + res = v.get_vars(play=play1) + self.assertEqual(res['default_var'], 'default_var_from_defaults_only2') + + # next, we assert that when vars are viewed from the context of a task within a + # role, that task will see its own role defaults before any other role's + blocks = play1.compile() + task = blocks[1].block[0] + res = v.get_vars(play=play1, task=task) + self.assertEqual(res['default_var'], 'default_var_from_defaults_only1') + + # next we assert the precedence of inventory variables + v.set_inventory(inv1) + h1 = inv1.get_host('host1') + + res = v.get_vars(play=play1, host=h1) + self.assertEqual(res['group_var'], 'group_var_from_inventory_group1') + self.assertEqual(res['host_var'], 'host_var_from_inventory_host1') + + # next we test with group_vars/ files loaded + fake_loader.push("/etc/ansible/group_vars/all", """ + group_var_all: group_var_all_from_group_vars_all + """) + fake_loader.push("/etc/ansible/group_vars/group1", """ + group_var: group_var_from_group_vars_group1 + """) + fake_loader.push("/etc/ansible/group_vars/group3", """ + # this is a dummy, which should not
be used anywhere + group_var: group_var_from_group_vars_group3 + """) + fake_loader.push("/etc/ansible/host_vars/host1", """ + host_var: host_var_from_host_vars_host1 + """) + fake_loader.push("group_vars/group1", """ + playbook_group_var: playbook_group_var + """) + fake_loader.push("host_vars/host1", """ + playbook_host_var: playbook_host_var + """) + + res = v.get_vars(play=play1, host=h1) + # self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1') + # self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all') + # self.assertEqual(res['playbook_group_var'], 'playbook_group_var') + # self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1') + # self.assertEqual(res['playbook_host_var'], 'playbook_host_var') + + # add in the fact cache + v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache") + + res = v.get_vars(play=play1, host=h1) + self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache') + + @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) + def test_variable_manager_role_vars_dependencies(self): + ''' + Tests vars from role dependencies with duplicate dependencies. + ''' + mock_inventory = MagicMock() + + fake_loader = DictDataLoader({ + # role common-role + '/etc/ansible/roles/common-role/tasks/main.yml': """ + - debug: msg="{{role_var}}" + """, + # We do not need allow_duplicates: yes for this role + # because eliminating duplicates is done by the execution + # strategy, which we do not test here. + + # role role1 + '/etc/ansible/roles/role1/vars/main.yml': """ + role_var: "role_var_from_role1" + """, + '/etc/ansible/roles/role1/meta/main.yml': """ + dependencies: + - { role: common-role } + """, + + # role role2 + '/etc/ansible/roles/role2/vars/main.yml': """ + role_var: "role_var_from_role2" + """, + '/etc/ansible/roles/role2/meta/main.yml': """ + dependencies: + - { role: common-role } + """, + }) + + v = VariableManager(loader=fake_loader, inventory=mock_inventory) + + play1 = Play.load(dict( + hosts=['all'], + roles=['role1', 'role2'], + ), loader=fake_loader, variable_manager=v) + + # The task defined by common-role exists twice because role1 + # and role2 depend on common-role. Check that the tasks see + # different values of role_var. + blocks = play1.compile() + task = blocks[1].block[0] + res = v.get_vars(play=play1, task=task) + self.assertEqual(res['role_var'], 'role_var_from_role1') + + task = blocks[2].block[0] + res = v.get_vars(play=play1, task=task) + self.assertEqual(res['role_var'], 'role_var_from_role2') -- cgit v1.2.3